Author: skabashnyuk
Date: 2009-09-03 04:15:16 -0400 (Thu, 03 Sep 2009)
New Revision: 133
Added:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMigration.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JackrabbitIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RefCountingIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReleaseableIndexReader.java
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractIndex.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CachingMultiIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CommittableIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheckError.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldNames.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexFormatVersion.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHits.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Recovery.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedIndexReader.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Util.java
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/VolatileIndex.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestNodeIndexer.java
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestSearchManagerIndexing.java
Log:
EXOJCR-17 : Upgrade to Lucene 2.4
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerEntryWrapper.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -18,21 +18,7 @@
*/
package org.exoplatform.services.jcr.config;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-
-import javax.jcr.RepositoryException;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-
-import org.w3c.dom.Element;
-import org.xml.sax.SAXException;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.search.Query;
-
import org.exoplatform.container.configuration.ConfigurationManager;
import org.exoplatform.services.jcr.datamodel.IllegalNameException;
import org.exoplatform.services.jcr.impl.Constants;
@@ -52,7 +38,19 @@
import org.exoplatform.services.jcr.impl.core.query.lucene.SynonymProvider;
import org.exoplatform.services.jcr.util.StringNumberParser;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+
+import javax.jcr.RepositoryException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+
/**
* Created by The eXo Platform SAS.
*
@@ -145,6 +143,10 @@
private final static int DEFAULT_VOLATILEIDLETIME = 3;
+ //since https://jira.jboss.org/jira/browse/EXOJCR-17
+
+ public static final boolean DEFAULT_UPGRADE_INDEX = false;
+
private QueryHandlerEntry queryHandlerEntry;
public QueryHandlerEntry getQueryHandlerEntry()
@@ -597,6 +599,20 @@
}
/**
+ *
+ * @return true if index upgrade allowed.
+ */
+ public boolean isUpgradeIndex()
+ {
+ Boolean updateIndex = queryHandlerEntry.getParameterBoolean(PARAM_UPGRADE_INDEX, null);
+ if (updateIndex == null || !updateIndex)
+ {
+ updateIndex = Boolean.valueOf(System.getProperty(PARAM_UPGRADE_INDEX));
+ }
+ return updateIndex;
+ }
+
+ /**
* Creates a file system resource to the synonym provider configuration.
*
* @param cfm
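
The new isUpgradeIndex() accessor above resolves the flag in two steps: an explicit
"upgrade-index" entry in the query-handler configuration wins when present and true;
otherwise the JVM-wide system property of the same name is consulted, so an
administrator can force an index upgrade for every workspace without editing each
configuration. A minimal standalone sketch of that precedence (class and variable
names here are illustrative, not part of this commit):

public class UpgradeIndexFlagDemo
{
   public static void main(String[] args)
   {
      // Simulates a workspace whose query-handler configuration omits the
      // "upgrade-index" parameter (getParameterBoolean returned null).
      Boolean configValue = null;

      // An administrator can still switch the upgrade on globally:
      System.setProperty("upgrade-index", "true");

      Boolean upgrade = configValue;
      if (upgrade == null || !upgrade)
      {
         // Boolean.valueOf(null) is FALSE, so the flag stays off by default.
         upgrade = Boolean.valueOf(System.getProperty("upgrade-index"));
      }
      System.out.println("upgrade-index resolved to " + upgrade); // true
   }
}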
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -93,4 +93,8 @@
public static final String PARAM_VOLATILE_IDLE_TIME = "volatile-idle-time";
+ //since https://jira.jboss.org/jira/browse/EXOJCR-17
+
+ public static final String PARAM_UPGRADE_INDEX = "upgrade-index";
+
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,6 +16,14 @@
*/
package org.exoplatform.services.jcr.impl.core.query;
+import org.apache.lucene.search.Query;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
+import org.exoplatform.services.jcr.datamodel.InternalQName;
+import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.exoplatform.services.jcr.impl.core.SessionDataManager;
+import org.exoplatform.services.jcr.impl.core.SessionImpl;
+import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
+
import java.io.IOException;
import java.util.Iterator;
import java.util.Set;
@@ -23,14 +31,6 @@
import javax.jcr.RepositoryException;
import javax.jcr.query.InvalidQueryException;
-import org.apache.lucene.search.Query;
-
-import org.exoplatform.services.jcr.datamodel.InternalQName;
-import org.exoplatform.services.jcr.datamodel.NodeData;
-import org.exoplatform.services.jcr.impl.core.SessionDataManager;
-import org.exoplatform.services.jcr.impl.core.SessionImpl;
-import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
-
/**
* Defines an interface for the actual node indexing and query execution. The goal is to allow
* different implementations based on the persistent manager in use. Some persistent model might
@@ -90,8 +90,11 @@
/**
* Closes this <code>QueryHandler</code> and frees resources attached to this handler.
+ * @throws IOException
+ * @throws RepositoryException
+ * @throws RepositoryConfigurationException
*/
- void init();
+ void init() throws IOException, RepositoryException, RepositoryConfigurationException;
/**
* Creates a new query by specifying the query statement itself and the language in which the
@@ -108,7 +111,7 @@
* @return A <code>Query</code> object.
*/
ExecutableQuery createExecutableQuery(SessionImpl session, SessionDataManager itemMgr, String statement,
- String language) throws InvalidQueryException;
+ String language) throws InvalidQueryException;
/**
* Creates a new instance of an {@link AbstractQueryImpl} which is not initialized.
@@ -140,6 +143,6 @@
* @throws IOException if an error occurs while searching the index.
*/
public QueryHits executeQuery(Query query, boolean needsSystemTree, InternalQName[] orderProps, boolean[] orderSpecs)
- throws IOException;
+ throws IOException;
}
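
Since init() now declares three checked exceptions, every component that starts a
QueryHandler has to handle them explicitly. A hedged sketch of such a caller (the
class name is invented; SearchManager.start() further below takes the same approach,
rethrowing as an unchecked exception so a broken index aborts workspace startup
instead of leaving queries silently unanswered):

import java.io.IOException;

import javax.jcr.RepositoryException;

import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
import org.exoplatform.services.jcr.impl.core.query.QueryHandler;

public class QueryHandlerStarter
{
   public void start(QueryHandler handler)
   {
      try
      {
         handler.init(); // may now fail with any of the three checked exceptions
      }
      catch (IOException e)
      {
         throw new RuntimeException("Index initialization failed", e);
      }
      catch (RepositoryException e)
      {
         throw new RuntimeException("Index initialization failed", e);
      }
      catch (RepositoryConfigurationException e)
      {
         throw new RuntimeException("Index initialization failed", e);
      }
   }
}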
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,28 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-import javax.jcr.Node;
-import javax.jcr.RepositoryException;
-import javax.jcr.query.InvalidQueryException;
-import javax.jcr.query.Query;
-
-import org.picocontainer.Startable;
-
-import org.exoplatform.services.log.Log;
-
import org.exoplatform.container.configuration.ConfigurationManager;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
@@ -57,459 +35,526 @@
import org.exoplatform.services.jcr.impl.core.SessionImpl;
import org.exoplatform.services.jcr.impl.dataflow.persistent.WorkspacePersistentDataManager;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.picocontainer.Startable;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import javax.jcr.Node;
+import javax.jcr.RepositoryException;
+import javax.jcr.query.InvalidQueryException;
+import javax.jcr.query.Query;
+
/**
* Acts as a global entry point to execute queries and index nodes.
*/
-public class SearchManager implements Startable, MandatoryItemsPersistenceListener {
+public class SearchManager implements Startable, MandatoryItemsPersistenceListener
+{
- /**
- * Logger instance for this class
- */
- private static final Log log = ExoLogger.getLogger(SearchManager.class);
+ /**
+ * Logger instance for this class
+ */
+ private static final Log log = ExoLogger.getLogger(SearchManager.class);
- protected final QueryHandlerEntryWrapper config;
+ protected final QueryHandlerEntryWrapper config;
- /**
- * Text extractor for extracting text content of binary properties.
- */
- protected final DocumentReaderService extractor;
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ protected final DocumentReaderService extractor;
- /**
- * QueryHandler where query execution is delegated to
- */
- protected QueryHandler handler;
+ /**
+ * QueryHandler where query execution is delegated to
+ */
+ protected QueryHandler handler;
- /**
- * The shared item state manager instance for the workspace.
- */
- protected final ItemDataConsumer itemMgr;
+ /**
+ * The shared item state manager instance for the workspace.
+ */
+ protected final ItemDataConsumer itemMgr;
- /**
- * The namespace registry of the repository.
- */
- protected final NamespaceRegistryImpl nsReg;
+ /**
+ * The namespace registry of the repository.
+ */
+ protected final NamespaceRegistryImpl nsReg;
- /**
- * The node type registry.
- */
- protected final NodeTypeDataManager nodeTypeDataManager;
+ /**
+ * The node type registry.
+ */
+ protected final NodeTypeDataManager nodeTypeDataManager;
- /**
- * QueryHandler of the parent search manager or <code>null</code> if there is
- * none.
- */
- protected final SearchManager parentSearchManager;
+ /**
+ * QueryHandler of the parent search manager or <code>null</code> if there is
+ * none.
+ */
+ protected final SearchManager parentSearchManager;
- protected QPath indexingRoot;
+ protected QPath indexingRoot;
- protected List<QPath> excludedPaths = new ArrayList<QPath>();
+ protected List<QPath> excludedPaths = new ArrayList<QPath>();
- private final ConfigurationManager cfm;
+ private final ConfigurationManager cfm;
- /**
- * Creates a new <code>SearchManager</code>.
- *
- * @param config the search configuration.
- * @param nsReg the namespace registry.
- * @param ntReg the node type registry.
- * @param itemMgr the shared item state manager.
- * @param rootNodeId the id of the root node.
- * @param parentMgr the parent search manager or <code>null</code> if there is
- * no parent search manager.
- * @param excludedNodeId id of the node that should be excluded from indexing.
- * Any descendant of that node will also be excluded from indexing.
- * @throws RepositoryException if the search manager cannot be initialized
- * @throws RepositoryConfigurationException
- */
- public SearchManager(QueryHandlerEntry config,
- NamespaceRegistryImpl nsReg,
- NodeTypeDataManager ntReg,
- WorkspacePersistentDataManager itemMgr,
- SystemSearchManagerHolder parentSearchManager,
- DocumentReaderService extractor,
- ConfigurationManager cfm) throws RepositoryException,
- RepositoryConfigurationException {
+ /**
+ * Creates a new <code>SearchManager</code>.
+ *
+ * @param config the search configuration.
+ * @param nsReg the namespace registry.
+ * @param ntReg the node type registry.
+ * @param itemMgr the shared item state manager.
+ * @param rootNodeId the id of the root node.
+ * @param parentMgr the parent search manager or <code>null</code> if there is
+ * no parent search manager.
+ * @param excludedNodeId id of the node that should be excluded from indexing.
+ * Any descendant of that node will also be excluded from indexing.
+ * @throws RepositoryException if the search manager cannot be initialized
+ * @throws RepositoryConfigurationException
+ */
+ public SearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg, NodeTypeDataManager ntReg,
+ WorkspacePersistentDataManager itemMgr, SystemSearchManagerHolder parentSearchManager,
+ DocumentReaderService extractor, ConfigurationManager cfm) throws RepositoryException,
+ RepositoryConfigurationException
+ {
- this.extractor = extractor;
+ this.extractor = extractor;
- this.config = new QueryHandlerEntryWrapper(config);
- this.nodeTypeDataManager = ntReg;
- this.nsReg = nsReg;
- this.itemMgr = itemMgr;
- this.cfm = cfm;
+ this.config = new QueryHandlerEntryWrapper(config);
+ this.nodeTypeDataManager = ntReg;
+ this.nsReg = nsReg;
+ this.itemMgr = itemMgr;
+ this.cfm = cfm;
- this.parentSearchManager = parentSearchManager != null ? parentSearchManager.get() : null;
- itemMgr.addItemPersistenceListener(this);
-
- initializeQueryHandler();
- }
+ this.parentSearchManager = parentSearchManager != null ? parentSearchManager.get() : null;
+ itemMgr.addItemPersistenceListener(this);
- /**
- * Creates a query object from a node that can be executed on the workspace.
- *
- * @param session the session of the user executing the query.
- * @param itemMgr the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param node a node of type nt:query.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException if <code>absPath</code> is not a valid
- * persisted query (that is, a node of type nt:query)
- * @throws RepositoryException if any other error occurs.
- */
- public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, Node node) throws InvalidQueryException,
- RepositoryException {
- AbstractQueryImpl query = handler.createQueryInstance();
- query.init(session, sessionDataManager, handler, node);
- return query;
- }
+ initializeQueryHandler();
+ }
- /**
- * Creates a query object that can be executed on the workspace.
- *
- * @param session the session of the user executing the query.
- * @param itemMgr the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param statement the actual query statement.
- * @param language the syntax of the query statement.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException if the query is malformed or the
- * <code>language</code> is unknown.
- * @throws RepositoryException if any other error occurs.
- */
- public Query createQuery(SessionImpl session,
- SessionDataManager sessionDataManager,
- String statement,
- String language) throws InvalidQueryException, RepositoryException {
- AbstractQueryImpl query = handler.createQueryInstance();
- query.init(session, sessionDataManager, handler, statement, language);
- return query;
- }
+ /**
+ * Creates a query object from a node that can be executed on the workspace.
+ *
+ * @param session the session of the user executing the query.
+ * @param itemMgr the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param node a node of type nt:query.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException if <code>absPath</code> is not a valid
+ * persisted query (that is, a node of type nt:query)
+ * @throws RepositoryException if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, Node node)
+ throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = handler.createQueryInstance();
+ query.init(session, sessionDataManager, handler, node);
+ return query;
+ }
- /**
- * just for test use only
- */
- public QueryHandler getHandler() {
+ /**
+ * Creates a query object that can be executed on the workspace.
+ *
+ * @param session the session of the user executing the query.
+ * @param itemMgr the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param statement the actual query statement.
+ * @param language the syntax of the query statement.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException if the query is malformed or the
+ * <code>language</code> is unknown.
+ * @throws RepositoryException if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, String statement,
+ String language) throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = handler.createQueryInstance();
+ query.init(session, sessionDataManager, handler, statement, language);
+ return query;
+ }
- return handler;
- }
+ /**
+ * just for test use only
+ */
+ public QueryHandler getHandler()
+ {
- public void onSaveItems(ItemStateChangesLog changesLog) {
- if (handler == null)
- return;
+ return handler;
+ }
- long time = System.currentTimeMillis();
+ public void onSaveItems(ItemStateChangesLog changesLog)
+ {
+ if (handler == null)
+ return;
- // nodes that need to be removed from the index.
- final Set<String> removedNodes = new HashSet<String>();
- // nodes that need to be added to the index.
- final Set<String> addedNodes = new HashSet<String>();
+ long time = System.currentTimeMillis();
- final Map<String, List<ItemState>> updatedNodes = new HashMap<String, List<ItemState>>();
+ // nodes that need to be removed from the index.
+ final Set<String> removedNodes = new HashSet<String>();
+ // nodes that need to be added to the index.
+ final Set<String> addedNodes = new HashSet<String>();
- for (Iterator<ItemState> iter = changesLog.getAllStates().iterator(); iter.hasNext();) {
- ItemState itemState = iter.next();
+ final Map<String, List<ItemState>> updatedNodes = new HashMap<String, List<ItemState>>();
- if (!isExcluded(itemState)) {
- String uuid = itemState.isNode() ? itemState.getData().getIdentifier() : itemState.getData().getParentIdentifier();
+ for (Iterator<ItemState> iter = changesLog.getAllStates().iterator(); iter.hasNext();)
+ {
+ ItemState itemState = iter.next();
- if (itemState.isAdded()) {
- if (itemState.isNode()) {
- addedNodes.add(uuid);
- } else {
- if (!addedNodes.contains(uuid)) {
- createNewOrAdd(uuid, itemState, updatedNodes);
+ if (!isExcluded(itemState))
+ {
+ String uuid =
+ itemState.isNode() ? itemState.getData().getIdentifier() : itemState.getData().getParentIdentifier();
+
+ if (itemState.isAdded())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ if (!addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
}
- }
- } else if (itemState.isRenamed()) {
- if (itemState.isNode()) {
- addedNodes.add(uuid);
- } else {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- } else if (itemState.isUpdated()) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- } else if (itemState.isMixinChanged()) {
- createNewOrAdd(uuid, itemState, updatedNodes);
- } else if (itemState.isDeleted()) {
- if (itemState.isNode()) {
- if (addedNodes.contains(uuid)) {
- addedNodes.remove(uuid);
- removedNodes.remove(uuid);
- } else {
- removedNodes.add(uuid);
+ else if (itemState.isRenamed())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
}
- // remove all changes after node remove
- updatedNodes.remove(uuid);
- } else {
- if (!removedNodes.contains(uuid) && !addedNodes.contains(uuid)) {
- createNewOrAdd(uuid, itemState, updatedNodes);
+ else if (itemState.isUpdated())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
}
- }
- }
+ else if (itemState.isMixinChanged())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ else if (itemState.isDeleted())
+ {
+ if (itemState.isNode())
+ {
+ if (addedNodes.contains(uuid))
+ {
+ addedNodes.remove(uuid);
+ removedNodes.remove(uuid);
+ }
+ else
+ {
+ removedNodes.add(uuid);
+ }
+ // remove all changes after node remove
+ updatedNodes.remove(uuid);
+ }
+ else
+ {
+ if (!removedNodes.contains(uuid) && !addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ }
+ }
}
- }
- // TODO make quick changes
- for (String uuid : updatedNodes.keySet()) {
- removedNodes.add(uuid);
- addedNodes.add(uuid);
- }
+ // TODO make quick changes
+ for (String uuid : updatedNodes.keySet())
+ {
+ removedNodes.add(uuid);
+ addedNodes.add(uuid);
+ }
- // // property events
- // List<ItemState> propEvents = new ArrayList<ItemState>();
- // List<ItemState> itemStates = changesLog.getAllStates();
- //
- // final Set<String> allRemovedNodesId = new HashSet<String>();
- // final Set<String> allAddedNodesId = new HashSet<String>();
- // for (ItemState itemState : itemStates) {
- // if (!isExcluded(itemState)) {
- // if (itemState.isNode()) {
- // if (itemState.isAdded() || itemState.isRenamed()) {
- // addedNodes.add(itemState.getData().getIdentifier());
- // allAddedNodesId.add(itemState.getData().getIdentifier());
- // } else if (itemState.isDeleted()) {
- // // remove node from add list, and if node not in add list add it to
- // // removed list
- // if (!addedNodes.remove(itemState.getData().getIdentifier()))
- // removedNodes.add(itemState.getData().getIdentifier());
- // allRemovedNodesId.add(itemState.getData().getIdentifier());
- // } else if (itemState.isMixinChanged()) {
- // removedNodes.add(itemState.getData().getIdentifier());
- // addedNodes.add(itemState.getData().getIdentifier());
- // }
- // } else {
- // propEvents.add(itemState);
- // }
- // }
- // }
- //
- // // sort out property events
- // for (int i = 0; i < propEvents.size(); i++) {
- // ItemState event = propEvents.get(i);
- // String nodeId = event.getData().getParentIdentifier();
- // if (event.isAdded()) {
- // if (!addedNodes.contains(nodeId) && !allAddedNodesId.contains(nodeId)) {
- // // only property added
- // // need to re-index
- // addedNodes.add(nodeId);
- // removedNodes.add(nodeId);
- // } else {
- // // the node where this prop belongs to is also new
- // }
- // } else if (event.isRenamed() || event.isUpdated()) {
- // // need to re-index
- // addedNodes.add(nodeId);
- // removedNodes.add(nodeId);
- // } else if (event.isDeleted()) {
- // if (!allRemovedNodesId.contains(nodeId)) {
- // addedNodes.add(nodeId);
- // removedNodes.add(nodeId);
- // }
- // }
- // }
+ Iterator<NodeData> addedStates = new Iterator<NodeData>()
+ {
+ private final Iterator<String> iter = addedNodes.iterator();
- Iterator<NodeData> addedStates = new Iterator<NodeData>() {
- private final Iterator<String> iter = addedNodes.iterator();
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
- public boolean hasNext() {
- return iter.hasNext();
- }
+ public NodeData next()
+ {
- public NodeData next() {
+ // cycle till find a next or meet the end of set
+ do
+ {
+ String id = iter.next();
+ try
+ {
+ ItemData item = itemMgr.getItemData(id);
+ if (item != null)
+ {
+ if (item.isNode())
+ return (NodeData)item; // return node
+ else
+ log.warn("Node not found, but property " + id + ", " + item.getQPath().getAsString()
+ + " found. ");
+ }
+ else
+ log.warn("Unable to index node with id " + id + ", node does not exist.");
- // cycle till find a next or meet the end of set
- do {
- String id = iter.next();
- try {
- ItemData item = itemMgr.getItemData(id);
- if (item != null) {
- if (item.isNode())
- return (NodeData) item; // return node
- else
- log.warn("Node not found, but property " + id + ", "
- + item.getQPath().getAsString() + " found. ");
- } else
- log.warn("Unable to index node with id " + id + ", node does not exist.");
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Can't read next node data " + id, e);
+ }
+ }
+ while (iter.hasNext()); // get next if error or node not found
- } catch (RepositoryException e) {
- log.error("Can't read next node data " + id, e);
- }
- } while (iter.hasNext()); // get next if error or node not found
+ return null; // we met the end of iterator set
+ }
- return null; // we met the end of iterator set
- }
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
+ }
+ };
- public void remove() {
- throw new UnsupportedOperationException();
- }
- };
+ Iterator<String> removedIds = new Iterator<String>()
+ {
+ private final Iterator<String> iter = removedNodes.iterator();
- Iterator<String> removedIds = new Iterator<String>() {
- private final Iterator<String> iter = removedNodes.iterator();
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
- public boolean hasNext() {
- return iter.hasNext();
- }
+ public String next()
+ {
+ return nextNodeId();
+ }
- public String next() {
- return nextNodeId();
- }
+ public String nextNodeId() throws NoSuchElementException
+ {
+ return iter.next();
+ }
- public String nextNodeId() throws NoSuchElementException {
- return iter.next();
- }
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
- public void remove() {
- throw new UnsupportedOperationException();
+ }
+ };
+ if (removedNodes.size() > 0 || addedNodes.size() > 0)
+ {
+ try
+ {
+ handler.updateNodes(removedIds, addedStates);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ handler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+ log.warn("Exception occurred while writing the error log. Error log is not complete. " + ioe, ioe);
+ }
+ }
}
- };
- if (removedNodes.size() > 0 || addedNodes.size() > 0) {
- try {
- handler.updateNodes(removedIds, addedStates);
- } catch (RepositoryException e) {
- log.error("Error indexing changes " + e, e);
- } catch (IOException e) {
- log.error("Error indexing changes " + e, e);
- try {
- handler.logErrorChanges(removedNodes, addedNodes);
- } catch (IOException ioe) {
- log.warn("Exception occure when errorLog writed. Error log is not complete. " + ioe, ioe);
- }
+ if (log.isDebugEnabled())
+ {
+ log.debug("onEvent: indexing finished in " + String.valueOf(System.currentTimeMillis() - time) + " ms.");
}
- }
+ }
- if (log.isDebugEnabled()) {
- log.debug("onEvent: indexing finished in "
- + String.valueOf(System.currentTimeMillis() - time) + " ms.");
- }
- }
+ public void createNewOrAdd(String key, ItemState state, Map<String, List<ItemState>> updatedNodes)
+ {
+ List<ItemState> list = updatedNodes.get(key);
+ if (list == null)
+ {
+ list = new ArrayList<ItemState>();
+ updatedNodes.put(key, list);
+ }
+ list.add(state);
- public void createNewOrAdd(String key, ItemState state, Map<String, List<ItemState>> updatedNodes) {
- List<ItemState> list = updatedNodes.get(key);
- if (list == null) {
- list = new ArrayList<ItemState>();
- updatedNodes.put(key, list);
- }
- list.add(state);
+ }
- }
+ public void start()
+ {
- public void start() {
+ if (log.isDebugEnabled())
+ log.debug("start");
- if (log.isDebugEnabled())
- log.debug("start");
+ // Calculating excluded node identifiers
+ excludedPaths.add(Constants.JCR_SYSTEM_PATH);
- // Calculating excluded node identifiers
- excludedPaths.add(Constants.JCR_SYSTEM_PATH);
+ if (config.getExcludedNodeIdentifers() != null)
+ {
+ StringTokenizer stringTokenizer = new StringTokenizer(config.getExcludedNodeIdentifers());
+ while (stringTokenizer.hasMoreTokens())
+ {
- if (config.getExcludedNodeIdentifers() != null) {
- StringTokenizer stringTokenizer = new StringTokenizer(config.getExcludedNodeIdentifers());
- while (stringTokenizer.hasMoreTokens()) {
+ try
+ {
+ ItemData excludeData = itemMgr.getItemData(stringTokenizer.nextToken());
+ if (excludeData != null)
+ excludedPaths.add(excludeData.getQPath());
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage());
+ }
+ }
+ }
- try {
- ItemData excludeData = itemMgr.getItemData(stringTokenizer.nextToken());
- if (excludeData != null)
- excludedPaths.add(excludeData.getQPath());
- } catch (RepositoryException e) {
- log.warn(e.getLocalizedMessage());
- }
+ indexingRoot = Constants.ROOT_PATH;
+ if (config.getRootNodeIdentifer() != null)
+ {
+ try
+ {
+ ItemData indexingRootData = itemMgr.getItemData(config.getRootNodeIdentifer());
+ if (indexingRootData != null && indexingRootData.isNode())
+ indexingRoot = indexingRootData.getQPath();
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage() + " Indexing root set to " + indexingRoot.getAsString());
+ }
+
}
- }
+ try
+ {
+ handler.init();
- indexingRoot = Constants.ROOT_PATH;
- if (config.getRootNodeIdentifer() != null) {
- try {
- ItemData indexingRootData = itemMgr.getItemData(config.getRootNodeIdentifer());
- if (indexingRootData != null && indexingRootData.isNode())
- indexingRoot = indexingRootData.getQPath();
- } catch (RepositoryException e) {
- log.warn(e.getLocalizedMessage() + " Indexing root set to " + indexingRoot.getAsString());
}
+ catch (IOException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ }
- }
- handler.init();
- }
+ public void stop()
+ {
+ handler.close();
+ log.info("Search manager stopped");
+ }
- public void stop() {
- handler.close();
- log.info("Search manager stopped");
- }
+ /**
+ * Checks if the given event should be excluded based on the
+ * {@link #excludePath} setting.
+ *
+ * @param event observation event
+ * @return <code>true</code> if the event should be excluded,
+ * <code>false</code> otherwise
+ */
+ protected boolean isExcluded(ItemState event)
+ {
- /**
- * Checks if the given event should be excluded based on the
- * {@link #excludePath} setting.
- *
- * @param event observation event
- * @return <code>true</code> if the event should be excluded,
- * <code>false</code> otherwise
- */
- protected boolean isExcluded(ItemState event) {
+ for (QPath excludedPath : excludedPaths)
+ {
+ if (event.getData().getQPath().isDescendantOf(excludedPath) || event.getData().getQPath().equals(excludedPath))
+ return true;
+ }
- for (QPath excludedPath : excludedPaths) {
- if (event.getData().getQPath().isDescendantOf(excludedPath)
- || event.getData().getQPath().equals(excludedPath))
- return true;
- }
+ return !event.getData().getQPath().isDescendantOf(indexingRoot)
+ && !event.getData().getQPath().equals(indexingRoot);
+ }
- return !event.getData().getQPath().isDescendantOf(indexingRoot)
- && !event.getData().getQPath().equals(indexingRoot);
- }
+ protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
+ throws RepositoryConfigurationException
+ {
- protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler) throws RepositoryConfigurationException {
+ QueryHandlerContext context =
+ new QueryHandlerContext(itemMgr, config.getRootNodeIdentifer() != null ? config.getRootNodeIdentifer()
+ : Constants.ROOT_UUID, nodeTypeDataManager, nsReg, parentHandler, config.getIndexDir(), extractor);
+ return context;
+ }
- QueryHandlerContext context = new QueryHandlerContext(itemMgr,
- config.getRootNodeIdentifer() != null ? config.getRootNodeIdentifer()
- : Constants.ROOT_UUID,
- nodeTypeDataManager,
- nsReg,
- parentHandler,
- config.getIndexDir(),
- extractor);
- return context;
- }
+ /**
+ * Initializes the query handler.
+ *
+ * @throws RepositoryException if the query handler cannot be initialized.
+ * @throws RepositoryConfigurationException
+ * @throws ClassNotFoundException
+ */
+ private void initializeQueryHandler() throws RepositoryException, RepositoryConfigurationException
+ {
+ // initialize query handler
+ String className = config.getType();
+ if (className == null)
+ throw new RepositoryConfigurationException("Content handler configuration failed");
- /**
- * Initializes the query handler.
- *
- * @throws RepositoryException if the query handler cannot be initialized.
- * @throws RepositoryConfigurationException
- * @throws ClassNotFoundException
- */
- private void initializeQueryHandler() throws RepositoryException,
- RepositoryConfigurationException {
- // initialize query handler
- String className = config.getType();
- if (className == null)
- throw new RepositoryConfigurationException("Content hanler configuration fail");
+ try
+ {
+ Class qHandlerClass = Class.forName(className, true, this.getClass().getClassLoader());
+ Constructor constuctor = qHandlerClass.getConstructor(QueryHandlerEntry.class, ConfigurationManager.class);
+ handler = (QueryHandler)constuctor.newInstance(config.getQueryHandlerEntry(), cfm);
+ QueryHandler parentHandler = (this.parentSearchManager != null) ? parentSearchManager.getHandler() : null;
+ QueryHandlerContext context = createQueryHandlerContext(parentHandler);
+ handler.setContext(context);
+ }
+ catch (SecurityException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalArgumentException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (ClassNotFoundException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (NoSuchMethodException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InstantiationException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalAccessException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InvocationTargetException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IOException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ }
- try {
- Class qHandlerClass = Class.forName(className, true, this.getClass().getClassLoader());
- Constructor constuctor = qHandlerClass.getConstructor(QueryHandlerEntry.class, ConfigurationManager.class);
- handler = (QueryHandler) constuctor.newInstance(config.getQueryHandlerEntry(), cfm);
- QueryHandler parentHandler = (this.parentSearchManager != null) ? parentSearchManager.getHandler()
- : null;
- QueryHandlerContext context = createQueryHandlerContext(parentHandler);
- handler.setContext(context);
- } catch (SecurityException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IllegalArgumentException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (ClassNotFoundException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (NoSuchMethodException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (InstantiationException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IllegalAccessException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (InvocationTargetException e) {
- throw new RepositoryException(e.getMessage(), e);
- } catch (IOException e) {
- throw new RepositoryException(e.getMessage(), e);
- }
- }
-
}
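
onSaveItems() above reduces every persisted change to two sets: node identifiers to
delete from the index and node states to (re)index, and the loop marked "TODO make
quick changes" folds each updated node into both sets, so an update is executed as
remove-then-add. A toy reduction with invented names, just to make the contract of
handler.updateNodes(removedIds, addedStates) concrete:

import java.util.HashSet;
import java.util.Set;

public class UpdateReductionDemo
{
   public static void main(String[] args)
   {
      Set<String> removed = new HashSet<String>();
      Set<String> added = new HashSet<String>();
      Set<String> updated = new HashSet<String>();

      added.add("node-a"); // freshly created node
      removed.add("node-b"); // deleted node
      updated.add("node-c"); // a property of node-c changed

      // An updated node is re-indexed as remove-then-add.
      for (String uuid : updated)
      {
         removed.add(uuid);
         added.add(uuid);
      }

      System.out.println("to remove: " + removed); // node-b and node-c
      System.out.println("to add: " + added); // node-a and node-c
   }
}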
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,11 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.jcr.RepositoryException;
-
import org.exoplatform.container.configuration.ConfigurationManager;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
@@ -33,6 +28,12 @@
import org.exoplatform.services.log.ExoLogger;
import org.exoplatform.services.log.Log;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.jcr.RepositoryException;
+
/**
* Created by The eXo Platform SAS.
*
@@ -40,67 +41,97 @@
* @version $Id: SystemSearchManager.java 13891 2008-05-05 16:02:30Z pnedonosko
* $
*/
-public class SystemSearchManager extends SearchManager {
+public class SystemSearchManager extends SearchManager
+{
- /**
- * Class logger.
- */
- private final Log log = ExoLogger.getLogger("jcr.SystemSearchManager");
+ /**
+ * Class logger.
+ */
+ private final Log log = ExoLogger.getLogger("jcr.SystemSearchManager");
- /**
- * Is started flag.
- */
- private boolean isStarted = false;
+ /**
+ * Is started flag.
+ */
+ private boolean isStarted = false;
- /**
- * ChangesLog Buffer (used for saves before start).
- */
- private List<ItemStateChangesLog> changesLogBuffer = new ArrayList<ItemStateChangesLog>();
+ /**
+ * ChangesLog Buffer (used for saves before start).
+ */
+ private List<ItemStateChangesLog> changesLogBuffer = new ArrayList<ItemStateChangesLog>();
- public static final String INDEX_DIR_SUFFIX = "system";
+ public static final String INDEX_DIR_SUFFIX = "system";
- public SystemSearchManager(QueryHandlerEntry config,
- NamespaceRegistryImpl nsReg,
- NodeTypeDataManager ntReg,
- WorkspacePersistentDataManager itemMgr,
- DocumentReaderService service,
- ConfigurationManager cfm) throws RepositoryException,
- RepositoryConfigurationException {
- super(config, nsReg, ntReg, itemMgr, null, service, cfm);
- }
+ public SystemSearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg, NodeTypeDataManager ntReg,
+ WorkspacePersistentDataManager itemMgr, DocumentReaderService service, ConfigurationManager cfm)
+ throws RepositoryException, RepositoryConfigurationException
+ {
+ super(config, nsReg, ntReg, itemMgr, null, service, cfm);
+ }
- @Override
- public void onSaveItems(ItemStateChangesLog changesLog) {
- if (!isStarted) {
- changesLogBuffer.add(changesLog);
- } else {
- super.onSaveItems(changesLog);
- }
- }
+ @Override
+ public void onSaveItems(ItemStateChangesLog changesLog)
+ {
+ if (!isStarted)
+ {
+ changesLogBuffer.add(changesLog);
+ }
+ else
+ {
+ super.onSaveItems(changesLog);
+ }
+ }
- @Override
- public void start() {
- indexingRoot = Constants.JCR_SYSTEM_PATH;
- excludedPaths.remove(Constants.JCR_SYSTEM_PATH);
- isStarted = true;
- handler.init();
- for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer) {
- super.onSaveItems(bufferedChangesLog);
- }
- changesLogBuffer.clear();
- changesLogBuffer = null;
- }
+ @Override
+ public void start()
+ {
+ indexingRoot = Constants.JCR_SYSTEM_PATH;
+ excludedPaths.remove(Constants.JCR_SYSTEM_PATH);
+ isStarted = true;
+ try
+ {
+ handler.init();
- @Override
- protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler) throws RepositoryConfigurationException {
- QueryHandlerContext context = new QueryHandlerContext(itemMgr,
- Constants.SYSTEM_UUID,
- nodeTypeDataManager,
- nsReg,
- parentHandler,
- config.getIndexDir() + "_"
- + INDEX_DIR_SUFFIX,
- extractor);
- return context;
- }
+ }
+ catch (IOException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ throw new RuntimeException(e);
+ }
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ throw new RuntimeException(e);
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ throw new RuntimeException(e);
+ }
+ for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer)
+ {
+ super.onSaveItems(bufferedChangesLog);
+ }
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ }
+
+ @Override
+ protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
+ throws RepositoryConfigurationException
+ {
+ QueryHandlerContext context =
+ new QueryHandlerContext(itemMgr, Constants.SYSTEM_UUID, nodeTypeDataManager, nsReg, parentHandler, config
+ .getIndexDir()
+ + "_" + INDEX_DIR_SUFFIX, extractor);
+ return context;
+ }
}
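
SystemSearchManager keeps its buffer-and-replay behaviour: onSaveItems() queues any
changes that arrive before start() has run, and start() replays the queue once
handler.init() succeeds (the buffer is also cleared on failure, since the index is
unusable at that point). A generic sketch of the pattern, with invented names and
without the JCR types:

import java.util.ArrayList;
import java.util.List;

public class BufferAndReplayDemo
{
   private final List<String> buffer = new ArrayList<String>();

   private boolean started = false;

   public synchronized void onChange(String change)
   {
      if (!started)
      {
         buffer.add(change); // index not ready yet, remember the change
      }
      else
      {
         index(change);
      }
   }

   public synchronized void start()
   {
      started = true;
      for (String change : buffer)
      {
         index(change); // replay everything that arrived early
      }
      buffer.clear();
   }

   private void index(String change)
   {
      System.out.println("indexing " + change);
   }

   public static void main(String[] args)
   {
      BufferAndReplayDemo demo = new BufferAndReplayDemo();
      demo.onChange("change-1"); // buffered
      demo.start(); // replays change-1
      demo.onChange("change-2"); // indexed immediately
   }
}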
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,17 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
@@ -38,324 +27,276 @@
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermVectorOffsetInfo;
import org.apache.lucene.search.Query;
-
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.io.IOException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
/**
- * <code>AbstractExcerpt</code> implements base functionality for an excerpt provider.
+ * <code>AbstractExcerpt</code> implements base functionality for an excerpt
+ * provider.
*/
-public abstract class AbstractExcerpt
- implements HighlightingExcerptProvider
-{
+public abstract class AbstractExcerpt implements HighlightingExcerptProvider {
- /**
- * Logger instance for this class.
- */
- private static final Log log = ExoLogger.getLogger(AbstractExcerpt.class);
+ /**
+ * Logger instance for this class.
+ */
+ private static final Log log = ExoLogger.getLogger(AbstractExcerpt.class);
- /**
- * The search index.
- */
- protected SearchIndex index;
+ /**
+ * The search index.
+ */
+ protected SearchIndex index;
- /**
- * The current query.
- */
- protected Query query;
+ /**
+ * The current query.
+ */
+ protected Query query;
- /**
- * Indicates whether the query is already rewritten.
- */
- private boolean rewritten = false;
+ /**
+ * Indicates whether the query is already rewritten.
+ */
+ private boolean rewritten = false;
- /**
- * {@inheritDoc}
- */
- public void init(Query query, SearchIndex index) throws IOException
- {
- this.index = index;
- this.query = query;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public void init(Query query, SearchIndex index) throws IOException {
+ this.index = index;
+ this.query = query;
+ }
- /**
- * {@inheritDoc}
- */
- public String getExcerpt(String nodeId, int maxFragments, int maxFragmentSize) throws IOException
- {
- IndexReader reader = index.getIndexReader();
- try
- {
- checkRewritten(reader);
- Term idTerm = new Term(FieldNames.UUID, nodeId);
- TermDocs tDocs = reader.termDocs(idTerm);
- int docNumber;
- Document doc;
- try
- {
- if (tDocs.next())
- {
- docNumber = tDocs.doc();
- doc = reader.document(docNumber);
+ /**
+ * {@inheritDoc}
+ */
+ public String getExcerpt(String nodeId, int maxFragments, int maxFragmentSize) throws IOException {
+ IndexReader reader = index.getIndexReader();
+ try {
+ checkRewritten(reader);
+ Term idTerm = new Term(FieldNames.UUID, nodeId);
+ TermDocs tDocs = reader.termDocs(idTerm);
+ int docNumber;
+ Document doc;
+ try {
+ if (tDocs.next()) {
+ docNumber = tDocs.doc();
+ doc = reader.document(docNumber);
+ } else {
+ // node not found in index
+ return null;
+ }
+ } finally {
+ tDocs.close();
+ }
+ Field[] fields = doc.getFields(FieldNames.FULLTEXT);
+ if (fields == null) {
+ log.debug("Fulltext field not stored, using " + SimpleExcerptProvider.class.getName());
+ SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
+ exProvider.init(query, index);
+ return exProvider.getExcerpt(nodeId, maxFragments, maxFragmentSize);
+ }
+ StringBuffer text = new StringBuffer();
+ String separator = "";
+ for (int i = 0; i < fields.length; i++) {
+ if (fields[i].stringValue().length() == 0) {
+ continue;
+ }
+ text.append(separator);
+ text.append(fields[i].stringValue());
+ // this is a hack! in general multiple fields with the same
+ // name are handled properly, that is, offset and position is
+ // calculated correctly. there is one case however where
+ // the offset gets wrong:
+ // if a term text ends with characters that are considered noise
+ // then the offset of the next field will be off by the number
+ // of noise characters.
+ // therefore we delete noise characters at the end of the text.
+ // this process is required for all but the last field
+ if (i < fields.length - 1) {
+ for (int j = text.length() - 1; j >= 0; j--) {
+ if (Character.isLetterOrDigit(text.charAt(j))) {
+ break;
+ } else {
+ text.deleteCharAt(j);
}
- else
- {
- // node not found in index
- return null;
- }
- }
- finally
- {
- tDocs.close();
- }
- Field[] fields = doc.getFields(FieldNames.FULLTEXT);
- if (fields == null)
- {
- log.debug("Fulltext field not stored, using " + SimpleExcerptProvider.class.getName());
- SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
- exProvider.init(query, index);
- return exProvider.getExcerpt(nodeId, maxFragments, maxFragmentSize);
- }
- StringBuffer text = new StringBuffer();
- String separator = "";
- for (int i = 0; i < fields.length; i++)
- {
- if (fields[i].stringValue().length() == 0)
- {
- continue;
- }
- text.append(separator);
- text.append(fields[i].stringValue());
- // this is a hack! in general multiple fields with the same
- // name are handled properly, that is, offset and position is
- // calculated correctly. there is one case however where
- // the offset gets wrong:
- // if a term text ends with characters that are considered noise
- // then the offset of the next field will be off by the number
- // of noise characters.
- // therefore we delete noise characters at the end of the text.
- // this process is required for all but the last field
- if (i < fields.length - 1)
- {
- for (int j = text.length() - 1; j >= 0; j--)
- {
- if (Character.isLetterOrDigit(text.charAt(j)))
- {
- break;
- }
- else
- {
- text.deleteCharAt(j);
- }
- }
- }
- separator = " ";
- }
- TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
- if (tfv instanceof TermPositionVector)
- {
- return createExcerpt((TermPositionVector) tfv, text.toString(), maxFragments, maxFragmentSize);
- }
- else
- {
- log.debug("No TermPositionVector on Fulltext field, using " + SimpleExcerptProvider.class.getName());
- SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
- exProvider.init(query, index);
- return exProvider.getExcerpt(nodeId, maxFragments, maxFragmentSize);
- }
+ }
+ }
+ separator = " ";
}
- finally
- {
- reader.close();
+ TermFreqVector tfv = reader.getTermFreqVector(docNumber, FieldNames.FULLTEXT);
+ if (tfv instanceof TermPositionVector) {
+ return createExcerpt((TermPositionVector) tfv,
+ text.toString(),
+ maxFragments,
+ maxFragmentSize);
+ } else {
+ log.debug("No TermPositionVector on Fulltext field, using "
+ + SimpleExcerptProvider.class.getName());
+ SimpleExcerptProvider exProvider = new SimpleExcerptProvider();
+ exProvider.init(query, index);
+ return exProvider.getExcerpt(nodeId, maxFragments, maxFragmentSize);
}
- }
+ } finally {
+ Util.closeOrRelease(reader);
+ }
+ }
- /**
- * {@inheritDoc}
- */
- public String highlight(String text) throws IOException
- {
- checkRewritten(null);
- return createExcerpt(createTermPositionVector(text), text, 1, (text.length() + 1) * 2);
- }
+ /**
+ * {@inheritDoc}
+ */
+ public String highlight(String text) throws IOException {
+ checkRewritten(null);
+ return createExcerpt(createTermPositionVector(text), text, 1, (text.length() + 1) * 2);
+ }
- /**
- * Creates an excerpt for the given <code>text</code> using token offset information provided by
- * <code>tpv</code>.
- *
- * @param tpv
- * the term position vector for the fulltext field.
- * @param text
- * the original text.
- * @param maxFragments
- * the maximum number of fragments to create.
- * @param maxFragmentSize
- * the maximum number of characters in a fragment.
- * @return the xml excerpt.
- * @throws IOException
- * if an error occurs while creating the excerpt.
- */
- protected abstract String createExcerpt(TermPositionVector tpv, String text, int maxFragments, int maxFragmentSize)
- throws IOException;
+ /**
+ * Creates an excerpt for the given <code>text</code> using token offset
+ * information provided by <code>tpv</code>.
+ *
+ * @param tpv the term position vector for the fulltext field.
+ * @param text the original text.
+ * @param maxFragments the maximum number of fragments to create.
+ * @param maxFragmentSize the maximum number of characters in a fragment.
+ * @return the xml excerpt.
+ * @throws IOException if an error occurs while creating the excerpt.
+ */
+ protected abstract String createExcerpt(TermPositionVector tpv,
+ String text,
+ int maxFragments,
+ int maxFragmentSize) throws IOException;
- /**
- * @return the extracted terms from the query.
- */
- protected final Set<Term> getQueryTerms()
- {
- Set<Term> extractedTerms = new HashSet<Term>();
- Set<Term> relevantTerms = new HashSet<Term>();
- query.extractTerms(extractedTerms);
- // only keep terms for fulltext fields
- for (Iterator<Term> it = extractedTerms.iterator(); it.hasNext();)
- {
- Term t = it.next();
- if (t.field().equals(FieldNames.FULLTEXT))
- {
- relevantTerms.add(t);
- }
- else
- {
- int idx = t.field().indexOf(FieldNames.FULLTEXT_PREFIX);
- if (idx != -1)
- {
- relevantTerms.add(new Term(FieldNames.FULLTEXT, t.text()));
- }
- }
+ /**
+ * @return the extracted terms from the query.
+ */
+ protected final Set<Term> getQueryTerms() {
+ Set<Term> extractedTerms = new HashSet<Term>();
+ Set<Term> relevantTerms = new HashSet<Term>();
+ query.extractTerms(extractedTerms);
+ // only keep terms for fulltext fields
+ for (Iterator<Term> it = extractedTerms.iterator(); it.hasNext();) {
+ Term t = it.next();
+ if (t.field().equals(FieldNames.FULLTEXT)) {
+ relevantTerms.add(t);
+ } else {
+ int idx = t.field().indexOf(FieldNames.FULLTEXT_PREFIX);
+ if (idx != -1) {
+ relevantTerms.add(new Term(FieldNames.FULLTEXT, t.text()));
+ }
}
- return relevantTerms;
- }
+ }
+ return relevantTerms;
+ }
- /**
- * Makes sure the {@link #query} is rewritten. If the query is already rewritten, this method
- * returns immediately.
- *
- * @param reader
- * an optional index reader, if none is passed this method will retrieve one from the
- * {@link #index} and close it again after the rewrite operation.
- * @throws IOException
- * if an error occurs while the query is rewritten.
- */
- private void checkRewritten(IndexReader reader) throws IOException
- {
- if (!rewritten)
- {
- IndexReader r = reader;
- if (r == null)
- {
- r = index.getIndexReader();
- }
- try
- {
- query = query.rewrite(r);
- }
- finally
- {
- // only close reader if this method opened one
- if (reader == null)
- {
- r.close();
- }
- }
- rewritten = true;
+ /**
+ * Makes sure the {@link #query} is rewritten. If the query is already
+ * rewritten, this method returns immediately.
+ *
+ * @param reader an optional index reader, if none is passed this method will
+ * retrieve one from the {@link #index} and close it again after the
+ * rewrite operation.
+ * @throws IOException if an error occurs while the query is rewritten.
+ */
+ private void checkRewritten(IndexReader reader) throws IOException {
+ if (!rewritten) {
+ IndexReader r = reader;
+ if (r == null) {
+ r = index.getIndexReader();
}
- }
+ try {
+ query = query.rewrite(r);
+ } finally {
+ // only close reader if this method opened one
+ if (reader == null) {
+ Util.closeOrRelease(r);
+ }
+ }
+ rewritten = true;
+ }
+ }
- /**
- * @param text
- * the text.
- * @return a <code>TermPositionVector</code> for the given text.
- */
- private TermPositionVector createTermPositionVector(String text)
- {
- // term -> TermVectorOffsetInfo[]
- final SortedMap<String, TermVectorOffsetInfo[]> termMap = new TreeMap<String, TermVectorOffsetInfo[]>();
- Reader r = new StringReader(text);
- TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
- Token t;
- try
- {
- while ((t = ts.next()) != null)
- {
- TermVectorOffsetInfo[] info = termMap.get(t.termText());
- if (info == null)
- {
- info = new TermVectorOffsetInfo[1];
- }
- else
- {
- TermVectorOffsetInfo[] tmp = info;
- info = new TermVectorOffsetInfo[tmp.length + 1];
- System.arraycopy(tmp, 0, info, 0, tmp.length);
- }
- info[info.length - 1] = new TermVectorOffsetInfo(t.startOffset(), t.endOffset());
- termMap.put(t.termText(), info);
- }
+ /**
+ * @param text the text.
+ * @return a <code>TermPositionVector</code> for the given text.
+ */
+ private TermPositionVector createTermPositionVector(String text) {
+ // term -> TermVectorOffsetInfo[]
+ final SortedMap<String, TermVectorOffsetInfo[]> termMap = new TreeMap<String, TermVectorOffsetInfo[]>();
+ Reader r = new StringReader(text);
+ TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
+ Token t;
+ try {
+ while ((t = ts.next()) != null) {
+ TermVectorOffsetInfo[] info = termMap.get(t.termText());
+ if (info == null) {
+ info = new TermVectorOffsetInfo[1];
+ } else {
+ TermVectorOffsetInfo[] tmp = info;
+ info = new TermVectorOffsetInfo[tmp.length + 1];
+ System.arraycopy(tmp, 0, info, 0, tmp.length);
+ }
+ info[info.length - 1] = new TermVectorOffsetInfo(t.startOffset(), t.endOffset());
+ termMap.put(t.termText(), info);
}
- catch (IOException e)
- {
- // should never happen, we are reading from a string
- }
+ } catch (IOException e) {
+ // should never happen, we are reading from a string
+ }
- return new TermPositionVector()
- {
+ return new TermPositionVector() {
- private String[] terms = termMap.keySet().toArray(new String[termMap.size()]);
+ private String[] terms = termMap.keySet().toArray(new String[termMap.size()]);
- public int[] getTermPositions(int index)
- {
- return null;
- }
+ public int[] getTermPositions(int index) {
+ return null;
+ }
- public TermVectorOffsetInfo[] getOffsets(int index)
- {
- TermVectorOffsetInfo[] info = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
- if (index >= 0 && index < terms.length)
- {
- info = termMap.get(terms[index]);
- }
- return info;
- }
+ public TermVectorOffsetInfo[] getOffsets(int index) {
+ TermVectorOffsetInfo[] info = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
+ if (index >= 0 && index < terms.length) {
+ info = termMap.get(terms[index]);
+ }
+ return info;
+ }
- public String getField()
- {
- return "";
- }
+ public String getField() {
+ return "";
+ }
- public int size()
- {
- return terms.length;
- }
+ public int size() {
+ return terms.length;
+ }
- public String[] getTerms()
- {
- return terms;
- }
+ public String[] getTerms() {
+ return terms;
+ }
- public int[] getTermFrequencies()
- {
- int[] freqs = new int[terms.length];
- for (int i = 0; i < terms.length; i++)
- {
- freqs[i] = termMap.get(terms[i]).length;
- }
- return freqs;
- }
+ public int[] getTermFrequencies() {
+ int[] freqs = new int[terms.length];
+ for (int i = 0; i < terms.length; i++) {
+ freqs[i] = termMap.get(terms[i]).length;
+ }
+ return freqs;
+ }
- public int indexOf(String term)
- {
- int res = Arrays.binarySearch(terms, term);
- return res >= 0 ? res : -1;
- }
+ public int indexOf(String term) {
+ int res = Arrays.binarySearch(terms, term);
+ return res >= 0 ? res : -1;
+ }
- public int[] indexesOf(String[] terms, int start, int len)
- {
- int res[] = new int[len];
- for (int i = 0; i < len; i++)
- {
- res[i] = indexOf(terms[i]);
- }
- return res;
- }
- };
- }
+ public int[] indexesOf(String[] terms, int start, int len) {
+ int res[] = new int[len];
+ for (int i = 0; i < len; i++) {
+ res[i] = indexOf(terms[i]);
+ }
+ return res;
+ }
+ };
+ }
}
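A note on the rewrite guard shown above: Lucene's Query.extractTerms() only works on rewritten queries, because a multi-term query such as a PrefixQuery carries no concrete terms until it has been expanded against an index; that is exactly what checkRewritten() ensures before getQueryTerms() runs. A minimal sketch of the contract against the Lucene 2.4 API (the Directory parameter is an assumption for the sake of the example):

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.store.Directory;

    static Set<Term> rewrittenTerms(Query query, Directory directory) throws IOException {
        Set<Term> terms = new HashSet<Term>();
        IndexReader reader = IndexReader.open(directory);
        try {
            // rewrite() expands multi-term queries (prefix, wildcard, range)
            // into plain term queries; calling extractTerms() on the
            // unrewritten query would throw UnsupportedOperationException.
            query.rewrite(reader).extractTerms(terms);
        } finally {
            reader.close();
        }
        return terms;
    }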
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractIndex.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractIndex.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractIndex.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,35 +16,39 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.io.StringReader;
-import java.util.BitSet;
-import java.util.Iterator;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
-
-import org.exoplatform.services.jcr.config.QueryHandlerEntry;
import org.exoplatform.services.jcr.config.QueryHandlerEntryWrapper;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.io.StringReader;
+import java.util.BitSet;
+import java.util.Iterator;
+
/**
- * Implements common functionality for a lucene index. <p/> Note on synchronization: This class is
- * not entirely thread-safe. Certain concurrent access is however allowed. Read-only access on this
- * index using {@link #getReadOnlyIndexReader()} is thread-safe. That is, multiple threads my call
- * that method concurrently and use the returned IndexReader at the same time.<br/> Modifying
- * threads must be synchronized externally in a way that only one thread is using the returned
- * IndexReader and IndexWriter instances returned by {@link #getIndexReader()} and
- * {@link #getIndexWriter()} at a time.<br/> Concurrent access by <b>one</b> modifying thread and
- * multiple read-only threads is safe!
+ * Implements common functionality for a lucene index.
+ * <p/>
+ * Note on synchronization: This class is not entirely thread-safe. Certain
+ * concurrent access is however allowed. Read-only access on this index using
+ * {@link #getReadOnlyIndexReader()} is thread-safe. That is, multiple threads
+ * may call that method concurrently and use the returned IndexReader at the same
+ * time.<br/>
+ * Modifying threads must be synchronized externally in a way that only one
+ * thread is using the returned IndexReader and IndexWriter instances returned
+ * by {@link #getIndexReader()} and {@link #getIndexWriter()} at a time.<br/>
+ * Concurrent access by <b>one</b> modifying thread and multiple read-only
+ * threads is safe!
*/
abstract class AbstractIndex {
@@ -98,24 +102,26 @@
private ReadOnlyIndexReader readOnlyReader;
/**
+ * Flag that indicates whether there was an index present in the directory
+ * when this AbstractIndex was created.
+ */
+ private boolean isExisting;
+
+ /**
* The indexing queue.
*/
private IndexingQueue indexingQueue;
/**
- * Constructs an index with an <code>analyzer</code> and a <code>directory</code>.
+ * Constructs an index with an <code>analyzer</code> and a
+ * <code>directory</code>.
*
- * @param analyzer
- * the analyzer for text tokenizing.
- * @param directory
- * the underlying directory.
- * @param cache
- * the document number cache if this index should use one; otherwise <code>cache</code>
- * is <code>null</code>.
- * @param indexingQueue
- * the indexing queue.
- * @throws IOException
- * if the index cannot be initialized.
+ * @param analyzer the analyzer for text tokenizing.
+ * @param directory the underlying directory.
+ * @param cache the document number cache if this index should use one;
+ * otherwise <code>cache</code> is <code>null</code>.
+ * @param indexingQueue the indexing queue.
+ * @throws IOException if the index cannot be initialized.
*/
AbstractIndex(Analyzer analyzer,
Directory directory,
@@ -125,8 +131,9 @@
this.directory = directory;
this.cache = cache;
this.indexingQueue = indexingQueue;
+ this.isExisting = IndexReader.indexExists(directory);
- if (!IndexReader.indexExists(directory)) {
+ if (!isExisting) {
indexWriter = new IndexWriter(directory, analyzer);
// immediately close, now that index has been created
indexWriter.close();
@@ -135,7 +142,8 @@
}
/**
- * Default implementation returns the same instance as passed in the constructor.
+ * Default implementation returns the same instance as passed in the
+ * constructor.
*
* @return the directory instance passed in the constructor
* @throws IOException
@@ -145,12 +153,21 @@
}
/**
+ * Returns <code>true</code> if this index was opened on a directory with an
+ * existing index in it; <code>false</code> otherwise.
+ *
+ * @return <code>true</code> if there was an index present when this index was
+ * created; <code>false</code> otherwise.
+ */
+ boolean isExisting() {
+ return isExisting;
+ }
+
+ /**
* Adds documents to this index and invalidates the shared reader.
*
- * @param docs
- * the documents to add.
- * @throws IOException
- * if an error occurs while writing to the index.
+ * @param docs the documents to add.
+ * @throws IOException if an error occurs while writing to the index.
*/
void addDocuments(Document[] docs) throws IOException {
final IndexWriter writer = getIndexWriter();
@@ -196,14 +213,12 @@
}
/**
- * Removes the document from this index. This call will not invalidate the shared reader. If a
- * subclass whishes to do so, it should overwrite this method and call
- * {@link #invalidateSharedReader()}.
+ * Removes the document from this index. This call will not invalidate the
+ * shared reader. If a subclass wishes to do so, it should override this
+ * method and call {@link #invalidateSharedReader()}.
*
- * @param idTerm
- * the id term of the document to remove.
- * @throws IOException
- * if an error occurs while removing the document.
+ * @param idTerm the id term of the document to remove.
+ * @throws IOException if an error occurs while removing the document.
* @return number of documents deleted
*/
int removeDocument(Term idTerm) throws IOException {
@@ -211,12 +226,11 @@
}
/**
- * Returns an <code>IndexReader</code> on this index. This index reader may be used to delete
- * documents.
+ * Returns an <code>IndexReader</code> on this index. This index reader may be
+ * used to delete documents.
*
* @return an <code>IndexReader</code> on this index.
- * @throws IOException
- * if the reader cannot be obtained.
+ * @throws IOException if the reader cannot be obtained.
*/
protected synchronized CommittableIndexReader getIndexReader() throws IOException {
if (indexWriter != null) {
@@ -231,13 +245,13 @@
}
/**
- * Returns a read-only index reader, that can be used concurrently with other threads writing to
- * this index. The returned index reader is read-only, that is, any attempt to delete a document
- * from the index will throw an <code>UnsupportedOperationException</code>.
+ * Returns a read-only index reader, that can be used concurrently with other
+ * threads writing to this index. The returned index reader is read-only, that
+ * is, any attempt to delete a document from the index will throw an
+ * <code>UnsupportedOperationException</code>.
*
* @return a read-only index reader.
- * @throws IOException
- * if an error occurs while obtaining the index reader.
+ * @throws IOException if an error occurs while obtaining the index reader.
*/
synchronized ReadOnlyIndexReader getReadOnlyIndexReader() throws IOException {
// get current modifiable index reader
@@ -246,7 +260,7 @@
if (readOnlyReader != null) {
if (readOnlyReader.getDeletedDocsVersion() == modCount) {
// reader up-to-date
- readOnlyReader.incrementRefCount();
+ readOnlyReader.acquire();
return readOnlyReader;
} else {
// reader outdated
@@ -254,12 +268,12 @@
// not in use, except by this index
// update the reader
readOnlyReader.updateDeletedDocs(modifiableReader);
- readOnlyReader.incrementRefCount();
+ readOnlyReader.acquire();
return readOnlyReader;
} else {
// cannot update reader, it is still in use
// need to create a new instance
- readOnlyReader.close();
+ readOnlyReader.release();
readOnlyReader = null;
}
}
@@ -278,7 +292,7 @@
sharedReader = new SharedIndexReader(cr);
}
readOnlyReader = new ReadOnlyIndexReader(sharedReader, deleted, modCount);
- readOnlyReader.incrementRefCount();
+ readOnlyReader.acquire();
return readOnlyReader;
}
@@ -286,8 +300,7 @@
* Returns an <code>IndexWriter</code> on this index.
*
* @return an <code>IndexWriter</code> on this index.
- * @throws IOException
- * if the writer cannot be obtained.
+ * @throws IOException if the writer cannot be obtained.
*/
protected synchronized IndexWriter getIndexWriter() throws IOException {
if (indexReader != null) {
@@ -305,6 +318,10 @@
indexWriter.setMaxFieldLength(maxFieldLength);
indexWriter.setUseCompoundFile(useCompoundFile);
indexWriter.setInfoStream(STREAM_LOGGER);
+ indexWriter.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
+ indexWriter.setMergeScheduler(new SerialMergeScheduler());
+ indexWriter.setMergePolicy(new LogDocMergePolicy());
+
}
return indexWriter;
}
@@ -312,8 +329,7 @@
/**
* Commits all pending changes to the underlying <code>Directory</code>.
*
- * @throws IOException
- * if an error occurs while commiting changes.
+ * @throws IOException if an error occurs while committing changes.
*/
protected void commit() throws IOException {
commit(false);
@@ -322,14 +338,13 @@
/**
* Commits all pending changes to the underlying <code>Directory</code>.
*
- * @param optimize
- * if <code>true</code> the index is optimized after the commit.
- * @throws IOException
- * if an error occurs while commiting changes.
+ * @param optimize if <code>true</code> the index is optimized after the
+ * commit.
+ * @throws IOException if an error occurs while committing changes.
*/
protected synchronized void commit(boolean optimize) throws IOException {
if (indexReader != null) {
- indexReader.commitDeleted();
+ indexReader.flush();
}
if (indexWriter != null) {
log.debug("committing IndexWriter.");
@@ -349,6 +364,20 @@
* Closes this index, releasing all held resources.
*/
synchronized void close() {
+ releaseWriterAndReaders();
+ if (directory != null) {
+ try {
+ directory.close();
+ } catch (IOException e) {
+ directory = null;
+ }
+ }
+ }
+
+ /**
+ * Releases all potentially held index writer and readers.
+ */
+ protected void releaseWriterAndReaders() {
if (indexWriter != null) {
try {
indexWriter.close();
@@ -367,55 +396,48 @@
}
if (readOnlyReader != null) {
try {
- readOnlyReader.close();
+ readOnlyReader.release();
} catch (IOException e) {
log.warn("Exception closing index reader: " + e.toString());
}
+ readOnlyReader = null;
}
if (sharedReader != null) {
try {
- sharedReader.close();
+ sharedReader.release();
} catch (IOException e) {
log.warn("Exception closing index reader: " + e.toString());
}
+ sharedReader = null;
}
- if (directory != null) {
- try {
- directory.close();
- } catch (IOException e) {
- directory = null;
- }
- }
}
/**
* Closes the shared reader.
*
- * @throws IOException
- * if an error occurs while closing the reader.
+ * @throws IOException if an error occurs while closing the reader.
*/
protected synchronized void invalidateSharedReader() throws IOException {
// also close the read-only reader
if (readOnlyReader != null) {
- readOnlyReader.close();
+ readOnlyReader.release();
readOnlyReader = null;
}
// invalidate shared reader
if (sharedReader != null) {
- sharedReader.close();
+ sharedReader.release();
sharedReader = null;
}
}
/**
- * Returns a document that is finished with text extraction and is ready to be added to the index.
+ * Returns a document that is finished with text extraction and is ready to be
+ * added to the index.
*
- * @param doc
- * the document to check.
- * @return <code>doc</code> if it is finished already or a stripped down copy of <code>doc</code>
- * without text extractors.
- * @throws IOException
- * if the document cannot be added to the indexing queue.
+ * @param doc the document to check.
+ * @return <code>doc</code> if it is finished already or a stripped down copy
+ * of <code>doc</code> without text extractors.
+ * @throws IOException if the document cannot be added to the indexing queue.
*/
private Document getFinishedDocument(Document doc) throws IOException {
if (!Util.isDocumentReady(doc)) {
@@ -511,8 +533,7 @@
/**
* Returns the index parameter set on <code>f</code>.
*
- * @param f
- * a lucene field.
+ * @param f a lucene field.
* @return the index parameter on <code>f</code>.
*/
private Field.Index getIndexParameter(Field f) {
@@ -528,8 +549,7 @@
/**
* Returns the store parameter set on <code>f</code>.
*
- * @param f
- * a lucene field.
+ * @param f a lucene field.
* @return the store parameter on <code>f</code>.
*/
private Field.Store getStoreParameter(Field f) {
@@ -545,8 +565,7 @@
/**
* Returns the term vector parameter set on <code>f</code>.
*
- * @param f
- * a lucene field.
+ * @param f a lucene field.
* @return the term vector parameter on <code>f</code>.
*/
private Field.TermVector getTermVectorParameter(Field f) {
@@ -587,6 +606,6 @@
buffer.append(s);
log.debug(buffer.toString());
buffer.setLength(0);
- }
- }
+ }
+ }
}
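The getIndexWriter() hunk above carries the core of the Lucene 2.4 upgrade for this class: DISABLE_AUTO_FLUSH turns off RAM-based auto flushing so segments are written only on the explicit commit() calls, while SerialMergeScheduler plus LogDocMergePolicy presumably keep segment merges on the calling thread and selected by document count rather than handing them to Lucene 2.4's default background merge scheduler. A standalone sketch of the same configuration, assuming a throwaway index path and analyzer:

    import java.io.IOException;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.LogDocMergePolicy;
    import org.apache.lucene.index.SerialMergeScheduler;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class WriterConfigSketch {
        public static void main(String[] args) throws IOException {
            Directory dir = FSDirectory.getDirectory("/tmp/index-sketch"); // assumed location
            IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer());
            writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); // flush only on explicit commit
            writer.setMergeScheduler(new SerialMergeScheduler());      // merge on the calling thread
            writer.setMergePolicy(new LogDocMergePolicy());            // select merges by document count
            writer.close();
        }
    }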
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CachingMultiIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CachingMultiIndexReader.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CachingMultiIndexReader.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,297 +16,256 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
/**
- * Extends a <code>MultiReader</code> with support for cached <code>TermDocs</code> on
- * {@link FieldNames#UUID} field.
+ * Extends a <code>MultiReader</code> with support for cached
+ * <code>TermDocs</code> on {@link FieldNames#UUID} field.
*/
-public final class CachingMultiIndexReader
- extends MultiReader
- implements HierarchyResolver, MultiIndexReader
-{
+public final class CachingMultiIndexReader extends MultiReader implements HierarchyResolver,
+ MultiIndexReader {
- /**
- * The sub readers.
- */
- private ReadOnlyIndexReader[] subReaders;
+ /**
+ * The sub readers.
+ */
+ private ReadOnlyIndexReader[] subReaders;
- /**
- * Map of {@link OffsetReader}s, identified by creation tick.
- */
- private final Map<Long, OffsetReader> readersByCreationTick = new HashMap<Long, OffsetReader>();
+ /**
+ * Map of {@link OffsetReader}s, identified by creation tick.
+ */
+ private final Map<Long, OffsetReader> readersByCreationTick = new HashMap<Long, OffsetReader>();
- /**
- * Document number cache if available. May be <code>null</code>.
- */
- private final DocNumberCache cache;
+ /**
+ * Document number cache if available. May be <code>null</code>.
+ */
+ private final DocNumberCache cache;
- /**
- * Doc number starts for each sub reader
- */
- private int[] starts;
+ /**
+ * Doc number starts for each sub reader
+ */
+ private int[] starts;
- /**
- * Reference count. Every time close is called refCount is decremented. If refCount drops to zero
- * the underlying readers are closed as well.
- */
- private int refCount = 1;
+ /**
+ * Reference count. Every time close is called refCount is decremented. If
+ * refCount drops to zero the underlying readers are closed as well.
+ */
+ private int refCount = 1;
- /**
- * Creates a new <code>CachingMultiIndexReader</code> based on sub readers.
- *
- * @param subReaders
- * the sub readers.
- * @param cache
- * the document number cache.
- * @throws IOException
- * if an error occurs while reading from the indexes.
- */
- public CachingMultiIndexReader(ReadOnlyIndexReader[] subReaders, DocNumberCache cache) throws IOException
- {
- super(subReaders);
- this.cache = cache;
- this.subReaders = subReaders;
- starts = new int[subReaders.length + 1];
- int maxDoc = 0;
- for (int i = 0; i < subReaders.length; i++)
- {
- starts[i] = maxDoc;
- maxDoc += subReaders[i].maxDoc();
- OffsetReader offsetReader = new OffsetReader(subReaders[i], starts[i]);
- readersByCreationTick.put(new Long(subReaders[i].getCreationTick()), offsetReader);
- }
- starts[subReaders.length] = maxDoc;
- }
+ /**
+ * Creates a new <code>CachingMultiIndexReader</code> based on sub readers.
+ *
+ * @param subReaders the sub readers.
+ * @param cache the document number cache.
+ * @throws IOException if an error occurs while reading from the indexes.
+ */
+ public CachingMultiIndexReader(ReadOnlyIndexReader[] subReaders, DocNumberCache cache) throws IOException {
+ super(subReaders);
+ this.cache = cache;
+ this.subReaders = subReaders;
+ starts = new int[subReaders.length + 1];
+ int maxDoc = 0;
+ for (int i = 0; i < subReaders.length; i++) {
+ starts[i] = maxDoc;
+ maxDoc += subReaders[i].maxDoc();
+ OffsetReader offsetReader = new OffsetReader(subReaders[i], starts[i]);
+ readersByCreationTick.put(new Long(subReaders[i].getCreationTick()), offsetReader);
+ }
+ starts[subReaders.length] = maxDoc;
+ }
- /**
- * Returns the document number of the parent of <code>n</code> or <code>-1</code> if
- * <code>n</code> does not have a parent (<code>n</code> is the root node).
- *
- * @param n
- * the document number.
- * @return the document number of <code>n</code>'s parent.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public int getParent(int n) throws IOException
- {
- DocId id = getParentDocId(n);
- return id.getDocumentNumber(this);
- }
+ /**
+ * Returns the document number of the parent of <code>n</code> or
+ * <code>-1</code> if <code>n</code> does not have a parent (<code>n</code> is
+ * the root node).
+ *
+ * @param n the document number.
+ * @return the document number of <code>n</code>'s parent.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public int getParent(int n) throws IOException {
+ DocId id = getParentDocId(n);
+ return id.getDocumentNumber(this);
+ }
- /**
- * Returns the DocId of the parent of <code>n</code> or {@link DocId#NULL} if <code>n</code> does
- * not have a parent (<code>n</code> is the root node).
- *
- * @param n
- * the document number.
- * @return the DocId of <code>n</code>'s parent.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public DocId getParentDocId(int n) throws IOException
- {
- int i = readerIndex(n);
- DocId id = subReaders[i].getParent(n - starts[i]);
- return id.applyOffset(starts[i]);
- }
+ /**
+ * Returns the DocId of the parent of <code>n</code> or {@link DocId#NULL} if
+ * <code>n</code> does not have a parent (<code>n</code> is the root node).
+ *
+ * @param n the document number.
+ * @return the DocId of <code>n</code>'s parent.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public DocId getParentDocId(int n) throws IOException {
+ int i = readerIndex(n);
+ DocId id = subReaders[i].getParent(n - starts[i]);
+ return id.applyOffset(starts[i]);
+ }
- /**
- * {@inheritDoc}
- */
- public TermDocs termDocs(Term term) throws IOException
- {
- if (term.field() == FieldNames.UUID)
- {
- // check cache
- DocNumberCache.Entry e = cache.get(term.text());
- if (e != null)
- {
- // check if valid:
- // 1) reader must be in the set of readers
- // 2) doc must not be deleted
- OffsetReader offsetReader = readersByCreationTick.get(new Long(e.creationTick));
- if (offsetReader != null && !offsetReader.reader.isDeleted(e.doc))
- {
- return new SingleTermDocs(e.doc + offsetReader.offset);
- }
- }
+ /**
+ * {@inheritDoc}
+ */
+ public TermDocs termDocs(Term term) throws IOException {
+ if (term.field() == FieldNames.UUID) {
+ // check cache
+ DocNumberCache.Entry e = cache.get(term.text());
+ if (e != null) {
+ // check if valid:
+ // 1) reader must be in the set of readers
+ // 2) doc must not be deleted
+ OffsetReader offsetReader = readersByCreationTick.get(new Long(e.creationTick));
+ if (offsetReader != null && !offsetReader.reader.isDeleted(e.doc)) {
+ return new SingleTermDocs(e.doc + offsetReader.offset);
+ }
+ }
- // if we get here, entry is either invalid or did not exist
- // search through readers
- for (int i = 0; i < subReaders.length; i++)
- {
- TermDocs docs = subReaders[i].termDocs(term);
- try
- {
- if (docs.next())
- {
- return new SingleTermDocs(docs.doc() + starts[i]);
- }
- }
- finally
- {
- docs.close();
- }
- }
+ // if we get here, entry is either invalid or did not exist
+ // search through readers
+ for (int i = 0; i < subReaders.length; i++) {
+ TermDocs docs = subReaders[i].termDocs(term);
+ try {
+ if (docs.next()) {
+ return new SingleTermDocs(docs.doc() + starts[i]);
+ }
+ } finally {
+ docs.close();
+ }
}
+ }
- return super.termDocs(term);
- }
+ return super.termDocs(term);
+ }
- /**
- * Increments the reference count of this reader. Each call to this method must later be
- * acknowledged by a call to {@link #close()}
- */
- synchronized void incrementRefCount()
- {
- refCount++;
- }
+ /**
+ * Increments the reference count of this reader. Each call to this method
+ * must later be acknowledged by a call to {@link #release()}.
+ */
+ synchronized void acquire() {
+ refCount++;
+ }
- /**
- * Decrements the reference count and closes the underlying readers if this reader is not in use
- * anymore.
- *
- * @throws IOException
- * if an error occurs while closing this reader.
- */
- protected synchronized void doClose() throws IOException
- {
- if (--refCount == 0)
- {
- super.doClose();
- }
- }
+ /**
+ * {@inheritDoc}
+ */
+ public synchronized final void release() throws IOException {
+ if (--refCount == 0) {
+ close();
+ }
+ }
- // -------------------------< MultiIndexReader >-----------------------------
+ /**
+ * {@inheritDoc}
+ */
+ protected synchronized void doClose() throws IOException {
+ for (int i = 0; i < subReaders.length; i++) {
+ subReaders[i].release();
+ }
+ }
- /**
- * {@inheritDoc}
- */
- public IndexReader[] getIndexReaders()
- {
- IndexReader readers[] = new IndexReader[subReaders.length];
- System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
- return readers;
- }
+ // -------------------------< MultiIndexReader >-----------------------------
- /**
- * {@inheritDoc}
- */
- public ForeignSegmentDocId createDocId(String uuid) throws IOException
- {
- Term id = new Term(FieldNames.UUID, uuid);
- int doc;
- long tick;
- for (int i = 0; i < subReaders.length; i++)
- {
- TermDocs docs = subReaders[i].termDocs(id);
- try
- {
- if (docs.next())
- {
- doc = docs.doc();
- tick = subReaders[i].getCreationTick();
- return new ForeignSegmentDocId(doc, tick);
- }
- }
- finally
- {
- docs.close();
- }
- }
- return null;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public IndexReader[] getIndexReaders() {
+ IndexReader readers[] = new IndexReader[subReaders.length];
+ System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
+ return readers;
+ }
- /**
- * {@inheritDoc}
- */
- public int getDocumentNumber(ForeignSegmentDocId docId)
- {
- OffsetReader r = readersByCreationTick.get(new Long(docId.getCreationTick()));
- if (r != null && !r.reader.isDeleted(docId.getDocNumber()))
- {
- return r.offset + docId.getDocNumber();
+ /**
+ * {@inheritDoc}
+ */
+ public ForeignSegmentDocId createDocId(String uuid) throws IOException {
+ Term id = new Term(FieldNames.UUID, uuid);
+ int doc;
+ long tick;
+ for (int i = 0; i < subReaders.length; i++) {
+ TermDocs docs = subReaders[i].termDocs(id);
+ try {
+ if (docs.next()) {
+ doc = docs.doc();
+ tick = subReaders[i].getCreationTick();
+ return new ForeignSegmentDocId(doc, tick);
+ }
+ } finally {
+ docs.close();
}
- return -1;
- }
+ }
+ return null;
+ }
- /**
- * Returns the reader index for document <code>n</code>. Implementation copied from lucene
- * MultiReader class.
- *
- * @param n
- * document number.
- * @return the reader index.
- */
- private int readerIndex(int n)
- {
- int lo = 0; // search starts array
- int hi = subReaders.length - 1; // for first element less
+ /**
+ * {@inheritDoc}
+ */
+ public int getDocumentNumber(ForeignSegmentDocId docId) {
+ OffsetReader r = readersByCreationTick.get(new Long(docId.getCreationTick()));
+ if (r != null && !r.reader.isDeleted(docId.getDocNumber())) {
+ return r.offset + docId.getDocNumber();
+ }
+ return -1;
+ }
- while (hi >= lo)
- {
- int mid = (lo + hi) >> 1;
- int midValue = starts[mid];
- if (n < midValue)
- {
- hi = mid - 1;
- }
- else if (n > midValue)
- {
- lo = mid + 1;
- }
- else
- { // found a match
- while (mid + 1 < subReaders.length && starts[mid + 1] == midValue)
- {
- mid++; // scan to last match
- }
- return mid;
- }
+ /**
+ * Returns the reader index for document <code>n</code>. Implementation copied
+ * from lucene MultiReader class.
+ *
+ * @param n document number.
+ * @return the reader index.
+ */
+ private int readerIndex(int n) {
+ int lo = 0; // search starts array
+ int hi = subReaders.length - 1; // for first element less
+
+ while (hi >= lo) {
+ int mid = (lo + hi) >> 1;
+ int midValue = starts[mid];
+ if (n < midValue) {
+ hi = mid - 1;
+ } else if (n > midValue) {
+ lo = mid + 1;
+ } else { // found a match
+ while (mid + 1 < subReaders.length && starts[mid + 1] == midValue) {
+ mid++; // scan to last match
+ }
+ return mid;
}
- return hi;
- }
+ }
+ return hi;
+ }
- // -----------------------< OffsetTermDocs >---------------------------------
+ // -----------------------< OffsetTermDocs >---------------------------------
- /**
- * Simple helper struct that associates an offset with an IndexReader.
- */
- private static final class OffsetReader
- {
+ /**
+ * Simple helper struct that associates an offset with an IndexReader.
+ */
+ private static final class OffsetReader {
- /**
- * The index reader.
- */
- private final ReadOnlyIndexReader reader;
+ /**
+ * The index reader.
+ */
+ private final ReadOnlyIndexReader reader;
- /**
- * The reader offset in this multi reader instance.
- */
- private final int offset;
+ /**
+ * The reader offset in this multi reader instance.
+ */
+ private final int offset;
- /**
- * Creates a new <code>OffsetReader</code>.
- *
- * @param reader
- * the index reader.
- * @param offset
- * the reader offset in a multi reader.
- */
- OffsetReader(ReadOnlyIndexReader reader, int offset)
- {
- this.reader = reader;
- this.offset = offset;
- }
- }
+ /**
+ * Creates a new <code>OffsetReader</code>.
+ *
+ * @param reader the index reader.
+ * @param offset the reader offset in a multi reader.
+ */
+ OffsetReader(ReadOnlyIndexReader reader, int offset) {
+ this.reader = reader;
+ this.offset = offset;
+ }
+ }
}
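The acquire()/release() pair introduced above replaces the old incrementRefCount()/close() protocol: a caller never closes a shared reader directly, it releases its reference, and the underlying sub-readers are closed only when the last reference is gone. A stripped-down sketch of the contract (the class name is illustrative, not one of the classes added by this commit):

    import java.io.IOException;

    abstract class RefCountedResource {
        private int refCount = 1; // the creator holds the initial reference

        synchronized void acquire() {
            refCount++;
        }

        synchronized void release() throws IOException {
            if (--refCount == 0) {
                doClose(); // runs exactly once, for the last holder
            }
        }

        protected abstract void doClose() throws IOException;
    }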
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CommittableIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CommittableIndexReader.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CommittableIndexReader.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,66 +16,63 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader;
+import java.io.IOException;
+
/**
- * Wraps an <code>IndexReader</code> and allows to commit changes without closing the reader.
+ * Wraps an <code>IndexReader</code> and allows committing changes without
+ * closing the reader.
*/
-class CommittableIndexReader
- extends FilterIndexReader
-{
+class CommittableIndexReader extends FilterIndexReader {
- /**
- * A modification count on this index reader. Initialied with {@link IndexReader#getVersion()} and
- * incremented with every call to {@link #doDelete(int)}.
- */
- private volatile long modCount;
+ /**
+ * A modification count on this index reader. Initialized with
+ * {@link IndexReader#getVersion()} and incremented with every call to
+ * {@link #doDelete(int)}.
+ */
+ private volatile long modCount;
- /**
- * Creates a new <code>CommittableIndexReader</code> based on <code>in</code>.
- *
- * @param in
- * the <code>IndexReader</code> to wrap.
- */
- CommittableIndexReader(IndexReader in)
- {
- super(in);
- modCount = in.getVersion();
- }
+ /**
+ * Creates a new <code>CommittableIndexReader</code> based on <code>in</code>.
+ *
+ * @param in the <code>IndexReader</code> to wrap.
+ */
+ CommittableIndexReader(IndexReader in) {
+ super(in);
+ modCount = in.getVersion();
+ }
- // ------------------------< FilterIndexReader >-----------------------------
+ // ------------------------< FilterIndexReader >-----------------------------
- /**
- * {@inheritDoc} <p/> Increments the modification count.
- */
- protected void doDelete(int n) throws CorruptIndexException, IOException
- {
- super.doDelete(n);
- modCount++;
- }
+ /**
+ * {@inheritDoc}
+ * <p/>
+ * Increments the modification count.
+ */
+ protected void doDelete(int n) throws CorruptIndexException, IOException {
+ super.doDelete(n);
+ modCount++;
+ }
- // ------------------------< additional methods >----------------------------
+ // ------------------------< additional methods >----------------------------
+ //
+ // /**
+ // * Commits the documents marked as deleted to disc.
+ // *
+ // * @throws IOException
+ // * if an error occurs while writing.
+ // */
+ // void commitDeleted() throws IOException {
+ // commit();
+ // }
- /**
- * Commits the documents marked as deleted to disc.
- *
- * @throws IOException
- * if an error occurs while writing.
- */
- void commitDeleted() throws IOException
- {
- commit();
- }
-
- /**
- * @return the modification count of this index reader.
- */
- long getModificationCount()
- {
- return modCount;
- }
+ /**
+ * @return the modification count of this index reader.
+ */
+ long getModificationCount() {
+ return modCount;
+ }
}
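The commented-out commitDeleted() above is dead weight after the upgrade: Lucene 2.4's IndexReader.flush() persists pending deletions without closing the reader, which is what AbstractIndex.commit() now calls directly. A minimal usage sketch (the directory and uuid values are assumed):

    IndexReader reader = IndexReader.open(directory);
    reader.deleteDocuments(new Term(FieldNames.UUID, uuid)); // mark documents as deleted
    reader.flush(); // write the deletions to the Directory, keep the reader open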
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheckError.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheckError.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ConsistencyCheckError.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -21,47 +21,44 @@
/**
* Common base class for errors detected during the consistency check.
*/
-abstract class ConsistencyCheckError
-{
+abstract class ConsistencyCheckError {
- /**
- * Diagnostic message for this error.
- */
- protected final String message;
+ /**
+ * Diagnostic message for this error.
+ */
+ protected final String message;
- /**
- * The UUID of the affected node.
- */
- protected final String uuid;
+ /**
+ * The UUID of the affected node.
+ */
+ protected final String uuid;
- ConsistencyCheckError(String message, String uuid)
- {
- this.message = message;
- this.uuid = uuid;
- }
+ ConsistencyCheckError(String message, String uuid) {
+ this.message = message;
+ this.uuid = uuid;
+ }
- /**
- * Returns the diagnostic message.
- *
- * @return the diagnostic message.
- */
- public String toString()
- {
- return message;
- }
+ /**
+ * Returns the diagnostic message.
+ *
+ * @return the diagnostic message.
+ */
+ public String toString() {
+ return message;
+ }
- /**
- * Returns <code>true</code> if this error can be repaired.
- *
- * @return <code>true</code> if this error can be repaired.
- */
- abstract boolean repairable();
+ /**
+ * Returns <code>true</code> if this error can be repaired.
+ *
+ * @return <code>true</code> if this error can be repaired.
+ */
+ abstract boolean repairable();
- /**
- * Executes the repair operation.
- *
- * @throws IOException
- * if an error occurs while repairing.
- */
- abstract void repair() throws IOException;
+ /**
+ * Executes the repair operation.
+ *
+ * @throws IOException
+ * if an error occurs while repairing.
+ */
+ abstract void repair() throws IOException;
}
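Concrete checks extend this base class and decide per error type whether an automatic repair is safe. A hypothetical subclass sketch showing the intended extension pattern (the name and repair policy below are made up for illustration):

    import java.io.IOException;

    class OrphanedNodeError extends ConsistencyCheckError {

        OrphanedNodeError(String uuid) {
            super("Node " + uuid + " has no parent entry in the index", uuid);
        }

        boolean repairable() {
            return false; // report only; nothing safe to do automatically
        }

        void repair() throws IOException {
            throw new IOException("not repairable: " + message);
        }
    }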
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldNames.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldNames.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldNames.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -17,7 +17,8 @@
package org.exoplatform.services.jcr.impl.core.query.lucene;
/**
- * Defines field names that are used internally to store UUID, etc in the search index.
+ * Defines field names that are used internally to store UUID, etc in the
+ * search index.
*/
public class FieldNames
{
@@ -30,13 +31,14 @@
}
/**
- * Name of the field that contains the UUID of the node. Terms are stored but not tokenized.
+ * Name of the field that contains the UUID of the node. Terms are stored
+ * but not tokenized.
*/
public static final String UUID = "_:UUID".intern();
/**
- * Name of the field that contains the fulltext index including terms from all properties of a
- * node. Terms are tokenized.
+ * Name of the field that contains the fulltext index including terms
+ * from all properties of a node. Terms are tokenized.
*/
public static final String FULLTEXT = "_:FULLTEXT".intern();
@@ -46,81 +48,127 @@
public static final String FULLTEXT_PREFIX = "FULL:";
/**
- * Name of the field that contains the UUID of the parent node. Terms are stored and but not
- * tokenized.
+ * Name of the field that contains the UUID of the parent node. Terms are
+ * stored but not tokenized.
*/
public static final String PARENT = "_:PARENT".intern();
/**
- * Name of the field that contains the label of the node. Terms are not tokenized.
+ * Name of the field that contains the label of the node. Terms are not
+ * tokenized.
*/
public static final String LABEL = "_:LABEL".intern();
/**
- * Name of the field that contains the names of multi-valued properties that hold more than one
- * value. Terms are not tokenized and not stored, only indexed.
+ * Name of the field that contains the local name of the node. Terms are not
+ * tokenized.
*/
+ public static final String LOCAL_NAME = "_:LOCAL_NAME".intern();
+
+ /**
+ * Name of the field that contains the namespace URI of the node name. Terms
+ * are not tokenized.
+ */
+ public static final String NAMESPACE_URI = "_:NAMESPACE_URI".intern();
+
+ /**
+ * Name of the field that contains the names of multi-valued properties that
+ * hold more than one value. Terms are not tokenized and not stored, only
+ * indexed.
+ */
public static final String MVP = "_:MVP".intern();
/**
- * Name of the field that contains all values of properties that are indexed as is without
- * tokenizing. Terms are prefixed with the property name.
+ * Name of the field that contains all values of properties that are indexed
+ * as is without tokenizing. Terms are prefixed with the property name.
*/
public static final String PROPERTIES = "_:PROPERTIES".intern();
/**
- * Name of the field that contains the names of all properties that are set on an indexed node.
+ * Name of the field that contains the names of all properties that are set
+ * on an indexed node.
*/
public static final String PROPERTIES_SET = "_:PROPERTIES_SET".intern();
/**
- * Name of the field that contains the UUIDs of the aggregated nodes. The terms are not tokenized
- * and not stored, only indexed.
+ * Name of the field that contains the UUIDs of the aggregated nodes. The
+ * terms are not tokenized and not stored, only indexed.
*/
public static final String AGGREGATED_NODE_UUID = "_:AGGR_NODE_UUID".intern();
/**
- * Returns a named value for use as a term in the index. The named value is of the form:
- * <code>fieldName</code> + '\uFFFF' + value
- *
- * @param fieldName
- * the field name.
- * @param value
- * the value.
- * @return value prefixed with field name.
+ * Name of the field that contains the lengths of properties. The lengths
+ * are encoded using {@link #createNamedLength(String, long)}.
*/
- public static String createNamedValue(String fieldName, String value)
+ public static final String PROPERTY_LENGTHS = "_:PROPERTY_LENGTHS".intern();
+
+ /**
+ * Name of the field that marks nodes that require reindexing because the
+ * text extraction process timed out. See also {@link IndexingQueue}.
+ */
+ public static final String REINDEXING_REQUIRED = "_:REINDEXING_REQUIRED".intern();
+
+ /**
+ * Name of the field that marks shareable nodes.
+ */
+ public static final String SHAREABLE_NODE = "_:SHAREABLE_NODE".intern();
+
+ /**
+ * Name of the field that contains all weak reference property values.
+ */
+ public static final String WEAK_REFS = "_:WEAK_REFS".intern();
+
+ /**
+ * Returns a named length for use as a term in the index. The named length
+ * is of the form: <code>propertyName</code> + '[' +
+ * {@link LongField#longToString(long)}.
+ *
+ * @param propertyName a property name.
+ * @param length the length of the property value.
+ * @return the named length string for use as a term in the index.
+ */
+ public static String createNamedLength(String propertyName, long length)
{
- return fieldName + '\uFFFF' + value;
+ return propertyName + '[' + LongField.longToString(length);
}
/**
- * Returns a named value for use as a term in the index. The named value is of the form:
- * <code>fieldName</code> + '\uFFFF' + value
- *
- * @param fieldName
- * the field name.
- * @param value
- * the value.
+ * Returns a named value for use as a term in the index. The named
+ * value is of the form: <code>fieldName</code> + '[' + value
+ *
+ * @param fieldName the field name.
+ * @param value the value.
* @return value prefixed with field name.
*/
- public static String createFullTextFieldName(String fieldName)
+ public static String createNamedValue(String fieldName, String value)
{
- int idx = fieldName.indexOf(':');
- return fieldName.substring(0, idx + 1) + FieldNames.FULLTEXT_PREFIX + fieldName.substring(idx + 1);
+ return fieldName + '[' + value;
}
/**
- * Returns the length of the field prefix in <code>namedValue</code>. See also
- * {@link #createNamedValue(String, String)}. If <code>namedValue</code> does not contain a name
- * prefix, this method return 0.
- *
- * @param namedValue
- * the named value as created by {@link #createNamedValue(String, String)}.
- * @return the length of the field prefix including the separator char (\uFFFF).
+ * Returns the length of the field prefix in <code>namedValue</code>. See
+ * also {@link #createNamedValue(String, String)}. If <code>namedValue</code>
+ * does not contain a name prefix, this method returns 0.
+ *
+ * @param namedValue the named value as created by {@link #createNamedValue(String, String)}.
+ * @return the length of the field prefix including the separator char '['.
*/
public static int getNameLength(String namedValue)
{
- return namedValue.indexOf('\uFFFF') + 1;
+ return namedValue.indexOf('[') + 1;
}
+
+ /**
+ * Returns the fulltext field name for <code>fieldName</code>, i.e. the field
+ * name with {@link #FULLTEXT_PREFIX} inserted after the namespace prefix.
+ *
+ * @param fieldName
+ * the field name.
+ * @return the fulltext field name.
+ */
+ public static String createFullTextFieldName(String fieldName)
+ {
+ int idx = fieldName.indexOf(':');
+ return fieldName.substring(0, idx + 1) + FieldNames.FULLTEXT_PREFIX + fieldName.substring(idx + 1);
+ }
}
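The separator change from '\uFFFF' to '[' means every property term in a V3 index is stored as propertyName + '[' + value. A quick round trip through the helpers above:

    String term = FieldNames.createNamedValue("jcr:primaryType", "nt:file");
    // term is "jcr:primaryType[nt:file"
    int prefixLength = FieldNames.getNameLength(term); // 16: the name plus the '[' separator
    String value = term.substring(prefixLength);       // "nt:file"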
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexFormatVersion.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexFormatVersion.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexFormatVersion.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,17 +16,31 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+
+import java.io.IOException;
import java.util.Collection;
-import org.apache.lucene.index.IndexReader;
-
/**
- * This class indicates the lucene index format that is used. Version 1 formats do not have the
- * <code>PROPERTIES_SET</code> lucene fieldname and queries assuming this format also run on newer
- * versions. When the index is recreated from scratch, the Version 2 format will automatically be
- * used. This format is faster certain queries, so if the index does not contain
- * <code>PROPERTIES_SET</code> fieldname and re-indexing is an option, this is advisable. Existing
- * indexes are not automatically upgraded to a newer version!
+ * This class indicates the lucene index format that is used.
+ * <ul>
+ * <li><b>Version 1</b> is the initial index format, which is used for Jackrabbit
+ * releases 1.0 to 1.3.x. Unless a re-index happens, upgraded Jackrabbit
+ * instances will still use this version.</li>
+ * <li><b>Version 2</b> is the index format introduced with Jackrabbit 1.4.x. It
+ * adds a <code>PROPERTIES_SET</code> field which contains all property names of
+ * a node. This speeds up queries that check the existence of a property.</li>
+ * <li><b>Version 3</b> is the index format introduced with Jackrabbit 1.5.x. It
+ * adds support for length and local name queries using the newly added
+ * fields <code>PROPERTY_LENGTHS</code>, <code>LOCAL_NAME</code> and
+ * <code>NAMESPACE_URI</code>. Furthermore a Payload is added to
+ * <code>PROPERTIES</code> fields to indicate the property type.</li>
+ * </ul>
+ * Please note that existing indexes are not automatically upgraded to a newer
+ * version! If you want to take advantage of a certain 'feature' in an index
+ * format version you need to re-index the repository.
*/
public class IndexFormatVersion
{
@@ -37,20 +51,24 @@
public static final IndexFormatVersion V1 = new IndexFormatVersion(1);
/**
- * V2 is the index format for Jackrabbit releases >= 1.4
+ * V2 is the index format for Jackrabbit releases 1.4.x
*/
public static final IndexFormatVersion V2 = new IndexFormatVersion(2);
/**
+ * V3 is the index format for Jackrabbit releases >= 1.5
+ */
+ public static final IndexFormatVersion V3 = new IndexFormatVersion(3);
+
+ /**
* The used version of the index format
*/
private final int version;
/**
* Creates an index format version.
- *
- * @param version
- * The version of the index.
+ *
+ * @param version The version of the index.
*/
private IndexFormatVersion(int version)
{
@@ -59,7 +77,6 @@
/**
* Returns the index format version
- *
* @return the index format version.
*/
public int getVersion()
@@ -68,6 +85,19 @@
}
/**
+ * Returns <code>true</code> if this version is at least as high as the
+ * given <code>version</code>.
+ *
+ * @param version the other version to compare.
+ * @return <code>true</code> if this version is at least as high as the
+ * provided; <code>false</code> otherwise.
+ */
+ public boolean isAtLeast(IndexFormatVersion version)
+ {
+ return this.version >= version.getVersion();
+ }
+
+ /**
* @return a string representation of this index format version.
*/
public String toString()
@@ -76,13 +106,18 @@
}
/**
- * @return the index format version of the index used by the given index reader.
+ * @return the index format version of the index used by the given
+ * index reader.
*/
public static IndexFormatVersion getVersion(IndexReader indexReader)
{
Collection fields = indexReader.getFieldNames(IndexReader.FieldOption.ALL);
- if (fields.contains(FieldNames.PROPERTIES_SET) || indexReader.numDocs() == 0)
+ if (indexReader.numDocs() == 0 || isV3Index(indexReader))
{
+ return IndexFormatVersion.V3;
+ }
+ else if (fields.contains(FieldNames.PROPERTIES_SET))
+ {
return IndexFormatVersion.V2;
}
else
@@ -90,4 +125,55 @@
return IndexFormatVersion.V1;
}
}
+
+ /**
+ * Tests whether the index contains primary type terms in the new V3 ('[') format.
+ * @param indexReader the index reader to probe.
+ * @return <code>true</code> if a well-known primary type term is found in the V3 format.
+ */
+ private static boolean isV3Index(IndexReader indexReader)
+ {
+ if (containsV3PrimaryType(indexReader, "nt:base"))
+ {
+ return true;
+ }
+ else if (containsV3PrimaryType(indexReader, "nt:unstructured"))
+ {
+ return true;
+ }
+ else if (containsV3PrimaryType(indexReader, "nt:file"))
+ {
+ return true;
+ }
+ else if (containsV3PrimaryType(indexReader, "nt:folder"))
+ {
+ return true;
+ }
+ else if (containsV3PrimaryType(indexReader, "exo:versionStorage"))
+ {
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns true if the index contains the term jcr:primaryType + '[' + primaryType.
+ * @param indexReader the index reader to probe.
+ * @param primaryType the primary type value to look up.
+ * @return <code>true</code> if the term exists in the index.
+ */
+ private static boolean containsV3PrimaryType(IndexReader indexReader, String primaryType)
+ {
+ try
+ {
+ TermDocs doc =
+ indexReader.termDocs(new Term(FieldNames.PROPERTIES, FieldNames.createNamedValue("jcr:primaryType",
+ primaryType)));
+ return doc.next();
+ }
+ catch (IOException e)
+ {
+ return false;
+ }
+ }
}
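Because a V3 index cannot be recognized from field names alone, getVersion() probes for well-known jcr:primaryType terms in the new '[' format. A usage sketch for callers (the directory is assumed to hold an existing index):

    IndexReader reader = IndexReader.open(directory);
    try {
        IndexFormatVersion version = IndexFormatVersion.getVersion(reader);
        if (version.isAtLeast(IndexFormatVersion.V3)) {
            // property length and local name queries can be answered from the index
        }
    } finally {
        reader.close();
    }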
Modified: jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,13 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.Semaphore;
-
import org.apache.commons.collections.Buffer;
import org.apache.commons.collections.BufferUtils;
import org.apache.commons.collections.buffer.UnboundedFifoBuffer;
@@ -32,553 +25,486 @@
import org.exoplatform.services.log.ExoLogger;
import org.exoplatform.services.log.Log;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.Semaphore;
+
/**
* Merges indexes in a separate daemon thread.
*/
-class IndexMerger extends Thread implements IndexListener
-{
+class IndexMerger extends Thread implements IndexListener {
- /**
- * Logger instance for this class.
- */
- private static final Log log = ExoLogger.getLogger(IndexMerger.class);
+ /**
+ * Logger instance for this class.
+ */
+ private static final Log log = ExoLogger.getLogger(IndexMerger.class);
- /**
- * Marker task to signal the background thread to quit.
- */
- private static final Merge QUIT = new Merge(new Index[0]);
+ /**
+ * Marker task to signal the background thread to quit.
+ */
+ private static final Merge QUIT = new Merge(new Index[0]);
- /**
- * minMergeDocs config parameter.
- */
- private int minMergeDocs = new Integer(QueryHandlerEntryWrapper.DEFAULT_MIN_MERGE_DOCS);
+ /**
+ * minMergeDocs config parameter.
+ */
+ private int minMergeDocs = new Integer(QueryHandlerEntryWrapper.DEFAULT_MIN_MERGE_DOCS);
- /**
- * maxMergeDocs config parameter
- */
- private int maxMergeDocs = new Integer(QueryHandlerEntryWrapper.DEFAULT_MAX_MERGE_DOCS);
+ /**
+ * maxMergeDocs config parameter
+ */
+ private int maxMergeDocs = new Integer(QueryHandlerEntryWrapper.DEFAULT_MAX_MERGE_DOCS);
- /**
- * mergeFactor config parameter
- */
- private int mergeFactor = new Integer(QueryHandlerEntryWrapper.DEFAULT_MERGE_FACTOR);
+ /**
+ * mergeFactor config parameter
+ */
+ private int mergeFactor = new Integer(QueryHandlerEntryWrapper.DEFAULT_MERGE_FACTOR);
- /**
- * Queue of merge Tasks
- */
- private final Buffer mergeTasks = BufferUtils.blockingBuffer(new UnboundedFifoBuffer());
+ /**
+ * Queue of merge Tasks
+ */
+ private final Buffer mergeTasks = BufferUtils.blockingBuffer(new UnboundedFifoBuffer());
- /**
- * List of id <code>Term</code> that identify documents that were deleted while a merge was
- * running.
- */
- private final List<Term> deletedDocuments = Collections.synchronizedList(new ArrayList<Term>());
+ /**
+ * List of id <code>Term</code> that identify documents that were deleted
+ * while a merge was running.
+ */
+ private final List<Term> deletedDocuments = Collections.synchronizedList(new ArrayList<Term>());
- /**
- * List of <code>IndexBucket</code>s in ascending document limit.
- */
- private final List<IndexBucket> indexBuckets = new ArrayList<IndexBucket>();
+ /**
+ * List of <code>IndexBucket</code>s in ascending document limit.
+ */
+ private final List<IndexBucket> indexBuckets = new ArrayList<IndexBucket>();
- /**
- * The <code>MultiIndex</code> this index merger is working on.
- */
- private final MultiIndex multiIndex;
+ /**
+ * The <code>MultiIndex</code> this index merger is working on.
+ */
+ private final MultiIndex multiIndex;
- /**
- * Monitor object to synchronize merge calculation.
- */
- private final Object lock = new Object();
+ /**
+ * Monitor object to synchronize merge calculation.
+ */
+ private final Object lock = new Object();
- /**
- * Mutex that is acquired when replacing indexes on MultiIndex.
- */
- private final Semaphore indexReplacement = new Semaphore(1);
+ /**
+ * Mutex that is acquired when replacing indexes on MultiIndex.
+ */
+ private final Semaphore indexReplacement = new Semaphore(1);
- /**
- * When released, indicates that this index merger is idle.
- */
- private final Semaphore mergerIdle = new Semaphore(1);
+ /**
+ * When released, indicates that this index merger is idle.
+ */
+ private final Semaphore mergerIdle = new Semaphore(1);
- /**
- * Creates an <code>IndexMerger</code>.
- *
- * @param multiIndex
- * the <code>MultiIndex</code>.
- */
- IndexMerger(MultiIndex multiIndex)
- {
- this.multiIndex = multiIndex;
- setName("IndexMerger " + this.multiIndex.getIndexDir().getPath());
- setDaemon(true);
- try
- {
- mergerIdle.acquire();
+ /**
+ * Creates an <code>IndexMerger</code>.
+ *
+ * @param multiIndex the <code>MultiIndex</code>.
+ */
+ IndexMerger(MultiIndex multiIndex) {
+ this.multiIndex = multiIndex;
+ setName("IndexMerger " + this.multiIndex.getIndexDir().getPath());
+ setDaemon(true);
+ try {
+ mergerIdle.acquire();
+ } catch (InterruptedException e) {
+ // will never happen, lock is free upon construction
+ throw new InternalError("Unable to acquire mutex after construction");
+ }
+ }
+
+ /**
+ * Informs the index merger that an index was added / created.
+ *
+ * @param name the name of the index.
+ * @param numDocs the number of documents it contains.
+ */
+ void indexAdded(String name, int numDocs) {
+ if (numDocs < 0) {
+ throw new IllegalArgumentException("numDocs must be positive");
+ }
+ // multiple threads may enter this method:
+ // - the background thread of this IndexMerger, when it replaces indexes
+ // after a successful merge
+ // - a regular thread that updates the workspace
+ //
+ // therefore we have to synchronize this block
+ synchronized (lock) {
+ // initially create buckets
+ if (indexBuckets.size() == 0) {
+ long lower = 0;
+ long upper = minMergeDocs;
+ while (upper < maxMergeDocs) {
+ indexBuckets.add(new IndexBucket(lower, upper, true));
+ lower = upper + 1;
+ upper *= mergeFactor;
+ }
+ // one with upper = maxMergeDocs
+ indexBuckets.add(new IndexBucket(lower, maxMergeDocs, false));
+ // and another one as overflow, just in case...
+ indexBuckets.add(new IndexBucket(maxMergeDocs + 1, Long.MAX_VALUE, false));
}
- catch (InterruptedException e)
- {
- // will never happen, lock is free upon construction
- throw new InternalError("Unable to acquire mutex after construction");
+
+ // put index in bucket
+ IndexBucket bucket = indexBuckets.get(indexBuckets.size() - 1);
+ for (int i = 0; i < indexBuckets.size(); i++) {
+ bucket = indexBuckets.get(i);
+ if (bucket.fits(numDocs)) {
+ break;
+ }
}
- }
+ bucket.add(new Index(name, numDocs));
- /**
- * Informs the index merger that an index was added / created.
- *
- * @param name
- * the name of the index.
- * @param numDocs
- * the number of documents it contains.
- */
- void indexAdded(String name, int numDocs)
- {
- if (numDocs < 0)
- {
- throw new IllegalArgumentException("numDocs must be positive");
+ if (log.isDebugEnabled()) {
+ log.debug("index added: name=" + name + ", numDocs=" +
numDocs);
}
- // multiple threads may enter this method:
- // - the background thread of this IndexMerger, when it replaces indexes
- // after a successful merge
- // - a regular thread that updates the workspace
- //
- // therefore we have to synchronize this block
- synchronized (lock)
- {
- // initially create buckets
- if (indexBuckets.size() == 0)
- {
- long lower = 0;
- long upper = minMergeDocs;
- while (upper < maxMergeDocs)
- {
- indexBuckets.add(new IndexBucket(lower, upper, true));
- lower = upper + 1;
- upper *= mergeFactor;
- }
- // one with upper = maxMergeDocs
- indexBuckets.add(new IndexBucket(lower, maxMergeDocs, false));
- // and another one as overflow, just in case...
- indexBuckets.add(new IndexBucket(maxMergeDocs + 1, Long.MAX_VALUE, false));
- }
- // put index in bucket
- IndexBucket bucket = indexBuckets.get(indexBuckets.size() - 1);
- for (int i = 0; i < indexBuckets.size(); i++)
- {
- bucket = indexBuckets.get(i);
- if (bucket.fits(numDocs))
- {
- break;
- }
- }
- bucket.add(new Index(name, numDocs));
+ // if bucket does not allow merge, we don't have to continue
+ if (!bucket.allowsMerge()) {
+ return;
+ }
- if (log.isDebugEnabled())
- {
- log.debug("index added: name=" + name + ", numDocs=" +
numDocs);
- }
+ // check if we need a merge
+ if (bucket.size() >= mergeFactor) {
+ long targetMergeDocs = bucket.upper;
+ targetMergeDocs = Math.min(targetMergeDocs * mergeFactor, maxMergeDocs);
+ // sum up docs in bucket
+ List<Index> indexesToMerge = new ArrayList<Index>();
+ int mergeDocs = 0;
+ for (Iterator<Index> it = bucket.iterator(); it.hasNext() && mergeDocs <= targetMergeDocs;) {
+ indexesToMerge.add(it.next());
+ }
+ if (indexesToMerge.size() > 2) {
+ // found merge
+ Index[] idxs = indexesToMerge.toArray(new Index[indexesToMerge.size()]);
+ bucket.removeAll(indexesToMerge);
+ if (log.isDebugEnabled()) {
+ log.debug("requesting merge for " + indexesToMerge);
+ }
+ mergeTasks.add(new Merge(idxs));
+ log.debug("merge queue now contains " + mergeTasks.size() + "
tasks.");
+ }
+ }
+ }
+ }
- // if bucket does not allow merge, we don't have to continue
- if (!bucket.allowsMerge())
- {
- return;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public void documentDeleted(Term id) {
+ log.debug("document deleted: " + id.text());
+ deletedDocuments.add(id);
+ }
- // check if we need a merge
- if (bucket.size() >= mergeFactor)
- {
- long targetMergeDocs = bucket.upper;
- targetMergeDocs = Math.min(targetMergeDocs * mergeFactor, maxMergeDocs);
- // sum up docs in bucket
- List<Index> indexesToMerge = new ArrayList<Index>();
- int mergeDocs = 0;
- for (Iterator<Index> it = bucket.iterator(); it.hasNext() && mergeDocs <= targetMergeDocs;)
- {
- indexesToMerge.add(it.next());
- }
- if (indexesToMerge.size() > 2)
- {
- // found merge
- Index[] idxs = indexesToMerge.toArray(new Index[indexesToMerge.size()]);
- bucket.removeAll(indexesToMerge);
- if (log.isDebugEnabled())
- {
- log.debug("requesting merge for " + indexesToMerge);
- }
- mergeTasks.add(new Merge(idxs));
- log.debug("merge queue now contains " + mergeTasks.size() +
" tasks.");
- }
- }
- }
- }
+ /**
+ * When the calling thread returns this index merger will be idle, that is
+ * there will be no merge tasks pending anymore. The method returns
+ * immediately if there are currently no tasks pending at all.
+ */
+ void waitUntilIdle() throws InterruptedException {
+ mergerIdle.acquire();
+ // and immediately release again
+ mergerIdle.release();
+ }
- /**
- * @inheritDoc
- */
- public void documentDeleted(Term id)
- {
- log.debug("document deleted: " + id.text());
- deletedDocuments.add(id);
- }
+ /**
+ * Signals this <code>IndexMerger</code> to stop and waits until it has
+ * terminated.
+ */
+ void dispose() {
+ if (log.isDebugEnabled())
+ log.info("dispose " + getName() + ", " +
Thread.currentThread());
+ // get mutex for index replacements
+ try {
+ indexReplacement.acquire();
+ } catch (InterruptedException e) {
+ log.warn("Interrupted while acquiring index replacement sync: " + e);
+ // try to stop IndexMerger without the sync
+ }
- /**
- * When the calling thread returns this index merger will be idle, that is there will be no merge
- * tasks pending anymore. The method returns immediately if there are currently no tasks pending
- * at all.
- */
- void waitUntilIdle() throws InterruptedException
- {
- mergerIdle.acquire();
- // and immediately release again
- mergerIdle.release();
- }
+ // clear task queue
+ mergeTasks.clear();
- /**
- * Signals this <code>IndexMerger</code> to stop and waits until it has terminated.
- */
- void dispose()
- {
- if (log.isDebugEnabled())
- log.info("dispose " + getName() + ", " +
Thread.currentThread());
- // get mutex for index replacements
- try
- {
- indexReplacement.acquire();
+ // send quit
+ mergeTasks.add(QUIT);
+ log.debug("quit sent");
+
+ try {
+ // give the merger thread some time to quit,
+ // it is possible that the merger is busy working on a large index.
+ // if that is the case we will just ignore it and the daemon will
+ // die without being able to finish the merge.
+ // at this point it is not possible anymore to replace indexes
+ // on the MultiIndex because we hold the indexReplacement Sync.
+ this.join(500);
+ if (isAlive()) {
+ log.info("Unable to stop IndexMerger. Deamon is busy.");
+ } else {
+ log.debug("IndexMerger thread stopped");
}
- catch (InterruptedException e)
- {
- log.warn("Interrupted while acquiring index replacement sync: " + e);
- // try to stop IndexMerger without the sync
+ log.debug("merge queue size: " + mergeTasks.size());
+ } catch (InterruptedException e) {
+ log.warn("Interrupted while waiting for IndexMerger thread to
terminate.");
+ }
+ }
+
+ /**
+ * Implements the index merging.
+ */
+ public void run() {
+ for (;;) {
+ boolean isIdle = false;
+ if (mergeTasks.size() == 0) {
+ mergerIdle.release();
+ isIdle = true;
}
+ Merge task = (Merge) mergeTasks.remove();
+ if (task == QUIT) {
+ mergerIdle.release();
+ break;
+ }
+ if (isIdle) {
+ try {
+ mergerIdle.acquire();
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ log.warn("Unable to acquire mergerIdle sync");
+ }
+ }
- // clear task queue
- mergeTasks.clear();
+ log.debug("accepted merge request");
- // send quit
- mergeTasks.add(QUIT);
- log.debug("quit sent");
+ // reset deleted documents
+ deletedDocuments.clear();
- try
- {
- // give the merger thread some time to quit,
- // it is possible that the merger is busy working on a large index.
- // if that is the case we will just ignore it and the deamon will
- // die without being able to finish the merge.
- // at this point it is not possible anymore to replace indexes
- // on the MultiIndex because we hold the indexReplacement Sync.
- this.join(500);
- if (isAlive())
- {
- log.info("Unable to stop IndexMerger. Deamon is busy.");
- }
- else
- {
- log.debug("IndexMerger thread stopped");
- }
- log.debug("merge queue size: " + mergeTasks.size());
+ // get readers
+ String[] names = new String[task.indexes.length];
+ for (int i = 0; i < task.indexes.length; i++) {
+ names[i] = task.indexes[i].name;
}
- catch (InterruptedException e)
- {
- log.warn("Interrupted while waiting for IndexMerger thread to
terminate.");
- }
- }
+ try {
+ log.debug("create new index");
+ PersistentIndex index = multiIndex.getOrCreateIndex(null);
+ boolean success = false;
+ try {
- /**
- * Implements the index merging.
- */
- public void run()
- {
- for (;;)
- {
- boolean isIdle = false;
- if (mergeTasks.size() == 0)
- {
- mergerIdle.release();
- isIdle = true;
- }
- Merge task = (Merge)mergeTasks.remove();
- if (task == QUIT)
- {
- mergerIdle.release();
- break;
- }
- if (isIdle)
- {
- try
- {
- mergerIdle.acquire();
+ log.debug("get index readers from MultiIndex");
+ IndexReader[] readers = multiIndex.getIndexReaders(names, this);
+ try {
+ // do the merge
+ long time = System.currentTimeMillis();
+ index.addIndexes(readers);
+ time = System.currentTimeMillis() - time;
+ int docCount = 0;
+ for (int i = 0; i < readers.length; i++) {
+ docCount += readers[i].numDocs();
}
- catch (InterruptedException e)
- {
- Thread.interrupted();
- log.warn("Unable to acquire mergerIdle sync");
- }
- }
+ log.info("merged " + docCount + " documents in " + time +
" ms into " + index.getName()
+ + ".");
- log.debug("accepted merge request");
+ // force initializing of caches
+ // time = System.currentTimeMillis();
+ // index.getReadOnlyIndexReader().release();
+ // time = System.currentTimeMillis() - time;
- // reset deleted documents
- deletedDocuments.clear();
+ } finally {
+ for (int i = 0; i < readers.length; i++) {
+ try {
+ Util.closeOrRelease(readers[i]);
+ } catch (IOException e) {
+ log.warn("Unable to close IndexReader: " + e);
+ }
+ }
+ }
+ // inform multi index
+ // if we cannot get the sync immediately we have to quit
+ if (!indexReplacement.tryAcquire()) {
+ if (log.isDebugEnabled())
+ log.debug("index merging canceled " + getName());
+ break;
+ }
+ try {
+ if (log.isDebugEnabled())
+ log.debug("replace indexes");
- // get readers
- String[] names = new String[task.indexes.length];
- for (int i = 0; i < task.indexes.length; i++)
- {
- names[i] = task.indexes[i].name;
- }
- try
- {
- log.debug("create new index");
- PersistentIndex index = multiIndex.getOrCreateIndex(null);
- boolean success = false;
- try
- {
+ multiIndex.replaceIndexes(names, index, deletedDocuments);
+ } finally {
+ indexReplacement.release();
+ }
- log.debug("get index readers from MultiIndex");
- IndexReader[] readers = multiIndex.getIndexReaders(names, this);
- try
- {
- // do the merge
- long time = System.currentTimeMillis();
- index.addIndexes(readers);
- time = System.currentTimeMillis() - time;
- int docCount = 0;
- for (int i = 0; i < readers.length; i++)
- {
- docCount += readers[i].numDocs();
- }
- log.info("merged " + docCount + " documents in " +
time + " ms into " + index.getName() + ".");
- }
- finally
- {
- for (int i = 0; i < readers.length; i++)
- {
- try
- {
- readers[i].close();
- }
- catch (IOException e)
- {
- log.warn("Unable to close IndexReader: " + e);
- }
- }
- }
- // inform multi index
- // if we cannot get the sync immediately we have to quit
- if (!indexReplacement.tryAcquire())
- {
- if (log.isDebugEnabled())
- log.debug("index merging canceled " + getName());
- break;
- }
- try
- {
- if (log.isDebugEnabled())
- log.debug("replace indexes");
+ success = true;
- multiIndex.replaceIndexes(names, index, deletedDocuments);
- }
- finally
- {
- indexReplacement.release();
- }
-
- success = true;
-
- }
- finally
- {
- if (!success)
- {
- // delete index
- log.debug("deleting index " + index.getName());
- multiIndex.deleteIndex(index);
- }
- }
- }
- catch (Throwable e)
- {
- log.error("Error while merging indexes in " + getName() + ":
" + e);
- }
+ } finally {
+ if (!success) {
+ // delete index
+ log.debug("deleting index " + index.getName());
+ multiIndex.deleteIndex(index);
+ }
+ }
+ } catch (Throwable e) {
+ log.error("Error while merging indexes in " + getName() + ":
" + e);
}
- log.info(getName() + " terminated");
- }
+ }
+ log.info(getName() + " terminated");
+ }
- // -----------------------< merge properties >-------------------------------
+ // -----------------------< merge properties >-------------------------------
- /**
- * The merge factor.
- */
- public void setMergeFactor(int mergeFactor)
- {
- this.mergeFactor = mergeFactor;
- }
+ /**
+ * The merge factor.
+ */
+ public void setMergeFactor(int mergeFactor) {
+ this.mergeFactor = mergeFactor;
+ }
- /**
- * The initial threshold for number of documents to merge to a new index.
- */
- public void setMinMergeDocs(int minMergeDocs)
- {
- this.minMergeDocs = minMergeDocs;
- }
+ /**
+ * The initial threshold for number of documents to merge to a new index.
+ */
+ public void setMinMergeDocs(int minMergeDocs) {
+ this.minMergeDocs = minMergeDocs;
+ }
- /**
- * The maximum number of document to merge.
- */
- public void setMaxMergeDocs(int maxMergeDocs)
- {
- this.maxMergeDocs = maxMergeDocs;
- }
+ /**
+ * The maximum number of document to merge.
+ */
+ public void setMaxMergeDocs(int maxMergeDocs) {
+ this.maxMergeDocs = maxMergeDocs;
+ }
- // ------------------------------< internal >--------------------------------
+ // ------------------------------< internal >--------------------------------
- /**
- * Implements a simple struct that holds the name of an index and how many document it contains.
- * <code>Index</code> is comparable using the number of documents it contains.
- */
- private static final class Index implements Comparable
- {
+ /**
+ * Implements a simple struct that holds the name of an index and how many
+ * documents it contains. <code>Index</code> is comparable using the number of
+ * documents it contains.
+ */
+ private static final class Index implements Comparable {
- /**
- * The name of the index.
- */
- private final String name;
+ /**
+ * The name of the index.
+ */
+ private final String name;
- /**
- * The number of documents the index contains.
- */
- private final int numDocs;
+ /**
+ * The number of documents the index contains.
+ */
+ private final int numDocs;
- /**
- * Creates a new index struct.
- *
- * @param name
- * name of an index.
- * @param numDocs
- * number of documents it contains.
- */
- Index(String name, int numDocs)
- {
- this.name = name;
- this.numDocs = numDocs;
- }
+ /**
+ * Creates a new index struct.
+ *
+ * @param name name of an index.
+ * @param numDocs number of documents it contains.
+ */
+ Index(String name, int numDocs) {
+ this.name = name;
+ this.numDocs = numDocs;
+ }
- /**
- * Indexes are first ordered by {@link #numDocs} and then by {@link #name}.
- *
- * @param o
- * the other <code>Index</code>.
- * @return a negative integer, zero, or a positive integer as this Index is less than, equal to,
- * or greater than the specified Index.
- */
- public int compareTo(Object o)
- {
- Index other = (Index)o;
- int val = numDocs < other.numDocs ? -1 : (numDocs == other.numDocs ? 0 : 1);
- if (val != 0)
- {
- return val;
- }
- else
- {
- return name.compareTo(other.name);
- }
+ /**
+ * Indexes are first ordered by {@link #numDocs} and then by {@link #name}.
+ *
+ * @param o the other <code>Index</code>.
+ * @return a negative integer, zero, or a positive integer as this Index is
+ * less than, equal to, or greater than the specified Index.
+ */
+ public int compareTo(Object o) {
+ Index other = (Index) o;
+ int val = numDocs < other.numDocs ? -1 : (numDocs == other.numDocs ? 0 : 1);
+ if (val != 0) {
+ return val;
+ } else {
+ return name.compareTo(other.name);
}
+ }
- /**
- * @inheritDoc
- */
- public String toString()
- {
- return name + ":" + numDocs;
- }
- }
+ /**
+ * {@inheritDoc}
+ */
+ public String toString() {
+ return name + ":" + numDocs;
+ }
+ }
- /**
- * Defines a merge task, to merge a couple of indexes into a new index.
- */
- private static final class Merge
- {
+ /**
+ * Defines a merge task, to merge a couple of indexes into a new index.
+ */
+ private static final class Merge {
- private final Index[] indexes;
+ private final Index[] indexes;
- /**
- * Merge task, to merge <code>indexes</code> into a new index with <code>name</code>.
- *
- * @param indexes
- * the indexes to merge.
- */
- Merge(Index[] indexes)
- {
- this.indexes = new Index[indexes.length];
- System.arraycopy(indexes, 0, this.indexes, 0, indexes.length);
- }
- }
+ /**
+ * Merge task, to merge <code>indexes</code> into a new index with
+ * <code>name</code>.
+ *
+ * @param indexes the indexes to merge.
+ */
+ Merge(Index[] indexes) {
+ this.indexes = new Index[indexes.length];
+ System.arraycopy(indexes, 0, this.indexes, 0, indexes.length);
+ }
+ }
- /**
- * Implements a <code>List</code> with a document limit value. An <code>IndexBucket</code>
- * contains {@link Index}es with documents less or equal the document limit of the bucket.
- */
- private static final class IndexBucket extends ArrayList<Index>
- {
+ /**
+ * Implements a <code>List</code> with a document limit value. An
+ * <code>IndexBucket</code> contains {@link Index}es with documents less or
+ * equal the document limit of the bucket.
+ */
+ private static final class IndexBucket extends ArrayList<Index> {
- /**
- *
- */
- private static final long serialVersionUID = 1885162315017837466L;
+ /**
+ *
+ */
+ private static final long serialVersionUID = 1885162315017837466L;
- /**
- * The lower document limit.
- */
- private final long lower;
+ /**
+ * The lower document limit.
+ */
+ private final long lower;
- /**
- * The upper document limit.
- */
- private final long upper;
+ /**
+ * The upper document limit.
+ */
+ private final long upper;
- /**
- * Flag indicating if indexes in this bucket can be merged.
- */
- private final boolean allowMerge;
+ /**
+ * Flag indicating if indexes in this bucket can be merged.
+ */
+ private final boolean allowMerge;
- /**
- * Creates a new <code>IndexBucket</code>. Limits are both inclusive.
- *
- * @param lower
- * document limit.
- * @param upper
- * document limit.
- * @param allowMerge
- * if indexes in this bucket can be merged.
- */
- IndexBucket(long lower, long upper, boolean allowMerge)
- {
- this.lower = lower;
- this.upper = upper;
- this.allowMerge = allowMerge;
- }
+ /**
+ * Creates a new <code>IndexBucket</code>. Limits are both inclusive.
+ *
+ * @param lower document limit.
+ * @param upper document limit.
+ * @param allowMerge if indexes in this bucket can be merged.
+ */
+ IndexBucket(long lower, long upper, boolean allowMerge) {
+ this.lower = lower;
+ this.upper = upper;
+ this.allowMerge = allowMerge;
+ }
- /**
- * Returns <code>true</code> if the number of documents fit in this <code>IndexBucket</code>;
- * otherwise <code>false</code>
- *
- * @param numDocs
- * the number of documents.
- * @return <code>true</code> if <code>numDocs</code> fit.
- */
- boolean fits(long numDocs)
- {
- return numDocs >= lower && numDocs <= upper;
- }
+ /**
+ * Returns <code>true</code> if the number of documents fits in this
+ * <code>IndexBucket</code>; otherwise <code>false</code>
+ *
+ * @param numDocs the number of documents.
+ * @return <code>true</code> if <code>numDocs</code> fit.
+ */
+ boolean fits(long numDocs) {
+ return numDocs >= lower && numDocs <= upper;
+ }
- /**
- * Returns <code>true</code> if indexes in this bucket can be merged.
- *
- * @return <code>true</code> if indexes in this bucket can be merged.
- */
- boolean allowsMerge()
- {
- return allowMerge;
- }
- }
+ /**
+ * Returns <code>true</code> if indexes in this bucket can be merged.
+ *
+ * @return <code>true</code> if indexes in this bucket can be merged.
+ */
+ boolean allowsMerge() {
+ return allowMerge;
+ }
+ }
}
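The bucket layout built lazily in indexAdded() above grows geometrically: mergeable buckets start at [0 .. minMergeDocs], each further upper bound is multiplied by mergeFactor, and a non-mergeable bucket capped at maxMergeDocs plus an overflow bucket close the list. The following self-contained sketch reproduces that layout; the class name and the three parameter values are illustrative examples only, not part of this commit:

public class BucketLayoutDemo
{
   public static void main(String[] args)
   {
      // assumed example settings; the real values come from the
      // setMinMergeDocs/setMergeFactor/setMaxMergeDocs setters above
      long minMergeDocs = 100;
      long maxMergeDocs = 100000;
      int mergeFactor = 10;

      // mergeable buckets, mirroring the loop in indexAdded()
      long lower = 0;
      long upper = minMergeDocs;
      while (upper < maxMergeDocs)
      {
         System.out.println("[" + lower + " .. " + upper + "] mergeable");
         lower = upper + 1;
         upper *= mergeFactor;
      }
      // one bucket capped at maxMergeDocs, then the overflow bucket
      System.out.println("[" + lower + " .. " + maxMergeDocs + "] not mergeable");
      System.out.println("[" + (maxMergeDocs + 1) + " .. Long.MAX_VALUE] overflow");
   }
}

With these values the mergeable buckets are [0..100], [101..1000] and [1001..10000]; as soon as one of them collects mergeFactor indexes, a Merge task covering them is queued for the background thread.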
Added:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMigration.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMigration.java (rev 0)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMigration.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.FilterIndexReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.NoLockFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * <code>IndexMigration</code> implements a utility that migrates a Jackrabbit
+ * 1.4.x index to version 1.5. Until version 1.4.x, indexes used the character
+ * '\uFFFF' to separate the name of a property from the value. As of Lucene 2.3
+ * this does not work anymore. See LUCENE-1221. Jackrabbit >= 1.5 uses the
+ * character '[' as a separator. Whenever an index is opened from disk, a quick
+ * check is run to find out whether a migration is required. See also JCR-1363
+ * for more details.
+ */
+public class IndexMigration
+{
+
+ /**
+ * The logger instance for this class.
+ */
+ private static final Logger log = LoggerFactory.getLogger(IndexMigration.class);
+
+ /**
+ * Checks if the given <code>index</code> needs to be migrated.
+ *
+ * @param index the index to check and migrate if needed.
+ * @param indexDir the directory where the index is stored.
+ * @param isUpgradeIndex <code>true</code> if the index upgrade is allowed.
+ * @throws IOException if an error occurs while migrating the index.
+ */
+ public static void migrate(PersistentIndex index, File indexDir, boolean isUpgradeIndex) throws IOException
+ {
+ log.debug("Checking {} ...", indexDir.getAbsolutePath());
+ ReadOnlyIndexReader reader = index.getReadOnlyIndexReader();
+ try
+ {
+ if (IndexFormatVersion.getVersion(reader).getVersion() >= IndexFormatVersion.V3.getVersion())
+ {
+ // index was created with Jackrabbit 1.5 or higher
+ // no need for migration
+ log.debug("IndexFormatVersion >= V3, no migration needed");
+ return;
+ }
+ if (!isUpgradeIndex)
+ {
+ throw new IOException("Outdated index format. To allow index upgrade
process add "
+ + "to the index configuration parameter 'upgrade-index' with
value 'true'"
+ + " or set system property -Dupgrade-index=true.");
+ }
+ // assert: there is at least one node in the index, otherwise the
+ // index format version would be at least V3
+ TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, ""));
+ try
+ {
+ Term t = terms.term();
+ if (t.text().indexOf('\uFFFF') == -1)
+ {
+ log.debug("Index already migrated");
+ return;
+ }
+ }
+ finally
+ {
+ terms.close();
+ }
+ }
+ finally
+ {
+ reader.release();
+ }
+
+ // if we get here then the index must be migrated
+ log.debug("Index requires migration {}", indexDir.getAbsolutePath());
+
+ // make sure readers are closed, otherwise the directory
+ // cannot be deleted
+ index.releaseWriterAndReaders();
+
+ File migrationDir = new File(indexDir.getAbsoluteFile().getParentFile(), indexDir.getName() + "_v2.3");
+ if (migrationDir.exists())
+ {
+ // TODO DELETE
+ fullyDelete(migrationDir);
+ }
+ if (!migrationDir.mkdirs())
+ {
+ throw new IOException("failed to create directory " +
migrationDir.getAbsolutePath());
+ }
+ FSDirectory fsDir = FSDirectory.getDirectory(migrationDir, NoLockFactory.getNoLockFactory());
+ try
+ {
+ IndexWriter writer = new IndexWriter(fsDir, new StandardAnalyzer());
+ try
+ {
+ IndexReader r = new MigrationIndexReader(IndexReader.open(index.getDirectory()));
+ try
+ {
+ writer.addIndexes(new IndexReader[]{r});
+ writer.close();
+ }
+ finally
+ {
+ r.close();
+ }
+ }
+ finally
+ {
+ writer.close();
+ }
+ }
+ finally
+ {
+ fsDir.close();
+ }
+ // TODO DELETE
+ fullyDelete(indexDir);
+ if (!migrationDir.renameTo(indexDir))
+ {
+ throw new IOException("failed to move migrated directory " +
migrationDir.getAbsolutePath());
+ }
+ log.info("Migrated " + indexDir.getAbsolutePath());
+ }
+
+ // ---------------------------< internal helper >----------------------------
+ /**
+ * Delete files and directories, even if non-empty.
+ *
+ * @param dir file or directory
+ * @return true on success, false if none or only part of the files have been deleted
+ * @throws java.io.IOException
+ */
+ public static boolean fullyDelete(File dir) throws IOException
+ {
+ if (dir == null || !dir.exists())
+ return false;
+ File contents[] = dir.listFiles();
+ if (contents != null)
+ {
+ for (int i = 0; i < contents.length; i++)
+ {
+ if (contents[i].isFile())
+ {
+ if (!contents[i].delete())
+ {
+ return false;
+ }
+ }
+ else
+ {
+ if (!fullyDelete(contents[i]))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ return dir.delete();
+ }
+
+ /**
+ * An index reader that migrates stored field values and term text on the fly.
+ */
+ private static class MigrationIndexReader extends FilterIndexReader
+ {
+
+ public MigrationIndexReader(IndexReader in)
+ {
+ super(in);
+ }
+
+ public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException
+ {
+ Document doc = super.document(n, fieldSelector);
+ Fieldable[] fields = doc.getFieldables(FieldNames.PROPERTIES);
+ if (fields != null)
+ {
+ doc.removeFields(FieldNames.PROPERTIES);
+ for (int i = 0; i < fields.length; i++)
+ {
+ String value = fields[i].stringValue();
+ value = value.replace('\uFFFF', '[');
+ doc.add(new Field(FieldNames.PROPERTIES, value, Field.Store.YES, Field.Index.NO_NORMS));
+ }
+ }
+ return doc;
+ }
+
+ public TermEnum terms() throws IOException
+ {
+ return new MigrationTermEnum(in.terms());
+ }
+
+ public TermPositions termPositions() throws IOException
+ {
+ return new MigrationTermPositions(in.termPositions());
+ }
+
+ private static class MigrationTermEnum extends FilterTermEnum
+ {
+
+ public MigrationTermEnum(TermEnum in)
+ {
+ super(in);
+ }
+
+ public Term term()
+ {
+ Term t = super.term();
+ if (t == null)
+ {
+ return t;
+ }
+ if (t.field().equals(FieldNames.PROPERTIES))
+ {
+ String text = t.text();
+ return t.createTerm(text.replace('\uFFFF', '['));
+ }
+ else
+ {
+ return t;
+ }
+ }
+
+ TermEnum unwrap()
+ {
+ return in;
+ }
+ }
+
+ private static class MigrationTermPositions extends FilterTermPositions
+ {
+
+ public MigrationTermPositions(TermPositions in)
+ {
+ super(in);
+ }
+
+ public void seek(Term term) throws IOException
+ {
+ if (term.field().equals(FieldNames.PROPERTIES))
+ {
+ char[] text = term.text().toCharArray();
+ text[term.text().indexOf('[')] = '\uFFFF';
+ super.seek(term.createTerm(new String(text)));
+ }
+ else
+ {
+ super.seek(term);
+ }
+ }
+
+ public void seek(TermEnum termEnum) throws IOException
+ {
+ if (termEnum instanceof MigrationTermEnum)
+ {
+ super.seek(((MigrationTermEnum)termEnum).unwrap());
+ }
+ else
+ {
+ super.seek(termEnum);
+ }
+ }
+ }
+ }
+}
Property changes on:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMigration.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
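The rewrite that MigrationIndexReader performs on the fly is a plain character substitution in PROPERTIES term text: the pre-1.5 '\uFFFF' separator between property name and value becomes '['. A minimal sketch of just that substitution (the property name and value are made-up examples):

public class SeparatorMigrationDemo
{
   public static void main(String[] args)
   {
      String oldTerm = "title" + '\uFFFF' + "hello"; // 1.4.x term text
      String newTerm = oldTerm.replace('\uFFFF', '['); // 1.5 term text
      System.out.println(newTerm); // prints: title[hello
   }
}

Note that migrate() only rewrites the index when the 'upgrade-index' configuration parameter or the -Dupgrade-index=true system property is set; otherwise it fails fast with the IOException shown above.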
Added:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JackrabbitIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JackrabbitIndexReader.java (rev 0)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JackrabbitIndexReader.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.index.FilterIndexReader;
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * <code>JackrabbitIndexReader</code> wraps an index reader and
+ * {@link ReleaseableIndexReader#release() releases} the underlying reader when
+ * a client calls {@link #close()} on this reader. This allows reusing of the
+ * underlying index reader instance.
+ */
+public final class JackrabbitIndexReader extends FilterIndexReader implements HierarchyResolver,
+ MultiIndexReader {
+
+ /**
+ * The hierarchy resolver.
+ */
+ private final HierarchyResolver resolver;
+
+ /**
+ * The underlying index reader exposed as a {@link MultiIndexReader}.
+ */
+ private final MultiIndexReader reader;
+
+ /**
+ * Creates a new <code>JackrabbitIndexReader</code>. The passed index reader
+ * must also implement the interfaces {@link HierarchyResolver} and
+ * {@link MultiIndexReader}.
+ *
+ * @param in the underlying index reader.
+ * @throws IllegalArgumentException if <code>in</code> does not implement
+ * {@link HierarchyResolver} and {@link MultiIndexReader}.
+ */
+ public JackrabbitIndexReader(IndexReader in) {
+ super(in);
+ if (!(in instanceof MultiIndexReader)) {
+ throw new IllegalArgumentException("IndexReader must also implement
MultiIndexReader");
+ }
+ if (!(in instanceof HierarchyResolver)) {
+ throw new IllegalArgumentException("IndexReader must also implement
HierarchyResolver");
+ }
+ this.resolver = (HierarchyResolver) in;
+ this.reader = (MultiIndexReader) in;
+ }
+
+ // --------------------------< FilterIndexReader >---------------------------
+
+ /**
+ * Calls release on the underlying {@link MultiIndexReader} instead of closing
+ * it.
+ *
+ * @throws IOException if an error occurs while releasing the underlying
+ * index reader.
+ */
+ protected void doClose() throws IOException {
+ reader.release();
+ }
+
+ // ------------------------< HierarchyResolver >-----------------------------
+
+ /**
+ * {@inheritDoc}
+ */
+ public int getParent(int n) throws IOException {
+ return resolver.getParent(n);
+ }
+
+ // -------------------------< MultiIndexReader >-----------------------------
+
+ /**
+ * {@inheritDoc}
+ */
+ public IndexReader[] getIndexReaders() {
+ return reader.getIndexReaders();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public ForeignSegmentDocId createDocId(String uuid) throws IOException {
+ return reader.createDocId(uuid);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public int getDocumentNumber(ForeignSegmentDocId docId) throws IOException {
+ return reader.getDocumentNumber(docId);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void release() throws IOException {
+ reader.release();
+ }
+}
Property changes on:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JackrabbitIndexReader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
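In practice this lets client code keep the usual close() idiom while the shared reader stays open for reuse. A hedged usage sketch (the runQuery helper is hypothetical and assumed to live in the same package; any reader implementing both MultiIndexReader and HierarchyResolver, such as a CachingMultiIndexReader, would be a valid argument):

import java.io.IOException;

import org.apache.lucene.index.IndexReader;

public class JackrabbitIndexReaderDemo
{
   static void runQuery(IndexReader in) throws IOException
   {
      // wrapping turns a later close() into release() on the shared reader
      IndexReader reader = new JackrabbitIndexReader(in);
      try
      {
         System.out.println("documents visible to this search: " + reader.numDocs());
      }
      finally
      {
         reader.close(); // forwards to release(); the shared reader stays reusable
      }
   }
}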
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,6 +16,17 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.apache.commons.collections.iterators.EmptyIterator;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
+import org.exoplatform.services.jcr.datamodel.ItemData;
+import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.exoplatform.services.jcr.impl.Constants;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
@@ -33,18 +44,6 @@
import javax.jcr.ItemNotFoundException;
import javax.jcr.RepositoryException;
-import org.apache.commons.collections.iterators.EmptyIterator;
-import org.exoplatform.services.log.Log;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-
-import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
-import org.exoplatform.services.jcr.datamodel.ItemData;
-import org.exoplatform.services.jcr.datamodel.NodeData;
-import org.exoplatform.services.jcr.impl.Constants;
-import org.exoplatform.services.log.ExoLogger;
-
/**
* A <code>MultiIndex</code> consists of a {@link VolatileIndex} and multiple
* {@link PersistentIndex}es. The goal is to keep most parts of the index open
@@ -57,16 +56,20 @@
* then added to the list of already existing persistent indexes. Further
* operations on the new persistent index will however only require an
* <code>IndexReader</code> which serves for queries but also for delete
- * operations on the index. <p/> The persistent indexes are merged from time to
- * time. The merge behaviour is configurable using the methods:
- * {@link SearchIndex#setMaxMergeDocs(int)},
+ * operations on the index.
+ * <p/>
+ * The persistent indexes are merged from time to time. The merge behaviour is
+ * configurable using the methods: {@link SearchIndex#setMaxMergeDocs(int)},
* {@link SearchIndex#setMergeFactor(int)} and
* {@link SearchIndex#setMinMergeDocs(int)}. For detailed description of the
* configuration parameters see also the lucene <code>IndexWriter</code> class.
- * <p/> This class is thread-safe. <p/> Note on implementation: Multiple
- * modifying threads are synchronized on a <code>MultiIndex</code> instance
- * itself. Sychronization between a modifying thread and reader threads is done
- * using {@link #updateMonitor} and {@link #updateInProgress}.
+ * <p/>
+ * This class is thread-safe.
+ * <p/>
+ * Note on implementation: Multiple modifying threads are synchronized on a
+ * <code>MultiIndex</code> instance itself. Synchronization between a modifying
+ * thread and reader threads is done using {@link #updateMonitor} and
+ * {@link #updateInProgress}.
*/
public class MultiIndex
{
@@ -207,6 +210,11 @@
private final IndexFormatVersion version;
/**
+ * true if index upgrade allowed.
+ */
+ private final boolean isUpgradeIndex;
+
+ /**
* Creates a new MultiIndex.
*
* @param indexDir the base file system
@@ -223,6 +231,7 @@
this.handler = handler;
this.cache = new DocNumberCache(handler.getQueryHandlerConfig().getCacheSize());
this.redoLog = new RedoLog(new File(indexDir, REDO_LOG));
+ this.isUpgradeIndex = handler.getQueryHandlerConfig().isUpgradeIndex();
log.info("Index dir = " + indexDir.getAbsolutePath());
log.info("Redo log = " + (new File(indexDir,
REDO_LOG).getAbsoluteFile()));
@@ -265,7 +274,8 @@
continue;
}
PersistentIndex index =
- new PersistentIndex(indexNames.getName(i), sub, handler.getTextAnalyzer(), cache, indexingQueue);
+ new PersistentIndex(indexNames.getName(i), sub, handler.getTextAnalyzer(), cache, indexingQueue,
+ isUpgradeIndex);
index.setMaxMergeDocs(handler.getQueryHandlerConfig().getMaxMergeDocs());
index.setMergeFactor(handler.getQueryHandlerConfig().getMergeFactor());
index.setMinMergeDocs(handler.getQueryHandlerConfig().getMinMergeDocs());
@@ -279,7 +289,7 @@
resetVolatileIndex();
// set index format version
- IndexReader reader = getIndexReader();
+ CachingMultiIndexReader reader = getIndexReader();
try
{
version = IndexFormatVersion.getVersion(reader);
@@ -287,7 +297,7 @@
}
finally
{
- reader.close();
+ reader.release();
}
redoLogApplied = redoLog.hasEntries();
@@ -354,14 +364,14 @@
return volatileIndex.getNumDocuments();
}
- IndexReader reader = getIndexReader();
+ CachingMultiIndexReader reader = getIndexReader();
try
{
return reader.numDocs();
}
finally
{
- reader.close();
+ reader.release();
}
}
@@ -393,7 +403,7 @@
{
// traverse and index workspace
executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
+ NodeData rootState = (NodeData)stateMgr.getItemData(rootId);
createIndex(rootState, stateMgr);
executeAndLog(new Commit(getTransactionId()));
scheduleFlushTask();
@@ -465,11 +475,7 @@
{
updateInProgress = false;
updateMonitor.notifyAll();
- if (multiReader != null)
- {
- multiReader.close();
- multiReader = null;
- }
+ releaseMultiReader();
}
}
}
@@ -483,8 +489,7 @@
*/
void addDocument(Document doc) throws IOException
{
- List<Document> add = Arrays.asList(new Document[]
- {doc});
+ List<Document> add = Arrays.asList(new Document[]{doc});
update(EmptyIterator.INSTANCE, add.iterator());
}
@@ -496,8 +501,7 @@
*/
void removeDocument(String uuid) throws IOException
{
- List<String> remove = Arrays.asList(new String[]
- {uuid});
+ List<String> remove = Arrays.asList(new String[]{uuid});
update(remove.iterator(), EmptyIterator.INSTANCE);
}
@@ -546,11 +550,7 @@
{
updateInProgress = false;
updateMonitor.notifyAll();
- if (multiReader != null)
- {
- multiReader.close();
- multiReader = null;
- }
+ releaseMultiReader();
}
}
return num;
@@ -560,10 +560,11 @@
* Returns <code>IndexReader</code>s for the indexes named
* <code>indexNames</code>. An <code>IndexListener</code> is registered and
* notified when documents are deleted from one of the indexes in
- * <code>indexNames</code>. <p/> Note: the number of <code>IndexReaders</code>
- * returned by this method is not necessarily the same as the number of index
- * names passed. An index might have been deleted and is not reachable
- * anymore.
+ * <code>indexNames</code>.
+ * <p/>
+ * Note: the number of <code>IndexReaders</code> returned by this method is
+ * not necessarily the same as the number of index names passed. An index
+ * might have been deleted and is not reachable anymore.
*
* @param indexNames the names of the indexes for which to obtain readers.
* @param listener the listener to notify when documents are deleted.
@@ -589,17 +590,17 @@
}
catch (IOException e)
{
- // close readers obtained so far
+ // release readers obtained so far
for (Iterator<ReadOnlyIndexReader> it = indexReaders.keySet().iterator(); it.hasNext();)
{
ReadOnlyIndexReader reader = it.next();
try
{
- reader.close();
+ reader.release();
}
catch (IOException ex)
{
- log.warn("Exception closing index reader: " + ex);
+ log.warn("Exception releasing index reader: " + ex);
}
indexReaders.get(reader).resetListener();
}
@@ -641,7 +642,8 @@
{
sub = new File(indexDir, indexName);
}
- PersistentIndex index = new PersistentIndex(indexName, sub, handler.getTextAnalyzer(), cache, indexingQueue);
+ PersistentIndex index =
+ new PersistentIndex(indexName, sub, handler.getTextAnalyzer(), cache, indexingQueue, isUpgradeIndex);
index.setMaxMergeDocs(handler.getQueryHandlerConfig().getMaxMergeDocs());
index.setMergeFactor(handler.getQueryHandlerConfig().getMergeFactor());
index.setMinMergeDocs(handler.getQueryHandlerConfig().getMinMergeDocs());
@@ -743,11 +745,7 @@
{
updateInProgress = false;
updateMonitor.notifyAll();
- if (multiReader != null)
- {
- multiReader.close();
- multiReader = null;
- }
+ releaseMultiReader();
}
}
}
@@ -772,7 +770,7 @@
{
if (multiReader != null)
{
- multiReader.incrementRefCount();
+ multiReader.acquire();
return multiReader;
}
// no reader available
@@ -805,7 +803,7 @@
ReadOnlyIndexReader[] readers = readerList.toArray(new ReadOnlyIndexReader[readerList.size()]);
multiReader = new CachingMultiIndexReader(readers, cache);
}
- multiReader.incrementRefCount();
+ multiReader.acquire();
return multiReader;
}
}
@@ -841,7 +839,7 @@
{
try
{
- multiReader.close();
+ releaseMultiReader();
}
catch (IOException e)
{
@@ -923,7 +921,7 @@
throw new ItemNotFoundException("Item id=" + id + " not
found");
if (!data.isNode())
throw new RepositoryException("Item with id " + id + " is not a
node");
- return createDocument((NodeData) data);
+ return createDocument((NodeData)data);
}
/**
@@ -940,8 +938,10 @@
/**
* Removes the <code>index</code> from the list of active sub indexes. The
* Index is not actually deleted right away, but postponed to the transaction
- * commit. <p/> This method does not close the index, but rather expects that
- * the index has already been closed.
+ * commit.
+ * <p/>
+ * This method does not close the index, but rather expects that the index has
+ * already been closed.
*
* @param index the index to delete.
*/
@@ -1234,7 +1234,7 @@
long idleTime = System.currentTimeMillis() - lastFlushTime;
// do not flush if volatileIdleTime is zero or negative
if (handler.getQueryHandlerConfig().getVolatileIdleTime() > 0
- && idleTime > handler.getQueryHandlerConfig().getVolatileIdleTime() * 1000)
+ && idleTime > handler.getQueryHandlerConfig().getVolatileIdleTime() * 1000)
{
try
{
@@ -1255,11 +1255,7 @@
{
updateInProgress = false;
updateMonitor.notifyAll();
- if (multiReader != null)
- {
- multiReader.close();
- multiReader = null;
- }
+ releaseMultiReader();
}
}
}
@@ -1564,8 +1560,7 @@
/**
* Adds an index to the MultiIndex's active persistent index list.
*/
- private static class AddIndex
- extends Action
+ private static class AddIndex extends Action
{
/**
@@ -1633,15 +1628,14 @@
/**
* Adds a node to the index.
*/
- private static class AddNode
- extends Action
+ private static class AddNode extends Action
{
/**
* The maximum length of an AddNode String.
*/
private static final int ENTRY_LENGTH =
- Long.toString(Long.MAX_VALUE).length() + Action.ADD_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
+ Long.toString(Long.MAX_VALUE).length() + Action.ADD_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
/**
* The uuid of the node to add.
@@ -1718,8 +1712,7 @@
}
if (doc != null)
{
- index.volatileIndex.addDocuments(new Document[]
- {doc});
+ index.volatileIndex.addDocuments(new Document[]{doc});
}
}
@@ -1741,8 +1734,7 @@
/**
* Commits a transaction.
*/
- private static class Commit
- extends Action
+ private static class Commit extends Action
{
/**
@@ -1790,8 +1782,7 @@
* Creates an new sub index but does not add it to the active persistent index
* list.
*/
- private static class CreateIndex
- extends Action
+ private static class CreateIndex extends Action
{
/**
@@ -1879,8 +1870,7 @@
/**
* Closes and deletes an index that is no longer in use.
*/
- private static class DeleteIndex
- extends Action
+ private static class DeleteIndex extends Action
{
/**
@@ -1951,16 +1941,14 @@
/**
* Deletes a node from the index.
*/
- private static class DeleteNode
- extends Action
+ private static class DeleteNode extends Action
{
/**
* The maximum length of a DeleteNode String.
*/
private static final int ENTRY_LENGTH =
- Long.toString(Long.MAX_VALUE).length() + Action.DELETE_NODE.length() + Constants.UUID_FORMATTED_LENGTH
- + 2;
+ Long.toString(Long.MAX_VALUE).length() + Action.DELETE_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
/**
* The uuid of the node to remove.
@@ -2053,8 +2041,7 @@
/**
* Starts a transaction.
*/
- private static class Start
- extends Action
+ private static class Start extends Action
{
/**
@@ -2101,8 +2088,7 @@
/**
* Commits the volatile index to disk.
*/
- private static class VolatileCommit
- extends Action
+ private static class VolatileCommit extends Action
{
/**
@@ -2165,4 +2151,32 @@
{
return indexDir;
}
+
+ /**
+ * Releases the {@link #multiReader} and sets it <code>null</code>. If the
+ * reader is already <code>null</code> this method does nothing. When this
+ * method returns {@link #multiReader} is guaranteed to be <code>null</code>
+ * even if an exception is thrown.
+ * <p/>
+ * Please note that this method does not take care of any synchronization. A
+ * caller must ensure that it is the only thread operating on this multi
+ * index, or that it holds the {@link #updateMonitor}.
+ *
+ * @throws IOException if an error occurs while releasing the reader.
+ */
+ void releaseMultiReader() throws IOException
+ {
+ if (multiReader != null)
+ {
+ try
+ {
+ multiReader.release();
+ }
+ finally
+ {
+ multiReader = null;
+ }
+ }
+ }
+
}
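The recurring change in this file is the move from multiReader.close() to an acquire()/release() protocol: getIndexReader() hands out the shared CachingMultiIndexReader only after acquire(), every consumer must balance that with release(), and releaseMultiReader() above drops the MultiIndex's own reference. A minimal sketch of the reference-counting contract this relies on (a simplified, hypothetical class; the real bookkeeping lives in the reader wrappers this commit touches):

import java.io.IOException;

final class RefCountedReaderSketch
{
   private int refCount = 1; // the creator holds the initial reference

   synchronized void acquire()
   {
      refCount++;
   }

   synchronized void release() throws IOException
   {
      if (--refCount == 0)
      {
         // last reference gone: only now close the underlying IndexReader
      }
   }
}

Because a running query can still hold a reference, setting the field to null no longer closes the reader out from under concurrent readers; the actual close happens when the last holder releases.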
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndexReader.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndexReader.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,43 +16,40 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-
import org.apache.lucene.index.IndexReader;
+import java.io.IOException;
+
/**
- * <code>MultiIndexReader</code> exposes methods to get access to the contained {@link IndexReader}s
- * of this <code>MultiIndexReader</code>.
+ * <code>MultiIndexReader</code> exposes methods to get access to the contained
+ * {@link IndexReader}s of this <code>MultiIndexReader</code>.
*/
-public interface MultiIndexReader
-{
+public interface MultiIndexReader extends ReleaseableIndexReader {
- /**
- * @return the <code>IndexReader</code>s that are contained in this <code>MultiIndexReader</code>.
- */
- public IndexReader[] getIndexReaders();
+ /**
+ * @return the <code>IndexReader</code>s that are contained in this
+ * <code>MultiIndexReader</code>.
+ */
+ public IndexReader[] getIndexReaders();
- /**
- * Creates a document id for the given <code>uuid</code>.
- *
- * @param uuid
- * the uuid of the node.
- * @return a foreign segment doc id or <code>null</code> if there is no node with the given
- * <code>uuid</code>.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public ForeignSegmentDocId createDocId(String uuid) throws IOException;
+ /**
+ * Creates a document id for the given <code>uuid</code>.
+ *
+ * @param uuid the uuid of the node.
+ * @return a foreign segment doc id or <code>null</code> if there is no node
+ * with the given <code>uuid</code>.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public ForeignSegmentDocId createDocId(String uuid) throws IOException;
- /**
- * Returns the document number for the passed <code>docId</code>. If the id is invalid
- * <code>-1</code> is returned.
- *
- * @param docId
- * the document id to resolve.
- * @return the document number or <code>-1</code> if it is invalid (e.g. does not exist).
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public int getDocumentNumber(ForeignSegmentDocId docId) throws IOException;
+ /**
+ * Returns the document number for the passed <code>docId</code>. If the id is
+ * invalid <code>-1</code> is returned.
+ *
+ * @param docId the document id to resolve.
+ * @return the document number or <code>-1</code> if it is invalid (e.g. does
+ * not exist).
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public int getDocumentNumber(ForeignSegmentDocId docId) throws IOException;
}
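A short usage sketch of the two lookup methods (the resolve helper is hypothetical and assumed to live in the same package; ForeignSegmentDocId and the -1 conventions are exactly those documented above):

import java.io.IOException;

public class DocIdLookupDemo
{
   static int resolve(MultiIndexReader reader, String uuid) throws IOException
   {
      // map the node uuid to a foreign segment doc id, if the node is indexed
      ForeignSegmentDocId docId = reader.createDocId(uuid);
      if (docId == null)
      {
         return -1; // no node with this uuid in the index
      }
      // translate it to a document number; -1 if the id is no longer valid
      return reader.getDocumentNumber(docId);
   }
}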
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,21 +16,8 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.Reader;
-import java.util.Calendar;
-import java.util.List;
-
-import javax.jcr.NamespaceException;
-import javax.jcr.PropertyType;
-import javax.jcr.RepositoryException;
-import javax.jcr.ValueFormatException;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-
import org.exoplatform.services.document.DocumentReader;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.document.HandlerNotFoundException;
@@ -47,7 +34,19 @@
import org.exoplatform.services.jcr.impl.core.value.ValueFactoryImpl;
import org.exoplatform.services.jcr.impl.dataflow.AbstractValueData;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.Reader;
+import java.util.Calendar;
+import java.util.List;
+
+import javax.jcr.NamespaceException;
+import javax.jcr.PropertyType;
+import javax.jcr.RepositoryException;
+import javax.jcr.ValueFormatException;
+
/**
* Creates a lucene <code>Document</code> object from a {@link javax.jcr.Node}.
*/
@@ -118,7 +117,7 @@
* @param extractor content extractor
*/
public NodeIndexer(NodeData node, ItemDataConsumer stateProvider, NamespaceMappings mappings,
- DocumentReaderService extractor)
+ DocumentReaderService extractor)
{
this.node = node;
this.stateProvider = stateProvider;
@@ -184,7 +183,7 @@
// special fields UUID
doc.add(new Field(FieldNames.UUID, node.getIdentifier(), Field.Store.YES, Field.Index.NO_NORMS,
- Field.TermVector.NO));
+ Field.TermVector.NO));
try
{
// parent UUID
@@ -197,7 +196,7 @@
else
{
doc.add(new Field(FieldNames.PARENT, node.getParentIdentifier(), Field.Store.YES, Field.Index.NO_NORMS,
- Field.TermVector.NO));
+ Field.TermVector.NO));
String name = resolver.createJCRName(node.getQPath().getName()).getAsString();
doc.add(new Field(FieldNames.LABEL, name, Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
@@ -256,27 +255,27 @@
// seems nt:file found, try for nt:resource props
PropertyData pmime =
- (PropertyData) stateProvider.getItemData(node, new QPathEntry(Constants.JCR_MIMETYPE, 0));
+ (PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_MIMETYPE, 0));
if (pmime != null)
{
// index if have jcr:mimeType sibling for this binary property only
try
{
DocumentReader dreader =
- extractor.getDocumentReader(new String(pmime.getValues().get(0).getAsByteArray()));
+ extractor.getDocumentReader(new String(pmime.getValues().get(0).getAsByteArray()));
// ok, have a reader
// if the prop was obtained from cache it will contain values,
// otherwise read prop with values from DM
data =
- prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData) stateProvider.getItemData(
- node, new QPathEntry(Constants.JCR_DATA, 0))).getValues();
+ prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData)stateProvider.getItemData(node,
+ new QPathEntry(Constants.JCR_DATA, 0))).getValues();
if (data == null)
log.warn("null value found at property " +
prop.getQPath().getAsString());
// check the jcr:encoding property
PropertyData encProp =
- (PropertyData) stateProvider.getItemData(node, new QPathEntry(Constants.JCR_ENCODING, 0));
+ (PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_ENCODING, 0));
if (encProp != null)
{
@@ -360,8 +359,8 @@
// WARN. DON'T USE access item BY PATH - it may be a node in case of
// residual definitions in NT
List<ValueData> data =
- prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData) stateProvider.getItemData(prop
- .getIdentifier())).getValues();
+ prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData)stateProvider.getItemData(prop
+ .getIdentifier())).getValues();
if (data == null)
log.warn("null value found at property " +
prop.getQPath().getAsString());
@@ -371,7 +370,7 @@
for (ValueData value : data)
{
- val = (ExtendedValue) vFactory.loadValue(((AbstractValueData) value).createTransientCopy(), propType);
+ val = (ExtendedValue)vFactory.loadValue(((AbstractValueData)value).createTransientCopy(), propType);
switch (propType)
{
@@ -422,7 +421,7 @@
else
{
addStringValue(doc, fieldName, val.getString(), true, isIncludedInNodeIndex(name),
- getPropertyBoost(name));
+ getPropertyBoost(name));
}
}
break;
@@ -430,7 +429,7 @@
// jcr:primaryType and jcr:mixinTypes are required for correct
// node type resolution in queries
if (isIndexed(name) || name.equals(Constants.JCR_PRIMARYTYPE)
- || name.equals(Constants.JCR_MIXINTYPES))
+ || name.equals(Constants.JCR_MIXINTYPES))
{
addNameValue(doc, fieldName, val.getString());
}
@@ -440,6 +439,12 @@
default :
throw new IllegalArgumentException("illegal internal value type
" + propType);
}
+ // add length
+ // add not planned
+ // if (indexFormatVersion.getVersion() >= IndexFormatVersion.V3.getVersion())
+ //{
+ // addLength(doc, fieldName, value, propType);
+ //}
}
if (data.size() > 1)
// real multi-valued
@@ -449,7 +454,7 @@
{
e.printStackTrace();
throw new RepositoryException("Index of property value error. " +
prop.getQPath().getAsString() + ". " + e,
- e);
+ e);
}
}
}
@@ -467,12 +472,12 @@
*/
@Deprecated
private void addValue(Document doc, ValueData value, InternalQName name, int propertyType)
- throws ValueFormatException, IllegalStateException, RepositoryException
+ throws ValueFormatException, IllegalStateException, RepositoryException
{
String fieldName = resolver.createJCRName(name).getAsString();
ExtendedValue val = null;
if (PropertyType.BINARY != propertyType)
- val = (ExtendedValue) vFactory.loadValue(((AbstractValueData) value).createTransientCopy(), propertyType);
+ val = (ExtendedValue)vFactory.loadValue(((AbstractValueData)value).createTransientCopy(), propertyType);
switch (propertyType)
{
case PropertyType.BINARY :
@@ -529,7 +534,7 @@
else
{
addStringValue(doc, fieldName, val.getString(), true, isIncludedInNodeIndex(name),
- getPropertyBoost(name));
+ getPropertyBoost(name));
}
}
break;
@@ -545,7 +550,7 @@
break;
default :
throw new IllegalArgumentException("illegal internal value type:"
- + ExtendedPropertyType.nameFromValue(propertyType));
+ + ExtendedPropertyType.nameFromValue(propertyType));
}
}
@@ -582,7 +587,7 @@
try
{
PropertyData prop =
- (PropertyData) stateProvider.getItemData(node, new QPathEntry(Constants.JCR_MIMETYPE, 0));
+ (PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_MIMETYPE, 0));
if (prop != null)
{
List<ValueData> values = prop.getValues();
@@ -598,7 +603,7 @@
// check the jcr:encoding property
PropertyData encProp =
- (PropertyData) stateProvider.getItemData(node, new QPathEntry(Constants.JCR_ENCODING, 0));
+ (PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_ENCODING, 0));
if (encProp != null)
{
ValueData encValue = encProp.getValues().get(0);
@@ -678,8 +683,8 @@
protected Field createFieldWithoutNorms(String fieldName, String internalValue, boolean store)
{
Field field =
- new Field(FieldNames.PROPERTIES, FieldNames.createNamedValue(fieldName, internalValue), true
- ? Field.Store.YES : Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO);
+ new Field(FieldNames.PROPERTIES, FieldNames.createNamedValue(fieldName, internalValue), true ? Field.Store.YES
+ : Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO);
return field;
}
@@ -694,7 +699,7 @@
*/
protected void addCalendarValue(Document doc, String fieldName, Object internalValue)
{
- Calendar value = (Calendar) internalValue;
+ Calendar value = (Calendar)internalValue;
long millis = value.getTimeInMillis();
doc.add(createFieldWithoutNorms(fieldName, DateField.timeToString(millis), false));
}
@@ -710,7 +715,7 @@
*/
protected void addDoubleValue(Document doc, String fieldName, Object internalValue)
{
- double doubleVal = ((Double) internalValue).doubleValue();
+ double doubleVal = ((Double)internalValue).doubleValue();
doc.add(createFieldWithoutNorms(fieldName, DoubleField.doubleToString(doubleVal), false));
}
@@ -724,11 +729,29 @@
*/
protected void addLongValue(Document doc, String fieldName, Object internalValue)
{
- long longVal = ((Long) internalValue).longValue();
+ long longVal = ((Long)internalValue).longValue();
doc.add(createFieldWithoutNorms(fieldName, LongField.longToString(longVal), false));
}
/**
+ * Adds a {@link FieldNames#PROPERTY_LENGTHS} field to <code>document</code>
+ * with a named length value.
+ *
+ * @param doc the lucene document.
+ * @param propertyName the property name.
+ * @param value the internal value.
+ * @param type the JCR property type of <code>value</code>.
+ */
+ protected void addLength(Document doc, String propertyName, ValueData value, int type)
+ {
+ long length = Util.getLength(value, type);
+ if (length != -1)
+ {
+ doc.add(new Field(FieldNames.PROPERTY_LENGTHS, FieldNames.createNamedLength(propertyName, length),
+ Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
+ }
+ }
+
+ /**
* Adds the reference value to the document as the named field. The value's
* string representation is added as the reference data. Additionally the
* reference data is stored in the index.
@@ -787,10 +810,10 @@
* @param boost the boost value for this string field.
*/
protected void addStringValue(Document doc, String fieldName, Object internalValue, boolean tokenized,
- boolean includeInNodeIndex, float boost)
+ boolean includeInNodeIndex, float boost)
{
// simple String
- String stringValue = (String) internalValue;
+ String stringValue = (String)internalValue;
doc.add(createFieldWithoutNorms(fieldName, stringValue, false));
if (tokenized)
{
@@ -800,8 +823,8 @@
}
// create fulltext index on property
Field f =
-         new Field(FieldNames.createFullTextFieldName(fieldName), stringValue, Field.Store.NO,
-            Field.Index.TOKENIZED, Field.TermVector.NO);
+         new Field(FieldNames.createFullTextFieldName(fieldName), stringValue, Field.Store.NO,
+ Field.Index.TOKENIZED, Field.TermVector.NO);
f.setBoost(boost);
doc.add(f);
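
The new addLength() method above writes one extra term per property into the FieldNames.PROPERTY_LENGTHS field, pairing the property name with its length. A minimal sketch of how such a term could be matched at query time, assuming FieldNames.createNamedLength() produces the same string on read as on write; the helper method and the "jcr:data"/1024 values below are illustrative, not part of this commit:

   import org.apache.lucene.index.Term;
   import org.apache.lucene.search.Query;
   import org.apache.lucene.search.TermQuery;

   // Hypothetical helper: builds a query matching nodes whose property
   // <propertyName> was indexed by addLength() with exactly <length>.
   static Query propertyLengthQuery(String propertyName, long length)
   {
      return new TermQuery(new Term(FieldNames.PROPERTY_LENGTHS, FieldNames.createNamedLength(propertyName, length)));
   }

   // e.g. propertyLengthQuery("jcr:data", 1024)
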
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,9 +16,6 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.File;
-import java.io.IOException;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -28,45 +25,46 @@
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NativeFSLockFactory;
+import java.io.File;
+import java.io.IOException;
+
/**
- * Implements a lucene index which is based on a {@link org.apache.jackrabbit.core.fs.FileSystem}.
+ * Implements a lucene index which is based on a
+ * {@link org.apache.jackrabbit.core.fs.FileSystem}.
*/
-class PersistentIndex
- extends AbstractIndex
+class PersistentIndex extends AbstractIndex
{
/** The name of this persistent index */
private final String name;
/**
-    * If non <code>null</code>, <code>listener</code> needs to be informed when a document is
-    * deleted.
+    * If non <code>null</code>, <code>listener</code> needs to be informed when a
+ * document is deleted.
*/
private IndexListener listener;
/**
-    * Creates a new <code>PersistentIndex</code> based on the file system <code>indexDir</code>.
+ * Creates a new <code>PersistentIndex</code> based on the file system
+ * <code>indexDir</code>.
*
- * @param name
- * the name of this index.
- * @param indexDir
- * the directory to store the index.
- * @param analyzer
- * the analyzer for text tokenizing.
- * @param cache
- * the document number cache
- * @param indexingQueue
- * the indexing queue.
- * @throws IOException
- * if an error occurs while opening / creating the index.
- * @throws IOException
- * if an error occurs while opening / creating the index.
+ * @param name the name of this index.
+ * @param indexDir the directory to store the index.
+ * @param analyzer the analyzer for text tokenizing.
+ * @param cache the document number cache
+ * @param indexingQueue the indexing queue.
+ * @throws IOException if an error occurs while opening / creating the index.
+ * @throws IOException if an error occurs while opening / creating the index.
*/
-   PersistentIndex(String name, File indexDir, Analyzer analyzer, DocNumberCache cache, IndexingQueue indexingQueue)
-      throws IOException
+   PersistentIndex(String name, File indexDir, Analyzer analyzer, DocNumberCache cache, IndexingQueue indexingQueue,
+      boolean isUpgradeIndex) throws IOException
   {
      super(analyzer, FSDirectory.getDirectory(indexDir, new NativeFSLockFactory(indexDir)), cache, indexingQueue);
this.name = name;
+ if (isExisting())
+ {
+ IndexMigration.migrate(this, indexDir, isUpgradeIndex);
+ }
}
/**
@@ -83,32 +81,30 @@
}
/**
-    * Merges another index into this persistent index. Before <code>index</code> is merged,
-    * {@link AbstractIndex#commit()} is called on that <code>index</code>.
+    * Merges another index into this persistent index. Before <code>index</code>
+ * is merged, {@link AbstractIndex#commit()} is called on that
+ * <code>index</code>.
*
- * @param index
- * the other index to merge.
- * @throws IOException
- * if an error occurs while merging.
+ * @param index the other index to merge.
+ * @throws IOException if an error occurs while merging.
*/
void mergeIndex(AbstractIndex index) throws IOException
{
// commit changes to directory on other index.
index.commit();
// merge index
- getIndexWriter().addIndexes(new Directory[]
- {index.getDirectory()});
+ getIndexWriter().addIndexes(new Directory[]{index.getDirectory()});
invalidateSharedReader();
}
/**
-    * Merges the provided indexes into this index. After this completes, the index is optimized. <p/>
+ * Merges the provided indexes into this index. After this completes, the
+ * index is optimized.
+ * <p/>
* The provided IndexReaders are not closed.
*
- * @param readers
- * the readers of indexes to add.
- * @throws IOException
- * if an error occurs while adding indexes.
+ * @param readers the readers of indexes to add.
+ * @throws IOException if an error occurs while adding indexes.
*/
void addIndexes(IndexReader[] readers) throws IOException
{
@@ -117,13 +113,12 @@
}
/**
-    * Copies <code>index</code> into this persistent index. This method should only be called when
-    * <code>this</code> index is empty otherwise the behaviour is undefined.
+    * Copies <code>index</code> into this persistent index. This method should
+ * only be called when <code>this</code> index is empty otherwise the
+ * behaviour is undefined.
*
- * @param index
- * the index to copy from.
- * @throws IOException
- * if an error occurs while copying.
+ * @param index the index to copy from.
+ * @throws IOException if an error occurs while copying.
*/
void copyIndex(AbstractIndex index) throws IOException
{
@@ -145,7 +140,7 @@
long remaining = in.length();
while (remaining > 0)
{
- int num = (int) Math.min(remaining, buffer.length);
+ int num = (int)Math.min(remaining, buffer.length);
in.readBytes(buffer, 0, num);
out.writeBytes(buffer, num);
remaining -= num;
@@ -164,14 +159,13 @@
}
/**
-    * Returns a <code>ReadOnlyIndexReader</code> and registeres <code>listener</code> to send
-    * notifications when documents are deleted on <code>this</code> index.
+    * Returns a <code>ReadOnlyIndexReader</code> and registeres
+    * <code>listener</code> to send notifications when documents are deleted on
+ * <code>this</code> index.
*
- * @param listener
- * the listener to notify when documents are deleted.
+ * @param listener the listener to notify when documents are deleted.
* @return a <code>ReadOnlyIndexReader</code>.
- * @throws IOException
- * if the reader cannot be obtained.
+ * @throws IOException if the reader cannot be obtained.
*/
   synchronized ReadOnlyIndexReader getReadOnlyIndexReader(IndexListener listener) throws IOException
{
@@ -192,8 +186,7 @@
* Returns the number of documents in this persistent index.
*
* @return the number of documents in this persistent index.
- * @throws IOException
- * if an error occurs while reading from the index.
+ * @throws IOException if an error occurs while reading from the index.
*/
int getNumDocuments() throws IOException
{
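
The constructor now takes an extra isUpgradeIndex flag and, when the index already exists on disk (isExisting() is true), hands it to the new IndexMigration.migrate() whose body is not part of this hunk. A call-site sketch under those assumptions; the names indexRoot, analyzer, docNumberCache and indexingQueue are illustrative:

   // A freshly created index skips migration; an index written before the
   // Lucene 2.4 upgrade is migrated when the flag is true.
   PersistentIndex idx = new PersistentIndex("index_1", new File(indexRoot, "index_1"),
      analyzer, docNumberCache, indexingQueue, /* isUpgradeIndex */ true);
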
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHits.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHits.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHits.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,130 +16,115 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Hits;
+import java.io.IOException;
+
/**
- * Wraps the lucene <code>Hits</code> object and adds a close method that allows to release
- * resources after a query has been executed and the results have been read completely.
+ * Wraps the lucene <code>Hits</code> object and adds a close method that allows
+ * to release resources after a query has been executed and the results have
+ * been read completely.
*/
-public class QueryHits
-{
+public class QueryHits {
- /**
- * The lucene hits we wrap.
- */
- private final Hits hits;
+ /**
+ * The lucene hits we wrap.
+ */
+ private final Hits hits;
- /**
- * The IndexReader in use by the lucene hits.
- */
- private final IndexReader reader;
+ /**
+ * The IndexReader in use by the lucene hits.
+ */
+ private final IndexReader reader;
- /**
- * Number of results.
- */
- private final int length;
+ /**
+ * Number of results.
+ */
+ private final int length;
- /**
-    * Creates a new <code>QueryHits</code> instance wrapping <code>hits</code>.
- *
- * @param hits
- * the lucene hits.
- * @param reader
- * the IndexReader in use by <code>hits</code>.
- */
- public QueryHits(Hits hits, IndexReader reader)
- {
- this.hits = hits;
- this.reader = reader;
- this.length = hits.length();
- }
+ /**
+   * Creates a new <code>QueryHits</code> instance wrapping <code>hits</code>.
+ *
+ * @param hits the lucene hits.
+ * @param reader the IndexReader in use by <code>hits</code>.
+ */
+ public QueryHits(Hits hits, IndexReader reader) {
+ this.hits = hits;
+ this.reader = reader;
+ this.length = hits.length();
+ }
- /**
- * Releases resources held by this hits instance.
- *
- * @throws IOException
- * if an error occurs while releasing resources.
- */
- public final void close() throws IOException
- {
- reader.close();
- PerQueryCache.getInstance().dispose();
- }
+ /**
+ * Releases resources held by this hits instance.
+ *
+ * @throws IOException if an error occurs while releasing resources.
+ */
+ public final void close() throws IOException {
+ PerQueryCache.getInstance().dispose();
+ Util.closeOrRelease(reader);
+ }
- /**
- * Returns the number of results.
- *
- * @return the number of results.
- */
- public final int length()
- {
- return length;
- }
+ /**
+ * Returns the number of results.
+ *
+ * @return the number of results.
+ */
+ public final int length() {
+ return length;
+ }
- /**
-    * Returns the <code>n</code><sup>th</sup> document in this QueryHits.
-    *
-    * @param n
-    *           index.
-    * @return the <code>n</code><sup>th</sup> document in this QueryHits.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public final Document doc(int n) throws IOException
- {
- return hits.doc(n);
- }
+ /**
+   * Returns the <code>n</code><sup>th</sup> document in this QueryHits.
+   *
+   * @param n index.
+   * @return the <code>n</code><sup>th</sup> document in this QueryHits.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public final Document doc(int n) throws IOException {
+ return hits.doc(n);
+ }
- public String getFieldContent(int n, final String field) throws IOException
- {
- int id = hits.id(n);
+ public String getFieldContent(int n, final String field) throws IOException {
+ int id = hits.id(n);
- FieldSelector fieldSelector = new FieldSelector()
- {
- public FieldSelectorResult accept(String fieldName)
- {
- if (fieldName.equals(field))
- return FieldSelectorResult.LOAD_AND_BREAK;
- return FieldSelectorResult.NO_LOAD;
- }
- };
+ FieldSelector fieldSelector = new FieldSelector() {
+ public FieldSelectorResult accept(String fieldName) {
+ if (fieldName.equals(field))
+ return FieldSelectorResult.LOAD_AND_BREAK;
+ return FieldSelectorResult.NO_LOAD;
+ }
+ };
- Document doc = reader.document(id, fieldSelector);
- if (doc == null)
-         throw new IOException("Document with id " + id + " not found");
- return doc.get(field);
- }
+ Document doc = reader.document(id, fieldSelector);
+ if (doc == null)
+      throw new IOException("Document with id " + id + " not found");
+ return doc.get(field);
+ }
- /**
-    * Returns the score for the <code>n</code><sup>th</sup> document in this QueryHits.
-    *
-    * @param n
-    *           index.
-    * @return the score for the <code>n</code><sup>th</sup> document.
- */
- public final float score(int n) throws IOException
- {
- return hits.score(n);
- }
+ /**
+   * Returns the score for the <code>n</code><sup>th</sup> document in this
+   * QueryHits.
+   *
+   * @param n index.
+   * @return the score for the <code>n</code><sup>th</sup> document.
+ */
+ public final float score(int n) throws IOException {
+ return hits.score(n);
+ }
- /**
-    * Returns the document number for the <code>n</code><sup>th</sup> document in this QueryHits.
-    *
-    * @param n
-    *           index.
-    * @return the document number for the <code>n</code><sup>th</sup> document.
- * @throws IOException
- * if an error occurs.
- */
- public final int id(int n) throws IOException
- {
- return hits.id(n);
- }
+ /**
+   * Returns the document number for the <code>n</code><sup>th</sup> document in
+   * this QueryHits.
+   *
+   * @param n index.
+   * @return the document number for the <code>n</code><sup>th</sup> document.
+ * @throws IOException if an error occurs.
+ */
+ public final int id(int n) throws IOException {
+ return hits.id(n);
+ }
}
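
Two behavioural points in the rewritten close() above: the per-query cache is now disposed first, and the reader is then passed to Util.closeOrRelease() instead of being closed directly. Util's implementation is not shown in this diff; a plausible sketch of the dispatch it performs, inferred only from the ReleaseableIndexReader contract added in this commit:

   // Assumed shape of Util.closeOrRelease() -- not the committed body.
   static void closeOrRelease(IndexReader reader) throws IOException
   {
      if (reader instanceof ReleaseableIndexReader)
      {
         // ref-counted readers are released; the last release closes them
         ((ReleaseableIndexReader)reader).release();
      }
      else
      {
         reader.close();
      }
   }
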
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,387 +16,302 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.util.BitSet;
-
-import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermPositions;
+import java.io.IOException;
+import java.util.BitSet;
+
/**
* Overwrites the methods that would modify the index and throws an
* {@link UnsupportedOperationException} in each of those methods. A
- * <code>ReadOnlyIndexReader</code> will always show all documents that have not been deleted at the
- * time when the index reader is created.
+ * <code>ReadOnlyIndexReader</code> will always show all documents that have not
+ * been deleted at the time when the index reader is created.
*/
-class ReadOnlyIndexReader
- extends FilterIndexReader
-{
+class ReadOnlyIndexReader extends RefCountingIndexReader {
- /**
- * The underlying shared reader.
- */
- private final SharedIndexReader reader;
+ /**
+ * The underlying shared reader.
+ */
+ private final SharedIndexReader reader;
- /**
-    * The deleted documents as initially read from the IndexReader passed in the constructor of this
- * class.
- */
- private final BitSet deleted;
+ /**
+ * The deleted documents as initially read from the IndexReader passed in the
+ * constructor of this class.
+ */
+ private final BitSet deleted;
- /**
- * The version of the index reader from where the deleted BitSet was obtained from.
- */
- private long deletedDocsVersion;
+ /**
+ * The version of the index reader from where the deleted BitSet was obtained
+ * from.
+ */
+ private long deletedDocsVersion;
- /**
- * A reference counter. When constructed the refCount is one.
- */
- private int refCount = 1;
+ /**
+ * Creates a new index reader based on <code>reader</code> at
+ * <code>modificationTick</code>.
+ *
+ * @param reader the underlying <code>IndexReader</code>.
+ * @param deleted the documents that are deleted in <code>reader</code>.
+ * @param deletedDocsVersion the version of the index reader from where the
+ * deleted BitSet was obtained from.
+ */
+  public ReadOnlyIndexReader(SharedIndexReader reader, BitSet deleted, long deletedDocsVersion) {
+ super(reader);
+ this.reader = reader;
+ this.deleted = deleted;
+ this.deletedDocsVersion = deletedDocsVersion;
+ // acquire underlying reader
+ reader.acquire();
+ }
- /**
-    * Creates a new index reader based on <code>reader</code> at <code>modificationTick</code>.
- *
- * @param reader
- * the underlying <code>IndexReader</code>.
- * @param deleted
- * the documents that are deleted in <code>reader</code>.
- * @param deletedDocsVersion
-    *           the version of the index reader from where the deleted BitSet was obtained from.
- */
-   public ReadOnlyIndexReader(SharedIndexReader reader, BitSet deleted, long deletedDocsVersion)
- {
- super(reader);
- this.reader = reader;
- this.deleted = deleted;
- this.deletedDocsVersion = deletedDocsVersion;
- // register this
- reader.addClient(this);
- }
+ /**
+ * @return version of the deleted docs.
+ */
+ long getDeletedDocsVersion() {
+ return deletedDocsVersion;
+ }
- /**
-    * Increments the reference count on this index reader. The reference count is decremented on
- * {@link #close()}.
- */
- synchronized void incrementRefCount()
- {
- refCount++;
- }
+ /**
+ * Returns the tick value when the underlying {@link CachingIndexReader} was
+ * created.
+ *
+ * @return the creation tick for the underlying reader.
+ */
+ long getCreationTick() {
+ return reader.getCreationTick();
+ }
- /**
- * @return the current reference count value.
- */
- synchronized int getRefCount()
- {
- return refCount;
- }
-
- /**
- * @return version of the deleted docs.
- */
- long getDeletedDocsVersion()
- {
- return deletedDocsVersion;
- }
-
- /**
- * Returns the tick value when the underlying {@link CachingIndexReader} was created.
- *
- * @return the creation tick for the underlying reader.
- */
- long getCreationTick()
- {
- return reader.getCreationTick();
- }
-
- /**
-    * Updates the deleted documents in this index reader. When this method returns this index reader
-    * will have the same documents marked as deleted as the passed <code>reader</code>. <p/> This
-    * method is not thread-safe! Make sure no other thread is concurrently using this reader at the
- * same time.
- *
- * @param reader
- * the reader from where to obtain the deleted documents info.
- */
- void updateDeletedDocs(CommittableIndexReader reader)
- {
- int maxDoc = reader.maxDoc();
- for (int i = 0; i < maxDoc; i++)
- {
- if (reader.isDeleted(i))
- {
- deleted.set(i);
- }
+ /**
+ * Updates the deleted documents in this index reader. When this method
+ * returns this index reader will have the same documents marked as deleted as
+ * the passed <code>reader</code>.
+ * <p/>
+ * This method is not thread-safe! Make sure no other thread is concurrently
+ * using this reader at the same time.
+ *
+ * @param reader the reader from where to obtain the deleted documents info.
+ */
+ void updateDeletedDocs(CommittableIndexReader reader) {
+ int maxDoc = reader.maxDoc();
+ for (int i = 0; i < maxDoc; i++) {
+ if (reader.isDeleted(i)) {
+ deleted.set(i);
}
- deletedDocsVersion = reader.getModificationCount();
- }
+ }
+ deletedDocsVersion = reader.getModificationCount();
+ }
- /**
-    * Returns the <code>DocId</code> of the parent of <code>n</code> or {@link DocId#NULL} if
-    * <code>n</code> does not have a parent (<code>n</code> is the root node).
-    *
-    * @param n
-    *           the document number.
-    * @return the <code>DocId</code> of <code>n</code>'s parent.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public DocId getParent(int n) throws IOException
- {
- return getBase().getParent(n, deleted);
- }
+ /**
+   * Returns the <code>DocId</code> of the parent of <code>n</code> or
+   * {@link DocId#NULL} if <code>n</code> does not have a parent (<code>n</code>
+   * is the root node).
+   *
+   * @param n the document number.
+   * @return the <code>DocId</code> of <code>n</code>'s parent.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public DocId getParent(int n) throws IOException {
+ return getBase().getParent(n, deleted);
+ }
- /**
- * Returns the {@link SharedIndexReader} this reader is based on.
- *
- * @return the {@link SharedIndexReader} this reader is based on.
- */
- public SharedIndexReader getBase()
- {
- return (SharedIndexReader) in;
- }
+ /**
+ * Returns the {@link SharedIndexReader} this reader is based on.
+ *
+ * @return the {@link SharedIndexReader} this reader is based on.
+ */
+ public SharedIndexReader getBase() {
+ return (SharedIndexReader) in;
+ }
- // ---------------------< IndexReader overwrites >---------------------------
+ // ---------------------< IndexReader overwrites >---------------------------
- /**
- * Returns true if document <code>n</code> has been deleted
- *
- * @param n
- * the document number
- * @return true if document <code>n</code> has been deleted
- */
- public boolean isDeleted(int n)
- {
- return deleted.get(n);
- }
+ /**
+ * Returns true if document <code>n</code> has been deleted
+ *
+ * @param n the document number
+ * @return true if document <code>n</code> has been deleted
+ */
+ public boolean isDeleted(int n) {
+ return deleted.get(n);
+ }
- /**
- * Returns <code>true</code> if any documents have been deleted.
- *
- * @return <code>true</code> if any documents have been deleted.
- */
- public boolean hasDeletions()
- {
- return !deleted.isEmpty();
- }
+ /**
+ * Returns <code>true</code> if any documents have been deleted.
+ *
+ * @return <code>true</code> if any documents have been deleted.
+ */
+ public boolean hasDeletions() {
+ return !deleted.isEmpty();
+ }
- /**
- * Returns the number of documents in this index reader.
- *
- * @return the number of documents in this index reader.
- */
- public int numDocs()
- {
- return maxDoc() - deleted.cardinality();
- }
+ /**
+ * Returns the number of documents in this index reader.
+ *
+ * @return the number of documents in this index reader.
+ */
+ public int numDocs() {
+ return maxDoc() - deleted.cardinality();
+ }
- /**
- * @exception UnsupportedOperationException
- * always
- */
- protected final void doDelete(int docNum)
- {
- throw new UnsupportedOperationException("IndexReader is read-only");
- }
+ /**
+ * @exception UnsupportedOperationException always
+ */
+ protected final void doDelete(int docNum) {
+ throw new UnsupportedOperationException("IndexReader is read-only");
+ }
- /**
- * @exception UnsupportedOperationException
- * always
- */
- protected final void doUndeleteAll()
- {
- throw new UnsupportedOperationException("IndexReader is read-only");
- }
+ /**
+ * @exception UnsupportedOperationException always
+ */
+ protected final void doUndeleteAll() {
+ throw new UnsupportedOperationException("IndexReader is read-only");
+ }
- /**
- * @exception UnsupportedOperationException
- * always
- */
- protected final void doCommit()
- {
- throw new UnsupportedOperationException("IndexReader is read-only");
- }
+ /**
+ * @exception UnsupportedOperationException always
+ */
+ protected final void doCommit() {
+ throw new UnsupportedOperationException("IndexReader is read-only");
+ }
- /**
-    * Unregisters this reader from the shared index reader if the reference count for this reader
-    * drops to zero. Specifically, this method does <b>not</b> close the underlying index reader,
- * because it is shared by multiple <code>ReadOnlyIndexReader</code>s.
- *
- * @throws IOException
- * if an error occurs while closing the reader.
- */
- protected void doClose() throws IOException
- {
- synchronized (this)
- {
- if (--refCount == 0)
- {
- reader.removeClient(this);
- }
- }
- }
+ /**
+   * Wraps the underlying <code>TermDocs</code> and filters out documents marked
+ * as deleted.<br/>
+ * If <code>term</code> is for a {@link FieldNames#UUID} field and this
+ * <code>ReadOnlyIndexReader</code> does not have such a document,
+ * {@link CachingIndexReader#EMPTY} is returned.
+ *
+ * @param term the term to enumerate the docs for.
+ * @return TermDocs for <code>term</code>.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public TermDocs termDocs(Term term) throws IOException {
+ // do not wrap for empty TermDocs
+ TermDocs td = reader.termDocs(term);
+ if (td != CachingIndexReader.EMPTY) {
+ td = new FilteredTermDocs(td);
+ }
+ return td;
+ }
-    * Wraps the underlying <code>TermDocs</code> and filters out documents marked as deleted.<br/> If
marked as deleted.<br/> If
- * <code>term</code> is for a {@link FieldNames#UUID} field and this
- * <code>ReadOnlyIndexReader</code> does not have such a document,
- * {@link CachingIndexReader#EMPTY} is returned.
- *
- * @param term
- * the term to enumerate the docs for.
- * @return TermDocs for <code>term</code>.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public TermDocs termDocs(Term term) throws IOException
- {
- // do not wrap for empty TermDocs
- TermDocs td = reader.termDocs(term);
- if (td != CachingIndexReader.EMPTY)
- {
- td = new FilteredTermDocs(td);
- }
- return td;
- }
+   * Wraps the underlying <code>TermDocs</code> and filters out documents marked
marked
+ * as deleted.
+ *
+ * @return TermDocs over the whole index.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public TermDocs termDocs() throws IOException {
+ return new FilteredTermDocs(super.termDocs());
+ }
- /**
-    * Wraps the underlying <code>TermDocs</code> and filters out documents marked as deleted.
- *
- * @return TermDocs over the whole index.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public TermDocs termDocs() throws IOException
- {
- return new FilteredTermDocs(super.termDocs());
- }
+ /**
+   * Wraps the underlying <code>TermPositions</code> and filters out documents
+ * marked as deleted.
+ *
+ * @return TermPositions over the whole index.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public TermPositions termPositions() throws IOException {
+ return new FilteredTermPositions(super.termPositions());
+ }
- /**
-    * Wraps the underlying <code>TermPositions</code> and filters out documents marked as deleted.
- *
- * @return TermPositions over the whole index.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public TermPositions termPositions() throws IOException
- {
- return new FilteredTermPositions(super.termPositions());
- }
+ // ----------------------< FilteredTermDocs >--------------------------------
- // ----------------------< FilteredTermDocs >--------------------------------
+ /**
+ * Filters a wrapped TermDocs by omitting documents marked as deleted.
+ */
+ private class FilteredTermDocs extends FilterTermDocs {
- /**
- * Filters a wrapped TermDocs by omitting documents marked as deleted.
- */
- private class FilteredTermDocs
- extends FilterTermDocs
- {
+ /**
+ * Creates a new filtered TermDocs based on <code>in</code>.
+ *
+ * @param in the TermDocs to filter.
+ */
+ public FilteredTermDocs(TermDocs in) {
+ super(in);
+ }
- /**
- * Creates a new filtered TermDocs based on <code>in</code>.
- *
- * @param in
- * the TermDocs to filter.
- */
- public FilteredTermDocs(TermDocs in)
- {
- super(in);
+ /**
+ * @inheritDoc
+ */
+ public boolean next() throws IOException {
+ boolean hasNext = super.next();
+ while (hasNext && deleted.get(super.doc())) {
+ hasNext = super.next();
}
+ return hasNext;
+ }
- /**
- * @inheritDoc
- */
- public boolean next() throws IOException
- {
- boolean hasNext = super.next();
- while (hasNext && deleted.get(super.doc()))
- {
- hasNext = super.next();
- }
- return hasNext;
+ /**
+ * @inheritDoc
+ */
+ public int read(int[] docs, int[] freqs) throws IOException {
+ int count;
+ for (count = 0; count < docs.length && next(); count++) {
+ docs[count] = doc();
+ freqs[count] = freq();
}
+ return count;
+ }
- /**
- * @inheritDoc
- */
- public int read(int[] docs, int[] freqs) throws IOException
- {
- int count;
- for (count = 0; count < docs.length && next(); count++)
- {
- docs[count] = doc();
- freqs[count] = freq();
- }
- return count;
+ /**
+ * @inheritDoc
+ */
+ public boolean skipTo(int i) throws IOException {
+ boolean exists = super.skipTo(i);
+ while (exists && deleted.get(doc())) {
+ exists = next();
}
+ return exists;
+ }
+ }
- /**
- * @inheritDoc
- */
- public boolean skipTo(int i) throws IOException
- {
- boolean exists = super.skipTo(i);
- while (exists && deleted.get(doc()))
- {
- exists = next();
- }
- return exists;
- }
- }
+ // ---------------------< FilteredTermPositions >----------------------------
- // ---------------------< FilteredTermPositions >----------------------------
+ /**
+ * Filters a wrapped TermPositions by omitting documents marked as deleted.
+ */
+  private final class FilteredTermPositions extends FilteredTermDocs implements TermPositions {
- /**
- * Filters a wrapped TermPositions by omitting documents marked as deleted.
- */
- private final class FilteredTermPositions
- extends FilteredTermDocs
- implements TermPositions
- {
+ /**
+ * Creates a new filtered TermPositions based on <code>in</code>.
+ *
+ * @param in the TermPositions to filter.
+ */
+ public FilteredTermPositions(TermPositions in) {
+ super(in);
+ }
- /**
- * Creates a new filtered TermPositions based on <code>in</code>.
- *
- * @param in
- * the TermPositions to filter.
- */
- public FilteredTermPositions(TermPositions in)
- {
- super(in);
- }
+ /**
+ * @inheritDoc
+ */
+ public int nextPosition() throws IOException {
+ return ((TermPositions) this.in).nextPosition();
+ }
- /**
- * @inheritDoc
- */
- public int nextPosition() throws IOException
- {
- return ((TermPositions) this.in).nextPosition();
- }
+ /**
+ * @inheritDoc
+ */
+ public int getPayloadLength() {
+ return ((TermPositions) in).getPayloadLength();
+ }
- /**
- * @inheritDoc
- */
- public int getPayloadLength()
- {
- return ((TermPositions) in).getPayloadLength();
- }
+ /**
+ * @inheritDoc
+ */
+ public byte[] getPayload(byte data[], int offset) throws IOException {
+ return ((TermPositions) in).getPayload(data, offset);
+ }
- /**
- * @inheritDoc
- */
- public byte[] getPayload(byte data[], int offset) throws IOException
- {
- return ((TermPositions) in).getPayload(data, offset);
- }
+ /**
+ * @inheritDoc
+ */
+ public boolean isPayloadAvailable() {
+ return ((TermPositions) in).isPayloadAvailable();
+ }
- /**
- * @inheritDoc
- */
- public boolean isPayloadAvailable()
- {
- return ((TermPositions) in).isPayloadAvailable();
- }
-
- }
+ }
}
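
The net effect of this rewrite: ReadOnlyIndexReader drops its private refCount/incrementRefCount()/doClose() bookkeeping and inherits it from RefCountingIndexReader (added below), while the constructor now acquire()s the underlying SharedIndexReader so both lifetimes are coupled. A caller-side pairing sketch (variable names illustrative):

   ReadOnlyIndexReader reader = persistentIndex.getReadOnlyIndexReader(listener);
   try
   {
      // ... run queries against the reader ...
   }
   finally
   {
      // the last release() closes this wrapper, which in turn releases
      // the shared reader via Util.closeOrRelease(in)
      reader.release();
   }
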
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Recovery.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Recovery.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Recovery.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,192 +16,161 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
-import org.exoplatform.services.log.Log;
-
-import org.exoplatform.services.log.ExoLogger;
-
/**
* Implements the recovery process.
*/
-class Recovery
-{
+class Recovery {
- /**
- * The logger instance for this class.
- */
- private static final Log log = ExoLogger.getLogger(Recovery.class);
+ /**
+ * The logger instance for this class.
+ */
+ private static final Log log = ExoLogger.getLogger(Recovery.class);
- /**
- * The MultiIndex where to run the recovery on.
- */
- private final MultiIndex index;
+ /**
+ * The MultiIndex where to run the recovery on.
+ */
+ private final MultiIndex index;
- /**
- * The redo redoLog.
- */
- private final RedoLog redoLog;
+ /**
+ * The redo redoLog.
+ */
+ private final RedoLog redoLog;
- /**
- * The ids of the uncommitted transactions. Set of Integer objects.
- */
- private final Set losers = new HashSet();
+ /**
+ * The ids of the uncommitted transactions. Set of Integer objects.
+ */
+ private final Set losers = new HashSet();
- /**
- * Creates a new Recovery instance.
- *
- * @param index
- * the MultiIndex to recover.
- * @param redoLog
- * the redo redoLog.
- */
- private Recovery(MultiIndex index, RedoLog redoLog)
- {
- this.index = index;
- this.redoLog = redoLog;
- }
+ /**
+ * Creates a new Recovery instance.
+ *
+ * @param index the MultiIndex to recover.
+ * @param redoLog the redo redoLog.
+ */
+ private Recovery(MultiIndex index, RedoLog redoLog) {
+ this.index = index;
+ this.redoLog = redoLog;
+ }
- /**
-    * Runs a recovery on <code>index</code> if <code>redoLog</code> contains log entries. <p/> If
-    * recovery succeeds the <code>index</code> is flushed and the redo log is cleared. That is, the
-    * <code>index</code> is stable.<br/> If recovery fails an IOException is thrown, and the redo log
-    * will not be modified. The recovery process can then be executed again, after fixing the cause
- * of the IOException (e.g. disk full).
- *
- * @param index
- * the index to recover.
- * @param redoLog
- * the redo log.
- * @throws IOException
- * if the recovery fails.
- */
- static void run(MultiIndex index, RedoLog redoLog) throws IOException
- {
- if (!redoLog.hasEntries())
- {
- log.debug("RedoLog is empty, no recovery needed.");
- return;
- }
- log.info("Found uncommitted redo log. Applying changes now...");
- Recovery r = new Recovery(index, redoLog);
- r.run();
- log.info("Redo changes applied.");
- }
+ /**
+   * Runs a recovery on <code>index</code> if <code>redoLog</code> contains log
+   * entries.
+   * <p/>
+   * If recovery succeeds the <code>index</code> is flushed and the redo log is
+ * cleared. That is, the <code>index</code> is stable.<br/>
+ * If recovery fails an IOException is thrown, and the redo log will not be
+ * modified. The recovery process can then be executed again, after fixing the
+ * cause of the IOException (e.g. disk full).
+ *
+ * @param index the index to recover.
+ * @param redoLog the redo log.
+ * @throws IOException if the recovery fails.
+ */
+ static void run(MultiIndex index, RedoLog redoLog) throws IOException {
+ if (!redoLog.hasEntries()) {
+ log.debug("RedoLog is empty, no recovery needed.");
+ return;
+ }
+ log.info("Found uncommitted redo log. Applying changes now...");
+ Recovery r = new Recovery(index, redoLog);
+ r.run();
+ log.info("Redo changes applied.");
+ }
- /**
- * Runs the recovery process.
- *
- * @throws IOException
- * if the recovery fails.
- */
- private void run() throws IOException
- {
- List actions = redoLog.getActions();
+ /**
+ * Runs the recovery process.
+ *
+ * @throws IOException if the recovery fails.
+ */
+ private void run() throws IOException {
+ List actions = redoLog.getActions();
- // find loser transactions
- for (Iterator it = actions.iterator(); it.hasNext();)
- {
- MultiIndex.Action a = (MultiIndex.Action) it.next();
+ // find loser transactions
+ for (Iterator it = actions.iterator(); it.hasNext();) {
+ MultiIndex.Action a = (MultiIndex.Action) it.next();
- if (a.getType() == MultiIndex.Action.TYPE_START)
- {
- losers.add(new Long(a.getTransactionId()));
- }
- else if (a.getType() == MultiIndex.Action.TYPE_COMMIT)
- {
- losers.remove(new Long(a.getTransactionId()));
- }
+ if (a.getType() == MultiIndex.Action.TYPE_START) {
+ losers.add(new Long(a.getTransactionId()));
+ } else if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
+ losers.remove(new Long(a.getTransactionId()));
}
+ }
- // find last volatile commit without changes from a loser
- int lastSafeVolatileCommit = -1;
- Set transactionIds = new HashSet();
- for (int i = 0; i < actions.size(); i++)
- {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
- if (a.getType() == MultiIndex.Action.TYPE_COMMIT)
- {
- transactionIds.clear();
- }
- else if (a.getType() == MultiIndex.Action.TYPE_VOLATILE_COMMIT)
- {
- transactionIds.retainAll(losers);
- // check if transactionIds contains losers
- if (transactionIds.size() > 0)
- {
- // found dirty volatile commit
- break;
- }
- else
- {
- lastSafeVolatileCommit = i;
- }
- }
- else
- {
- transactionIds.add(new Long(a.getTransactionId()));
- }
+ // find last volatile commit without changes from a loser
+ int lastSafeVolatileCommit = -1;
+ Set transactionIds = new HashSet();
+ for (int i = 0; i < actions.size(); i++) {
+ MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ if (a.getType() == MultiIndex.Action.TYPE_COMMIT) {
+ transactionIds.clear();
+ } else if (a.getType() == MultiIndex.Action.TYPE_VOLATILE_COMMIT) {
+ transactionIds.retainAll(losers);
+ // check if transactionIds contains losers
+ if (transactionIds.size() > 0) {
+ // found dirty volatile commit
+ break;
+ } else {
+ lastSafeVolatileCommit = i;
+ }
+ } else {
+ transactionIds.add(new Long(a.getTransactionId()));
}
+ }
- // delete dirty indexes
- for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++)
- {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
- if (a.getType() == MultiIndex.Action.TYPE_CREATE_INDEX)
- {
- a.undo(index);
- }
+ // delete dirty indexes
+ for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
+ MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ if (a.getType() == MultiIndex.Action.TYPE_CREATE_INDEX) {
+ a.undo(index);
}
+ }
- // replay actions up to last safe volatile commit
- // ignore add node actions, they are included in volatile commits
-      for (int i = 0; i < actions.size() && i <= lastSafeVolatileCommit; i++)
- {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
- switch (a.getType())
- {
- case MultiIndex.Action.TYPE_ADD_INDEX :
- case MultiIndex.Action.TYPE_CREATE_INDEX :
- case MultiIndex.Action.TYPE_DELETE_INDEX :
- case MultiIndex.Action.TYPE_DELETE_NODE :
- // ignore actions by the index merger.
- // the previously created index of a merge has been
- // deleted because it was considered dirty.
- // we are conservative here and let the index merger do
- // its work again.
-               if (a.getTransactionId() == MultiIndex.Action.INTERNAL_TRANS_REPL_INDEXES)
- {
- continue;
- }
- a.execute(index);
- }
+ // replay actions up to last safe volatile commit
+ // ignore add node actions, they are included in volatile commits
+    for (int i = 0; i < actions.size() && i <= lastSafeVolatileCommit; i++) {
+ MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ switch (a.getType()) {
+ case MultiIndex.Action.TYPE_ADD_INDEX:
+ case MultiIndex.Action.TYPE_CREATE_INDEX:
+ case MultiIndex.Action.TYPE_DELETE_INDEX:
+ case MultiIndex.Action.TYPE_DELETE_NODE:
+ // ignore actions by the index merger.
+ // the previously created index of a merge has been
+ // deleted because it was considered dirty.
+ // we are conservative here and let the index merger do
+ // its work again.
+ if (a.getTransactionId() == MultiIndex.Action.INTERNAL_TRANS_REPL_INDEXES) {
+ continue;
+ }
+ a.execute(index);
}
+ }
- // now replay the rest until we encounter a loser transaction
- for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++)
- {
- MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
- if (losers.contains(new Long(a.getTransactionId())))
- {
- break;
- }
- else
- {
- // ignore actions by the index merger.
- if (a.getTransactionId() == MultiIndex.Action.INTERNAL_TRANS_REPL_INDEXES)
- {
- continue;
- }
- a.execute(index);
- }
+ // now replay the rest until we encounter a loser transaction
+ for (int i = lastSafeVolatileCommit + 1; i < actions.size(); i++) {
+ MultiIndex.Action a = (MultiIndex.Action) actions.get(i);
+ if (losers.contains(new Long(a.getTransactionId()))) {
+ break;
+ } else {
+ // ignore actions by the index merger.
+ if (a.getTransactionId() == MultiIndex.Action.INTERNAL_TRANS_REPL_INDEXES) {
+ continue;
+ }
+ a.execute(index);
}
+ }
- // now we are consistent again -> flush
- index.flush();
- }
+ // now we are consistent again -> flush
+ index.flush();
+ index.releaseMultiReader();
+ }
}
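
Beyond the reformatting, the one functional change in Recovery is the index.releaseMultiReader() call after the final flush. For orientation, a worked example of the loser-transaction logic in run() above (transaction ids invented for illustration):

   // Redo log: START(1), ADD_NODE(1), VOLATILE_COMMIT, COMMIT(1), START(2), ADD_NODE(2)
   // Pass 1: tx 1 has both START and COMMIT -> not a loser; tx 2 has only START -> loser.
   // Pass 2: the VOLATILE_COMMIT carries only tx 1 changes -> it is the last safe
   //         volatile commit; replay proceeds up to it and stops before tx 2's actions.
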
Added:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RefCountingIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RefCountingIndexReader.java	(rev 0)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RefCountingIndexReader.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.index.FilterIndexReader;
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+
+/**
+ * <code>RefCountingIndexReader</code>...
+ */
+public class RefCountingIndexReader extends FilterIndexReader implements ReleaseableIndexReader {
+
+ /**
+ * A reference counter. When constructed the refCount is one.
+ */
+ private int refCount = 1;
+
+ public RefCountingIndexReader(IndexReader in) {
+ super(in);
+ }
+
+ /**
+ * Increments the reference count on this index reader. The reference count is
+ * decremented on {@link #release()}.
+ */
+ synchronized final void acquire() {
+ refCount++;
+ }
+
+ /**
+ * @return the current reference count value.
+ */
+ synchronized int getRefCount() {
+ return refCount;
+ }
+
+ // -----------------------< ReleaseableIndexReader >--------------------------
+
+ /**
+ * {@inheritDoc}
+ */
+ public synchronized final void release() throws IOException {
+ if (--refCount == 0) {
+ close();
+ }
+ }
+
+ // -----------------------< FilterIndexReader >--------------------------
+
+ protected void doClose() throws IOException {
+ Util.closeOrRelease(in);
+ }
+}
Property changes on:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RefCountingIndexReader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
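
A lifecycle sketch for the new class (caller code illustrative): the count starts at one for the creator, each further consumer calls acquire(), and doClose() runs only when the final release() brings the count to zero.

   RefCountingIndexReader reader = new RefCountingIndexReader(base); // refCount == 1
   reader.acquire();  // a second consumer takes a reference  -> refCount == 2
   reader.release();  // first consumer done                  -> refCount == 1
   reader.release();  // last consumer done                   -> refCount == 0, doClose() runs
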
Added:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReleaseableIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReleaseableIndexReader.java	(rev 0)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReleaseableIndexReader.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import java.io.IOException;
+
+/**
+ * <code>ReleaseableIndexReader</code>...
+ */
+public interface ReleaseableIndexReader {
+
+ /**
+ * Releases this index reader and potentially frees resources. In contrast to
+ * {@link org.apache.lucene.index.IndexReader#close()} this method does not
+ * necessarily close the index reader, but gives the implementation the
+ * opportunity to do reference counting.
+ *
+ * @throws IOException if an error occurs while releasing the index reader.
+ */
+ public void release() throws IOException;
+}
Property changes on:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReleaseableIndexReader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
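
Implementations that do no reference counting can satisfy the contract by delegating to close(); a minimal conforming example (hypothetical, not part of this commit):

   import org.apache.lucene.index.FilterIndexReader;
   import org.apache.lucene.index.IndexReader;
   import java.io.IOException;

   public class SimpleReleaseableReader extends FilterIndexReader implements ReleaseableIndexReader
   {
      public SimpleReleaseableReader(IndexReader in)
      {
         super(in);
      }

      public void release() throws IOException
      {
         close(); // no counting: releasing is closing
      }
   }
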
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,22 +16,7 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.jcr.RepositoryException;
-import javax.jcr.query.InvalidQueryException;
-
import org.apache.commons.collections.iterators.AbstractIteratorDecorator;
-import org.exoplatform.services.log.Log;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -44,7 +29,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
-
import org.exoplatform.container.configuration.ConfigurationManager;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
@@ -64,1077 +48,960 @@
import org.exoplatform.services.jcr.impl.core.query.QueryHandler;
import org.exoplatform.services.jcr.impl.core.query.QueryHandlerContext;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+import javax.jcr.query.InvalidQueryException;
+
/**
* Implements a {@link org.apache.jackrabbit.core.query.QueryHandler} using
* Lucene.
*/
-public class SearchIndex implements QueryHandler
-{
+public class SearchIndex implements QueryHandler {
-   private static final DefaultQueryNodeFactory DEFAULT_QUERY_NODE_FACTORY = new DefaultQueryNodeFactory();
+  private static final DefaultQueryNodeFactory DEFAULT_QUERY_NODE_FACTORY = new DefaultQueryNodeFactory();
- /** The logger instance for this class */
- private static final Log log = ExoLogger.getLogger(SearchIndex.class);
+ /** The logger instance for this class */
+  private static final Log        log                             = ExoLogger.getLogger(SearchIndex.class);
- /**
- * Name of the file to persist search internal namespace mappings.
- */
- private static final String NS_MAPPING_FILE = "ns_mappings.properties";
+ /**
+ * Name of the file to persist search internal namespace mappings.
+ */
+  private static final String     NS_MAPPING_FILE                 = "ns_mappings.properties";
- /**
- * Default name of the error log file
- */
- private static final String ERROR_LOG = "error.log";
+ /**
+ * Default name of the error log file
+ */
+  private static final String     ERROR_LOG                       = "error.log";
- /**
-    * Indicates if this <code>SearchIndex</code> is closed and cannot be used
- * anymore.
- */
- private boolean closed = false;
+ /**
+ * Indicates if this <code>SearchIndex</code> is closed and cannot be used
+ * anymore.
+ */
+ private boolean closed = false;
- private QueryHandlerContext context;
+ private QueryHandlerContext context;
- /**
- * Text extractor for extracting text content of binary properties.
- */
- private DocumentReaderService extractor;
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ private DocumentReaderService extractor;
- /**
- * The actual index
- */
- private MultiIndex index;
+ /**
+ * The actual index
+ */
+ private MultiIndex index;
- /**
-    * Indicates the index format version which is relevant to a <b>query</b>.
- * This value may be different from what
- * {@link MultiIndex#getIndexFormatVersion()} returns because queries may be
- * executed on two physical indexes with different formats. Index format
- * versions are considered backward compatible. That is, the lower version of
- * the two physical indexes is used for querying.
- */
- private IndexFormatVersion indexFormatVersion;
+ /**
+ * Indicates the index format version which is relevant to a <b>query</b>.
+ * This value may be different from what
+ * {@link MultiIndex#getIndexFormatVersion()} returns because queries may be
+ * executed on two physical indexes with different formats. Index format
+ * versions are considered backward compatible. That is, the lower version of
+ * the two physical indexes is used for querying.
+ */
+ private IndexFormatVersion indexFormatVersion;
- /**
- * The indexing configuration.
- */
- private IndexingConfiguration indexingConfig;
+ /**
+ * The indexing configuration.
+ */
+ private IndexingConfiguration indexingConfig;
- /**
- * The name and path resolver used internally.
- */
- private LocationFactory npResolver;
+ /**
+ * The name and path resolver used internally.
+ */
+ private LocationFactory npResolver;
- /**
- * The namespace mappings used internally.
- */
- private NamespaceMappings nsMappings;
+ /**
+ * The namespace mappings used internally.
+ */
+ private NamespaceMappings nsMappings;
- private final QueryHandlerEntryWrapper queryHandlerConfig;
+ private final QueryHandlerEntryWrapper queryHandlerConfig;
- /**
-    * The spell checker for this query handler or <code>null</code> if none is
- * configured.
- */
- private SpellChecker spellChecker;
+ /**
+   * The spell checker for this query handler or <code>null</code> if none is
+ * configured.
+ */
+ private SpellChecker spellChecker;
- /**
- * The currently set synonym provider.
- */
- private SynonymProvider synProvider;
+ /**
+ * The currently set synonym provider.
+ */
+ private SynonymProvider synProvider;
- private File indexDirectory;
+ private File indexDirectory;
- /**
-    * The ErrorLog of this <code>MultiIndex</code>. All changes that must be in
- * index but interrupted by IOException are here.
- */
- private ErrorLog errorLog;
+ /**
+   * The ErrorLog of this <code>MultiIndex</code>. All changes that must be in
+ * index but interrupted by IOException are here.
+ */
+ private ErrorLog errorLog;
- private final ConfigurationManager cfm;
+ private final ConfigurationManager cfm;
- public SearchIndex(QueryHandlerEntry queryHandlerConfig, ConfigurationManager cfm)
- {
- this.queryHandlerConfig = new QueryHandlerEntryWrapper(queryHandlerConfig);
- this.cfm = cfm;
- }
+ public SearchIndex(QueryHandlerEntry queryHandlerConfig, ConfigurationManager cfm) {
+ this.queryHandlerConfig = new QueryHandlerEntryWrapper(queryHandlerConfig);
+ this.cfm = cfm;
+ }
- /**
- * Adds the <code>node</code> to the search index.
- *
- * @param node the node to add.
- * @throws RepositoryException if an error occurs while indexing the node.
- * @throws IOException if an error occurs while adding the node to the index.
- */
- public void addNode(NodeData node) throws RepositoryException, IOException
- {
- throw new UnsupportedOperationException("addNode");
- }
+ /**
+ * Adds the <code>node</code> to the search index.
+ *
+ * @param node the node to add.
+ * @throws RepositoryException if an error occurs while indexing the node.
+ * @throws IOException if an error occurs while adding the node to the index.
+ */
+ public void addNode(NodeData node) throws RepositoryException, IOException {
+ throw new UnsupportedOperationException("addNode");
+ }
- /**
- * Creates an excerpt provider for the given <code>query</code>.
- *
- * @param query the query.
- * @return an excerpt provider for the given <code>query</code>.
- * @throws IOException if the provider cannot be created.
- */
- public ExcerptProvider createExcerptProvider(Query query) throws IOException
- {
- ExcerptProvider ep = queryHandlerConfig.createExcerptProvider(query);
- ep.init(query, this);
- return ep;
- }
+ /**
+ * Creates an excerpt provider for the given <code>query</code>.
+ *
+ * @param query the query.
+ * @return an excerpt provider for the given <code>query</code>.
+ * @throws IOException if the provider cannot be created.
+ */
+ public ExcerptProvider createExcerptProvider(Query query) throws IOException {
+ ExcerptProvider ep = queryHandlerConfig.createExcerptProvider(query);
+ ep.init(query, this);
+ return ep;
+ }
- /**
- * Creates a new query by specifying the query statement itself and the
- * language in which the query is stated. If the query statement is
- * syntactically invalid, given the language specified, an
- * InvalidQueryException is thrown. <code>language</code> must specify a
query
- * language string from among those returned by
- * QueryManager.getSupportedQueryLanguages(); if it is not then an
- * <code>InvalidQueryException</code> is thrown.
- *
- * @param session the session of the current user creating the query object.
- * @param itemMgr the item manager of the current user.
- * @param statement the query statement.
- * @param language the syntax of the query statement.
- * @throws InvalidQueryException if statement is invalid or language is
- * unsupported.
- * @return A <code>Query</code> object.
- */
-   public ExecutableQuery createExecutableQuery(SessionImpl session, SessionDataManager itemMgr, String statement,
- String language) throws InvalidQueryException
- {
- QueryImpl query =
-         new QueryImpl(session, itemMgr, this, getContext().getPropertyTypeRegistry(), statement, language,
- getQueryNodeFactory());
- query.setRespectDocumentOrder(queryHandlerConfig.getDocumentOrder());
- return query;
- }
+ /**
+ * Creates a new query by specifying the query statement itself and the
+ * language in which the query is stated. If the query statement is
+ * syntactically invalid, given the language specified, an
+   * InvalidQueryException is thrown. <code>language</code> must specify a query
+ * language string from among those returned by
+ * QueryManager.getSupportedQueryLanguages(); if it is not then an
+ * <code>InvalidQueryException</code> is thrown.
+ *
+ * @param session the session of the current user creating the query object.
+ * @param itemMgr the item manager of the current user.
+ * @param statement the query statement.
+ * @param language the syntax of the query statement.
+ * @throws InvalidQueryException if statement is invalid or language is
+ * unsupported.
+ * @return A <code>Query</code> object.
+ */
+ public ExecutableQuery createExecutableQuery(SessionImpl session,
+ SessionDataManager itemMgr,
+ String statement,
+                                               String language) throws InvalidQueryException {
+ QueryImpl query = new QueryImpl(session,
+ itemMgr,
+ this,
+ getContext().getPropertyTypeRegistry(),
+ statement,
+ language,
+ getQueryNodeFactory());
+ query.setRespectDocumentOrder(queryHandlerConfig.getDocumentOrder());
+ return query;
+ }
-   public org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl createQueryInstance()
- throws RepositoryException
- {
- try
- {
- Object obj = Class.forName(queryHandlerConfig.getQueryClass()).newInstance();
-         if (obj instanceof org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl)
- {
- return (org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl)obj;
- }
-         throw new IllegalArgumentException(queryHandlerConfig.getQueryClass() + " is not of type "
- + AbstractQueryImpl.class.getName());
-
+  public org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl createQueryInstance() throws RepositoryException {
+ try {
+ Object obj = Class.forName(queryHandlerConfig.getQueryClass()).newInstance();
+      if (obj instanceof org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl) {
+ return (org.exoplatform.services.jcr.impl.core.query.AbstractQueryImpl) obj;
}
- catch (Throwable t)
- {
-         throw new RepositoryException("Unable to create query: " + t.toString());
- }
- }
+      throw new IllegalArgumentException(queryHandlerConfig.getQueryClass() + " is not of type "
+ + AbstractQueryImpl.class.getName());
- /**
- * Removes the node with <code>uuid</code> from the search index.
- *
- * @param id the id of the node to remove from the index.
- * @throws IOException if an error occurs while removing the node from the
- * index.
- */
- public void deleteNode(String id) throws IOException
- {
- throw new UnsupportedOperationException("deleteNode");
- }
+ } catch (Throwable t) {
+      throw new RepositoryException("Unable to create query: " + t.toString());
+ }
+ }
- public QueryHits executeQuery(Query query, boolean needsSystemTree, InternalQName[] orderProps, boolean[] orderSpecs)
- throws IOException
- {
- checkOpen();
- SortField[] sortFields = createSortFields(orderProps, orderSpecs);
+ /**
+ * Removes the node with <code>uuid</code> from the search index.
+ *
+ * @param id the id of the node to remove from the index.
+ * @throws IOException if an error occurs while removing the node from the
+ * index.
+ */
+ public void deleteNode(String id) throws IOException {
+ throw new UnsupportedOperationException("deleteNode");
+ }
- IndexReader reader = getIndexReader(needsSystemTree);
- IndexSearcher searcher = new IndexSearcher(reader);
- Hits hits;
- if (sortFields.length > 0)
- {
- hits = searcher.search(query, new Sort(sortFields));
- }
- else
- {
- hits = searcher.search(query);
- }
- return new QueryHits(hits, reader);
- }
+ public QueryHits executeQuery(Query query,
+ boolean needsSystemTree,
+ InternalQName[] orderProps,
+ boolean[] orderSpecs) throws IOException {
+ checkOpen();
+ SortField[] sortFields = createSortFields(orderProps, orderSpecs);
- /**
- * Returns the context for this query handler.
- *
- * @return the <code>QueryHandlerContext</code> instance for this
- * <code>QueryHandler</code>.
- */
- public QueryHandlerContext getContext()
- {
- return context;
- }
+ IndexReader reader = getIndexReader(needsSystemTree);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ Hits hits;
+ if (sortFields.length > 0) {
+ hits = searcher.search(query, new Sort(sortFields));
+ } else {
+ hits = searcher.search(query);
+ }
+ return new QueryHits(hits, reader);
+ }
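
The method still uses Lucene's Hits API, which Lucene 2.4 deprecates in favor of TopDocs. A sketch of the TopDocs equivalent, reusing the query, sortFields, and searcher names from the method above (the result cap of 1000 is an arbitrary placeholder, not from this commit):

    // Sketch: TopDocs-based equivalent of the Hits search above (Lucene 2.4 API).
    TopDocs top = (sortFields.length > 0)
        ? searcher.search(query, null, 1000, new Sort(sortFields))
        : searcher.search(query, null, 1000);
    for (ScoreDoc sd : top.scoreDocs) {
        Document doc = searcher.doc(sd.doc); // fetch the stored fields of each hit
    }
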
- /**
- * Returns the index format version that this search index is able to support
- * when a query is executed on this index.
- *
- * @return the index format version for this search index.
- */
- public IndexFormatVersion getIndexFormatVersion()
- {
- if (indexFormatVersion == null)
- {
- if (getContext().getParentHandler() instanceof SearchIndex)
- {
- SearchIndex parent = (SearchIndex)getContext().getParentHandler();
-             if (parent.getIndexFormatVersion().getVersion() < index.getIndexFormatVersion().getVersion())
- {
- indexFormatVersion = parent.getIndexFormatVersion();
- }
- else
- {
- indexFormatVersion = index.getIndexFormatVersion();
- }
- }
- else
- {
- indexFormatVersion = index.getIndexFormatVersion();
- }
+ /**
+ * Returns the context for this query handler.
+ *
+ * @return the <code>QueryHandlerContext</code> instance for this
+ * <code>QueryHandler</code>.
+ */
+ public QueryHandlerContext getContext() {
+ return context;
+ }
+
+ /**
+ * Returns the index format version that this search index is able to support
+ * when a query is executed on this index.
+ *
+ * @return the index format version for this search index.
+ */
+ public IndexFormatVersion getIndexFormatVersion() {
+ if (indexFormatVersion == null) {
+ if (getContext().getParentHandler() instanceof SearchIndex) {
+ SearchIndex parent = (SearchIndex) getContext().getParentHandler();
+        if (parent.getIndexFormatVersion().getVersion() < index.getIndexFormatVersion().getVersion()) {
+ indexFormatVersion = parent.getIndexFormatVersion();
+ } else {
+ indexFormatVersion = index.getIndexFormatVersion();
+ }
+ } else {
+ indexFormatVersion = index.getIndexFormatVersion();
}
- return indexFormatVersion;
- }
+ }
+ return indexFormatVersion;
+ }
- /**
- * @return the indexing configuration or <code>null</code> if there is none.
- */
- public IndexingConfiguration getIndexingConfig()
- {
- return indexingConfig;
- }
+ /**
+ * @return the indexing configuration or <code>null</code> if there is none.
+ */
+ public IndexingConfiguration getIndexingConfig() {
+ return indexingConfig;
+ }
- /**
- * Returns an index reader for this search index. The caller of this method is
- * responsible for closing the index reader when he is finished using it.
- *
- * @return an index reader for this search index.
- * @throws IOException the index reader cannot be obtained.
- */
- public IndexReader getIndexReader() throws IOException
- {
- return getIndexReader(true);
- }
+ /**
+ * Returns an index reader for this search index. The caller of this method is
+ * responsible for closing the index reader when he is finished using it.
+ *
+ * @return an index reader for this search index.
+ * @throws IOException the index reader cannot be obtained.
+ */
+ public IndexReader getIndexReader() throws IOException {
+ return getIndexReader(true);
+ }
- // --------------------------< properties >----------------------------------
+ // --------------------------< properties >----------------------------------
- /**
- * Returns an index reader for this search index. The caller of this method is
- * responsible for closing the index reader when he is finished using it.
- *
- * @param includeSystemIndex if <code>true</code> the index reader will cover
- *          the complete workspace. If <code>false</code> the returned index
- * reader will not contains any nodes under /jcr:system.
- * @return an index reader for this search index.
- * @throws IOException the index reader cannot be obtained.
- */
- public IndexReader getIndexReader(boolean includeSystemIndex) throws IOException
- {
- QueryHandler parentHandler = getContext().getParentHandler();
- CachingMultiIndexReader parentReader = null;
- if (parentHandler instanceof SearchIndex && includeSystemIndex)
- {
- parentReader = ((SearchIndex)parentHandler).index.getIndexReader();
- }
+ /**
+ * Returns an index reader for this search index. The caller of this method is
+ * responsible for closing the index reader when he is finished using it.
+ *
+ * @param includeSystemIndex if <code>true</code> the index reader will cover
+ *          the complete workspace. If <code>false</code> the returned index
+ *          reader will not contain any nodes under /jcr:system.
+ * @return an index reader for this search index.
+ * @throws IOException the index reader cannot be obtained.
+ */
+ public IndexReader getIndexReader(boolean includeSystemIndex) throws IOException {
+ QueryHandler parentHandler = getContext().getParentHandler();
+ CachingMultiIndexReader parentReader = null;
+ if (parentHandler instanceof SearchIndex && includeSystemIndex) {
+ parentReader = ((SearchIndex) parentHandler).index.getIndexReader();
+ }
- CachingMultiIndexReader reader = index.getIndexReader();
- if (parentReader != null)
- {
- CachingMultiIndexReader[] readers = {reader, parentReader};
- return new CombinedIndexReader(readers);
- }
- return reader;
+ IndexReader reader;
+ if (parentReader != null) {
+ CachingMultiIndexReader[] readers = { index.getIndexReader(), parentReader };
+ reader = new CombinedIndexReader(readers);
+ } else {
+ reader = index.getIndexReader();
+ }
+ return new JackrabbitIndexReader(reader);
- }
+ }
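
With this change every reader handed out is wrapped in a JackrabbitIndexReader, so callers keep the documented contract of simply closing the reader; the wrapper is responsible for releasing the reference-counted readers underneath. Caller-side sketch:

    // Sketch: the caller's obligation is unchanged -- close() the returned reader.
    IndexReader reader = searchIndex.getIndexReader(false); // false: skip /jcr:system
    try {
        int docs = reader.numDocs(); // any read-only use of the reader
    } finally {
        reader.close(); // frees the underlying ref-counted index readers
    }
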
- /**
- * Returns the namespace mappings for the internal representation.
- *
- * @return the namespace mappings for the internal representation.
- */
- public NamespaceMappings getNamespaceMappings()
- {
- return nsMappings;
- }
+ /**
+ * Returns the namespace mappings for the internal representation.
+ *
+ * @return the namespace mappings for the internal representation.
+ */
+ public NamespaceMappings getNamespaceMappings() {
+ return nsMappings;
+ }
- /**
- * @return the spell checker of this search index. If none is configured this
- * method returns <code>null</code>.
- */
- public SpellChecker getSpellChecker()
- {
- return spellChecker;
- }
+ /**
+ * @return the spell checker of this search index. If none is configured this
+ * method returns <code>null</code>.
+ */
+ public SpellChecker getSpellChecker() {
+ return spellChecker;
+ }
- /**
- * @return the synonym provider of this search index. If none is set for this
- * search index the synonym provider of the parent handler is returned
- * if there is any.
- */
- public SynonymProvider getSynonymProvider()
- {
- if (synProvider != null)
- {
- return synProvider;
- }
- QueryHandler handler = getContext().getParentHandler();
- if (handler instanceof SearchIndex)
- return ((SearchIndex)handler).getSynonymProvider();
- return null;
+ /**
+ * @return the synonym provider of this search index. If none is set for this
+ * search index the synonym provider of the parent handler is returned
+ * if there is any.
+ */
+ public SynonymProvider getSynonymProvider() {
+ if (synProvider != null) {
+ return synProvider;
+ }
+ QueryHandler handler = getContext().getParentHandler();
+ if (handler instanceof SearchIndex)
+ return ((SearchIndex) handler).getSynonymProvider();
+ return null;
- }
+ }
- /**
- * Returns the analyzer in use for indexing.
- *
- * @return the analyzer in use for indexing.
- */
- public Analyzer getTextAnalyzer()
- {
- return queryHandlerConfig.getAnalyzer();
- }
+ /**
+ * Returns the analyzer in use for indexing.
+ *
+ * @return the analyzer in use for indexing.
+ */
+ public Analyzer getTextAnalyzer() {
+ return queryHandlerConfig.getAnalyzer();
+ }
- /**
- * Initializes this query handler by setting all properties in this class with
- * appropriate parameter values.
- *
- * @param context the context for this query handler.
- */
- public final void setContext(QueryHandlerContext queryHandlerContext) throws IOException
- {
- this.context = queryHandlerContext;
- }
+ /**
+ * Initializes this query handler by setting all properties in this class with
+ * appropriate parameter values.
+ *
+ * @param context the context for this query handler.
+ */
+ public final void setContext(QueryHandlerContext queryHandlerContext) throws IOException {
+ this.context = queryHandlerContext;
+ }
- /**
- * Initializes this <code>QueryHandler</code>. This implementation requires
- * that a path parameter is set in the configuration. If this condition is not
- * met, a <code>IOException</code> is thrown.
- *
- * @throws IOException if an error occurs while initializing this handler.
- */
- public void init()
- {
- try
- {
- String indexDir = context.getIndexDirectory();
- if (indexDir != null)
- {
-          indexDir = indexDir.replace("${java.io.tmpdir}", System.getProperty("java.io.tmpdir"));
- indexDirectory = new File(indexDir);
- if (!indexDirectory.exists())
- if (!indexDirectory.mkdirs())
-                throw new RepositoryException("fail to create index dir " + indexDir);
- }
- else
- {
-          throw new IOException("SearchIndex requires 'path' parameter in configuration!");
- }
+ /**
+  * Initializes this <code>QueryHandler</code>. This implementation requires
+  * that a path parameter is set in the configuration. If this condition is not
+  * met, an <code>IOException</code> is thrown.
+  *
+  * @throws IOException if an error occurs while initializing this handler.
+  * @throws RepositoryException if the index directory cannot be created or the
+  *           initial index cannot be built.
+  * @throws RepositoryConfigurationException if the query handler configuration
+  *           is invalid.
+  */
+ public void init() throws IOException, RepositoryException, RepositoryConfigurationException {
+ String indexDir = context.getIndexDirectory();
+ if (indexDir != null) {
+    indexDir = indexDir.replace("${java.io.tmpdir}", System.getProperty("java.io.tmpdir"));
+    indexDirectory = new File(indexDir);
+    if (!indexDirectory.exists())
+      if (!indexDirectory.mkdirs())
+        throw new RepositoryException("fail to create index dir " + indexDir);
+  } else {
+    throw new IOException("SearchIndex requires 'path' parameter in configuration!");
+ }
- extractor = context.getExtractor();
- synProvider = queryHandlerConfig.createSynonymProvider(cfm);
- // File indexDirFile = context.getFileSystem();
+ extractor = context.getExtractor();
+ synProvider = queryHandlerConfig.createSynonymProvider(cfm);
+ // File indexDirFile = context.getFileSystem();
- if (context.getParentHandler() instanceof SearchIndex)
- {
- // use system namespace mappings
- SearchIndex sysIndex = (SearchIndex)context.getParentHandler();
- nsMappings = sysIndex.getNamespaceMappings();
- }
- else
- {
- // read local namespace mappings
- File mapFile = new File(indexDirectory, NS_MAPPING_FILE);
- if (mapFile.exists())
- {
- // be backward compatible and use ns_mappings.properties from
- // index folder
- nsMappings = new FileBasedNamespaceMappings(mapFile);
- }
- else
- {
- // otherwise use repository wide stable index prefix from
- // namespace registry
-             nsMappings = new NSRegistryBasedNamespaceMappings(context.getNamespaceRegistry());
- }
- }
- npResolver = new LocationFactory(nsMappings);
+ if (context.getParentHandler() instanceof SearchIndex) {
+ // use system namespace mappings
+ SearchIndex sysIndex = (SearchIndex) context.getParentHandler();
+ nsMappings = sysIndex.getNamespaceMappings();
+ } else {
+ // read local namespace mappings
+ File mapFile = new File(indexDirectory, NS_MAPPING_FILE);
+ if (mapFile.exists()) {
+ // be backward compatible and use ns_mappings.properties from
+ // index folder
+ nsMappings = new FileBasedNamespaceMappings(mapFile);
+ } else {
+ // otherwise use repository wide stable index prefix from
+ // namespace registry
+        nsMappings = new NSRegistryBasedNamespaceMappings(context.getNamespaceRegistry());
+ }
+ }
+ npResolver = new LocationFactory(nsMappings);
-       indexingConfig = queryHandlerConfig.createIndexingConfiguration(nsMappings, context, cfm);
+    indexingConfig = queryHandlerConfig.createIndexingConfiguration(nsMappings, context, cfm);
- queryHandlerConfig.getAnalyzer().setIndexingConfig(indexingConfig);
+ queryHandlerConfig.getAnalyzer().setIndexingConfig(indexingConfig);
- index = new MultiIndex(indexDirectory, this/* , excludedIDs */, nsMappings);
+ index = new MultiIndex(indexDirectory, this/* , excludedIDs */, nsMappings);
- if (index.numDocs() == 0)
- {
-          index.createInitialIndex(context.getItemStateManager(), context.getRootNodeIdentifer());
-       }
-       if (queryHandlerConfig.isConsistencyCheckEnabled()
-          && (index.getRedoLogApplied() || queryHandlerConfig.isForceConsistencyCheck()))
- {
- log.info("Running consistency check... ");
+ if (index.numDocs() == 0) {
+      index.createInitialIndex(context.getItemStateManager(), context.getRootNodeIdentifer());
+    }
+    if (queryHandlerConfig.isConsistencyCheckEnabled()
+        && (index.getRedoLogApplied() || queryHandlerConfig.isForceConsistencyCheck())) {
+ log.info("Running consistency check... ");
-          ConsistencyCheck check = ConsistencyCheck.run(index, context.getItemStateManager());
- if (queryHandlerConfig.getAutoRepair())
- {
- check.repair(true);
- }
- else
- {
- List<ConsistencyCheckError> errors = check.getErrors();
- if (errors.size() == 0)
- {
- log.info("No errors detected.");
- }
-             for (Iterator<ConsistencyCheckError> it = errors.iterator(); it.hasNext();)
- {
- ConsistencyCheckError err = it.next();
- log.info(err.toString());
- }
- }
- }
+      ConsistencyCheck check = ConsistencyCheck.run(index, context.getItemStateManager());
+ if (queryHandlerConfig.getAutoRepair()) {
+ check.repair(true);
+ } else {
+ List<ConsistencyCheckError> errors = check.getErrors();
+ if (errors.size() == 0) {
+ log.info("No errors detected.");
+ }
+        for (Iterator<ConsistencyCheckError> it = errors.iterator(); it.hasNext();) {
+ ConsistencyCheckError err = it.next();
+ log.info(err.toString());
+ }
+ }
+ }
- // initialize spell checker
- spellChecker = queryHandlerConfig.createSpellChecker(this);
+ // initialize spell checker
+ spellChecker = queryHandlerConfig.createSpellChecker(this);
- log.info("Index initialized: " + queryHandlerConfig.getIndexDir() +
" Version: "
- + index.getIndexFormatVersion() + "");
+ log.info("Index initialized: " + queryHandlerConfig.getIndexDir() +
" Version: "
+ + index.getIndexFormatVersion() + "");
- File file = new File(indexDir, ERROR_LOG);
- errorLog = new ErrorLog(file, queryHandlerConfig.getErrorLogSize());
- // reprocess any notfinished notifies;
- recoverErrorLog(errorLog);
+ File file = new File(indexDir, ERROR_LOG);
+ errorLog = new ErrorLog(file, queryHandlerConfig.getErrorLogSize());
+    // reprocess any unfinished notifications
+ recoverErrorLog(errorLog);
- }
- catch (IOException e)
- {
- log.error(e.getLocalizedMessage());
- throw new RuntimeException(e);
- }
- catch (RepositoryException e)
- {
- log.error(e.getLocalizedMessage());
- throw new RuntimeException(e);
- }
- catch (RepositoryConfigurationException e)
- {
- log.error(e.getLocalizedMessage());
- throw new RuntimeException(e);
- }
- }
+ }
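
init() now lets checked exceptions propagate instead of wrapping them in a RuntimeException, so container startup code must handle them explicitly; a sketch (names assumed, handling illustrative):

    // Sketch: wiring init() under the new signature.
    try {
        searchIndex.setContext(queryHandlerContext);
        searchIndex.init();
    } catch (IOException e) {
        // e.g. missing 'path' parameter or unwritable index directory
    } catch (RepositoryException e) {
        // e.g. initial index build or consistency check failed
    } catch (RepositoryConfigurationException e) {
        // e.g. synonym provider or indexing configuration could not be created
    }
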
- private void recoverErrorLog(ErrorLog errlog) throws IOException, RepositoryException
- {
- final Set<String> rem = new HashSet<String>();
- final Set<String> add = new HashSet<String>();
+ private void recoverErrorLog(ErrorLog errlog) throws IOException, RepositoryException {
+ final Set<String> rem = new HashSet<String>();
+ final Set<String> add = new HashSet<String>();
- errlog.readChanges(rem, add);
+ errlog.readChanges(rem, add);
- // check is any notifies in log
- if (rem.isEmpty() && add.isEmpty())
- {
- // there is no sense to continue
- return;
- }
+    // check whether the log contains any notifications
+    if (rem.isEmpty() && add.isEmpty()) {
+      // nothing to recover
+ return;
+ }
- Iterator<String> removedStates = rem.iterator();
+ Iterator<String> removedStates = rem.iterator();
- // make a new iterator;
- Iterator<NodeData> addedStates = new Iterator<NodeData>()
- {
- private final Iterator<String> iter = add.iterator();
+ // make a new iterator;
+ Iterator<NodeData> addedStates = new Iterator<NodeData>() {
+ private final Iterator<String> iter = add.iterator();
- public boolean hasNext()
- {
- return iter.hasNext();
- }
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
- public NodeData next()
- {
- String id;
- // we have to iterrate through items till will meet ones existing in
- // workspace
- while (iter.hasNext())
- {
- id = iter.next();
+ public NodeData next() {
+ String id;
+        // iterate through the items until we find one that exists in the
+        // workspace
+ while (iter.hasNext()) {
+ id = iter.next();
- try
- {
- ItemData item = context.getItemStateManager().getItemData(id);
- if (item != null)
- {
- if (item.isNode())
- {
- return (NodeData)item; // return node here
- }
- else
- log.warn("Node expected but property found with id " +
id + ". Skipping "
- + item.getQPath().getAsString());
- }
- else
- {
- log.warn("Unable to recovery node index " + id + ".
Node not found.");
- }
- }
- catch (RepositoryException e)
- {
- log.error("ErrorLog recovery error. Item id " + id + ".
" + e, e);
- }
+ try {
+ ItemData item = context.getItemStateManager().getItemData(id);
+ if (item != null) {
+ if (item.isNode()) {
+ return (NodeData) item; // return node here
+ } else
+ log.warn("Node expected but property found with id " + id +
". Skipping "
+ + item.getQPath().getAsString());
+ } else {
+ log.warn("Unable to recovery node index " + id + ". Node not
found.");
}
+ } catch (RepositoryException e) {
+ log.error("ErrorLog recovery error. Item id " + id + ". "
+ e, e);
+ }
+ }
- return null;
- }
+ return null;
+ }
- public void remove()
- {
- throw new UnsupportedOperationException();
- }
- };
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
- updateNodes(removedStates, addedStates);
+ updateNodes(removedStates, addedStates);
- errlog.clear();
- }
+ errlog.clear();
+ }
- /**
- * Closes this <code>QueryHandler</code> and frees resources attached to this
- * handler.
- */
- public void close()
- {
- if (spellChecker != null)
- {
- spellChecker.close();
- }
- index.close();
- getContext().destroy();
- closed = true;
+ /**
+ * Closes this <code>QueryHandler</code> and frees resources attached to this
+ * handler.
+ */
+ public void close() {
+ if (spellChecker != null) {
+ spellChecker.close();
+ }
+ index.close();
+ getContext().destroy();
+ closed = true;
- log.info("Index closed: " + indexDirectory.getAbsolutePath());
- }
+ log.info("Index closed: " + indexDirectory.getAbsolutePath());
+ }
- /**
- * This implementation forwards the call to
- * {@link MultiIndex#update(java.util.Iterator, java.util.Iterator)} and
- * transforms the two iterators to the required types.
- *
- * @param remove uuids of nodes to remove.
- * @param add NodeStates to add. Calls to <code>next()</code> on this iterator
- *          may return <code>null</code>, to indicate that a node could not be
- * indexed successfully.
- * @throws RepositoryException if an error occurs while indexing a node.
- * @throws IOException if an error occurs while updating the index.
- */
- public void updateNodes(final Iterator<String> remove, final Iterator<NodeData> add) throws RepositoryException,
- IOException
- {
+ /**
+ * This implementation forwards the call to
+ * {@link MultiIndex#update(java.util.Iterator, java.util.Iterator)} and
+ * transforms the two iterators to the required types.
+ *
+ * @param remove uuids of nodes to remove.
+ * @param add NodeStates to add. Calls to <code>next()</code> on this iterator
+ *          may return <code>null</code>, to indicate that a node could not be
+ * indexed successfully.
+ * @throws RepositoryException if an error occurs while indexing a node.
+ * @throws IOException if an error occurs while updating the index.
+ */
+ public void updateNodes(final Iterator<String> remove, final Iterator<NodeData> add) throws RepositoryException, IOException {
- checkOpen();
+ checkOpen();
-       final Map<String, NodeData> aggregateRoots = new HashMap<String, NodeData>();
-       final Set<String> removedNodeIds = new HashSet<String>();
-       final Set<String> addedNodeIds = new HashSet<String>();
+    final Map<String, NodeData> aggregateRoots = new HashMap<String, NodeData>();
+ final Set<String> removedNodeIds = new HashSet<String>();
+ final Set<String> addedNodeIds = new HashSet<String>();
- index.update(new AbstractIteratorDecorator(remove)
- {
- public Object next()
- {
- String nodeId = (String)super.next();
- removedNodeIds.add(nodeId);
- return nodeId;
- }
- }, new AbstractIteratorDecorator(add)
- {
- public Object next()
- {
- NodeData state = (NodeData)super.next();
- if (state == null)
- {
- return null;
- }
- addedNodeIds.add(state.getIdentifier());
- removedNodeIds.remove(state.getIdentifier());
- Document doc = null;
- try
- {
-               doc = createDocument(state, getNamespaceMappings(), index.getIndexFormatVersion());
- retrieveAggregateRoot(state, aggregateRoots);
- }
- catch (RepositoryException e)
- {
-               log
-                  .warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString(), e);
+ index.update(new AbstractIteratorDecorator(remove) {
+ public Object next() {
+ String nodeId = (String) super.next();
+ removedNodeIds.add(nodeId);
+ return nodeId;
+ }
+ }, new AbstractIteratorDecorator(add) {
+ public Object next() {
+ NodeData state = (NodeData) super.next();
+ if (state == null) {
+ return null;
+ }
+ addedNodeIds.add(state.getIdentifier());
+ removedNodeIds.remove(state.getIdentifier());
+ Document doc = null;
+ try {
+          doc = createDocument(state, getNamespaceMappings(), index.getIndexFormatVersion());
+ retrieveAggregateRoot(state, aggregateRoots);
+ } catch (RepositoryException e) {
+ log.warn("Exception while creating document for node: " +
state.getIdentifier() + ": "
+ + e.toString(), e);
- }
- return doc;
- }
- });
+ }
+ return doc;
+ }
+ });
- // remove any aggregateRoot nodes that are new
- // and therefore already up-to-date
- aggregateRoots.keySet().removeAll(addedNodeIds);
+ // remove any aggregateRoot nodes that are new
+ // and therefore already up-to-date
+ aggregateRoots.keySet().removeAll(addedNodeIds);
- // based on removed NodeIds get affected aggregate root nodes
- retrieveAggregateRoot(removedNodeIds, aggregateRoots);
+ // based on removed NodeIds get affected aggregate root nodes
+ retrieveAggregateRoot(removedNodeIds, aggregateRoots);
- // update aggregates if there are any affected
- if (aggregateRoots.size() > 0)
- {
- index.update(new AbstractIteratorDecorator(aggregateRoots.keySet().iterator())
- {
- public Object next()
- {
- return super.next();
- }
- }, new AbstractIteratorDecorator(aggregateRoots.values().iterator())
- {
- public Object next()
- {
- NodeData state = (NodeData)super.next();
- try
- {
-                  return createDocument(state, getNamespaceMappings(), index.getIndexFormatVersion());
- }
- catch (RepositoryException e)
- {
-                  log
-                     .warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString());
- }
- return null;
- }
- });
- }
+ // update aggregates if there are any affected
+ if (aggregateRoots.size() > 0) {
+ index.update(new AbstractIteratorDecorator(aggregateRoots.keySet().iterator()) {
+ public Object next() {
+ return super.next();
+ }
+ }, new AbstractIteratorDecorator(aggregateRoots.values().iterator()) {
+ public Object next() {
+ NodeData state = (NodeData) super.next();
+ try {
+            return createDocument(state, getNamespaceMappings(), index.getIndexFormatVersion());
+ } catch (RepositoryException e) {
+ log.warn("Exception while creating document for node: " +
state.getIdentifier() + ": "
+ + e.toString());
+ }
+ return null;
+ }
+ });
+ }
- }
+ }
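
A minimal call sketch for a single-node update (staleUuid and nodeData are assumed to come from the change log being processed; imports elided):

    // Sketch: remove one stale entry and (re)index one node in a single call.
    Set<String> removed = Collections.singleton(staleUuid);
    List<NodeData> added = Collections.singletonList(nodeData);
    searchIndex.updateNodes(removed.iterator(), added.iterator());
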
- /**
- * Creates a lucene <code>Document</code> for a node state using the namespace
- * mappings <code>nsMappings</code>.
- *
- * @param node the node state to index.
- * @param nsMappings the namespace mappings of the search index.
- * @param indexFormatVersion the index format version that should be used to
- * index the passed node state.
- * @return a lucene <code>Document</code> that contains all properties of
- * <code>node</code>.
- * @throws RepositoryException if an error occurs while indexing the
- * <code>node</code>.
- */
- protected Document createDocument(NodeData node, NamespaceMappings nsMappings, IndexFormatVersion indexFormatVersion)
-    throws RepositoryException
- {
-    NodeIndexer indexer = new NodeIndexer(node, getContext().getItemStateManager(), nsMappings, extractor);
- indexer.setSupportHighlighting(queryHandlerConfig.getSupportHighlighting());
- indexer.setIndexingConfiguration(indexingConfig);
- indexer.setIndexFormatVersion(indexFormatVersion);
- Document doc = indexer.createDoc();
- mergeAggregatedNodeIndexes(node, doc);
- return doc;
- }
+ /**
+ * Creates a lucene <code>Document</code> for a node state using the namespace
+ * mappings <code>nsMappings</code>.
+ *
+ * @param node the node state to index.
+ * @param nsMappings the namespace mappings of the search index.
+ * @param indexFormatVersion the index format version that should be used to
+ * index the passed node state.
+ * @return a lucene <code>Document</code> that contains all properties of
+ * <code>node</code>.
+ * @throws RepositoryException if an error occurs while indexing the
+ * <code>node</code>.
+ */
+ protected Document createDocument(NodeData node,
+ NamespaceMappings nsMappings,
+                                   IndexFormatVersion indexFormatVersion) throws RepositoryException {
+ NodeIndexer indexer = new NodeIndexer(node,
+ getContext().getItemStateManager(),
+ nsMappings,
+ extractor);
+ indexer.setSupportHighlighting(queryHandlerConfig.getSupportHighlighting());
+ indexer.setIndexingConfiguration(indexingConfig);
+ indexer.setIndexFormatVersion(indexFormatVersion);
+ Document doc = indexer.createDoc();
+ mergeAggregatedNodeIndexes(node, doc);
+ return doc;
+ }
- // ----------------------------< internal >----------------------------------
+ // ----------------------------< internal >----------------------------------
- /**
- * Creates the SortFields for the order properties.
- *
- * @param orderProps the order properties.
- * @param orderSpecs the order specs for the properties.
- * @return an array of sort fields
- */
- protected SortField[] createSortFields(InternalQName[] orderProps, boolean[] orderSpecs)
- {
- List<SortField> sortFields = new ArrayList<SortField>();
- for (int i = 0; i < orderProps.length; i++)
- {
- String prop = null;
- if (Constants.JCR_SCORE.equals(orderProps[i]))
- {
- // order on jcr:score does not use the natural order as
- // implemented in lucene. score ascending in lucene means that
- // higher scores are first. JCR specs that lower score values
- // are first.
- sortFields.add(new SortField(null, SortField.SCORE, orderSpecs[i]));
- }
- else
- {
- try
- {
- prop = npResolver.createJCRName(orderProps[i]).getAsString();
- }
- catch (RepositoryException e)
- {
- e.printStackTrace();
- // will never happen
- }
-             sortFields.add(new SortField(prop, SharedFieldSortComparator.PROPERTIES, !orderSpecs[i]));
- }
+ /**
+ * Creates the SortFields for the order properties.
+ *
+ * @param orderProps the order properties.
+ * @param orderSpecs the order specs for the properties.
+ * @return an array of sort fields
+ */
+ protected SortField[] createSortFields(InternalQName[] orderProps, boolean[] orderSpecs) {
+ List<SortField> sortFields = new ArrayList<SortField>();
+ for (int i = 0; i < orderProps.length; i++) {
+ String prop = null;
+ if (Constants.JCR_SCORE.equals(orderProps[i])) {
+ // order on jcr:score does not use the natural order as
+ // implemented in lucene. score ascending in lucene means that
+ // higher scores are first. JCR specs that lower score values
+ // are first.
+ sortFields.add(new SortField(null, SortField.SCORE, orderSpecs[i]));
+ } else {
+ try {
+ prop = npResolver.createJCRName(orderProps[i]).getAsString();
+ } catch (RepositoryException e) {
+ e.printStackTrace();
+ // will never happen
+ }
+        sortFields.add(new SortField(prop, SharedFieldSortComparator.PROPERTIES, !orderSpecs[i]));
}
- return sortFields.toArray(new SortField[sortFields.size()]);
- }
+ }
+ return sortFields.toArray(new SortField[sortFields.size()]);
+ }
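
Note the two flag conventions at work here: Lucene's SortField boolean means "reverse", so an ascending order spec must be negated, while for jcr:score the spec is passed through unnegated because Lucene's natural score order (highest first) is already the reverse of JCR's ascending score order. In isolation:

    // Sketch: ascending property sort => reverse == false, hence the !orderSpecs[i] above.
    SortField ascByProp = new SortField("ns:prop", SharedFieldSortComparator.PROPERTIES, false);
    // Ascending score sort => reverse == true, because Lucene scores descend by default.
    SortField ascByScore = new SortField(null, SortField.SCORE, true);
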
- /**
- * Returns the actual index.
- *
- * @return the actual index.
- */
- protected MultiIndex getIndex()
- {
- return index;
- }
+ /**
+ * Returns the actual index.
+ *
+ * @return the actual index.
+ */
+ protected MultiIndex getIndex() {
+ return index;
+ }
- /**
- * This method returns the QueryNodeFactory used to parse Queries. This method
- * may be overridden to provide a customized QueryNodeFactory
- */
- protected DefaultQueryNodeFactory getQueryNodeFactory()
- {
- return DEFAULT_QUERY_NODE_FACTORY;
- }
+ /**
+ * This method returns the QueryNodeFactory used to parse Queries. This method
+ * may be overridden to provide a customized QueryNodeFactory
+ */
+ protected DefaultQueryNodeFactory getQueryNodeFactory() {
+ return DEFAULT_QUERY_NODE_FACTORY;
+ }
- /**
- * Merges the fulltext indexed fields of the aggregated node states into
- * <code>doc</code>.
- *
- * @param state the node state on which <code>doc</code> was created.
- * @param doc the lucene document with index fields from <code>state</code>.
- */
- protected void mergeAggregatedNodeIndexes(NodeData state, Document doc)
- {
- if (indexingConfig != null)
- {
- AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
- if (aggregateRules == null)
- {
- return;
- }
- try
- {
- for (int i = 0; i < aggregateRules.length; i++)
- {
- NodeData[] aggregates = aggregateRules[i].getAggregatedNodeStates(state);
- if (aggregates == null)
- {
- continue;
- }
- for (int j = 0; j < aggregates.length; j++)
- {
-                Document aDoc = createDocument(aggregates[j], getNamespaceMappings(), index.getIndexFormatVersion());
- // transfer fields to doc if there are any
- Field[] fulltextFields = aDoc.getFields(FieldNames.FULLTEXT);
- if (fulltextFields != null)
- {
- for (int k = 0; k < fulltextFields.length; k++)
- {
- doc.add(fulltextFields[k]);
- }
-                   doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, aggregates[j].getIdentifier().toString(),
- Field.Store.NO, Field.Index.NO_NORMS));
- }
- }
- // only use first aggregate definition that matches
- break;
+ /**
+ * Merges the fulltext indexed fields of the aggregated node states into
+ * <code>doc</code>.
+ *
+ * @param state the node state on which <code>doc</code> was created.
+ * @param doc the lucene document with index fields from <code>state</code>.
+ */
+ protected void mergeAggregatedNodeIndexes(NodeData state, Document doc) {
+ if (indexingConfig != null) {
+ AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
+ if (aggregateRules == null) {
+ return;
+ }
+ try {
+ for (int i = 0; i < aggregateRules.length; i++) {
+ NodeData[] aggregates = aggregateRules[i].getAggregatedNodeStates(state);
+ if (aggregates == null) {
+ continue;
+ }
+ for (int j = 0; j < aggregates.length; j++) {
+ Document aDoc = createDocument(aggregates[j],
+ getNamespaceMappings(),
+ index.getIndexFormatVersion());
+ // transfer fields to doc if there are any
+ Field[] fulltextFields = aDoc.getFields(FieldNames.FULLTEXT);
+ if (fulltextFields != null) {
+ for (int k = 0; k < fulltextFields.length; k++) {
+ doc.add(fulltextFields[k]);
+ }
+ doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID,
+ aggregates[j].getIdentifier().toString(),
+ Field.Store.NO,
+ Field.Index.NO_NORMS));
}
- }
- catch (Exception e)
- {
- // do not fail if aggregate cannot be created
-          log
-             .warn("Exception while building indexing aggregate for " + "node with UUID: " + state.getIdentifier(), e);
- }
+ }
+ // only use first aggregate definition that matches
+ break;
+ }
+ } catch (Exception e) {
+ // do not fail if aggregate cannot be created
+ log.warn("Exception while building indexing aggregate for " +
"node with UUID: "
+ + state.getIdentifier(), e);
}
- }
+ }
+ }
- /**
- * Retrieves the root of the indexing aggregate for <code>state</code> and
- * puts it into <code>map</code>.
- *
- * @param state the node state for which we want to retrieve the aggregate
- * root.
- * @param map aggregate roots are collected in this map. Key=NodeId,
- * value=NodeState.
- */
- protected void retrieveAggregateRoot(NodeData state, Map<String, NodeData> map)
- {
- if (indexingConfig != null)
- {
- AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
- if (aggregateRules == null)
- {
- return;
- }
- try
- {
- for (int i = 0; i < aggregateRules.length; i++)
- {
- NodeData root = aggregateRules[i].getAggregateRoot(state);
- if (root != null)
- {
- map.put(root.getIdentifier(), root);
- break;
- }
- }
- }
- catch (Exception e)
- {
- log.warn("Unable to get aggregate root for " +
state.getIdentifier(), e);
- }
+ /**
+ * Retrieves the root of the indexing aggregate for <code>state</code> and
+ * puts it into <code>map</code>.
+ *
+ * @param state the node state for which we want to retrieve the aggregate
+ * root.
+ * @param map aggregate roots are collected in this map. Key=NodeId,
+ * value=NodeState.
+ */
+ protected void retrieveAggregateRoot(NodeData state, Map<String, NodeData> map) {
+ if (indexingConfig != null) {
+ AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
+ if (aggregateRules == null) {
+ return;
}
- }
+ try {
+ for (int i = 0; i < aggregateRules.length; i++) {
+ NodeData root = aggregateRules[i].getAggregateRoot(state);
+ if (root != null) {
+ map.put(root.getIdentifier(), root);
+ break;
+ }
+ }
+ } catch (Exception e) {
+ log.warn("Unable to get aggregate root for " + state.getIdentifier(),
e);
+ }
+ }
+ }
- /**
- * Retrieves the root of the indexing aggregate for
- * <code>removedNodeIds</code> and puts it into <code>map</code>.
- *
- * @param removedNodeIds the ids of removed nodes.
- * @param map aggregate roots are collected in this map. Key=NodeId,
- * value=NodeState.
- */
- protected void retrieveAggregateRoot(Set<String> removedNodeIds, Map<String,
NodeData> map)
- {
- if (indexingConfig != null)
- {
- AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
- if (aggregateRules == null)
- {
- return;
- }
- int found = 0;
- long time = System.currentTimeMillis();
- try
- {
- IndexReader reader = index.getIndexReader();
- try
- {
-             Term aggregateUUIDs = new Term(FieldNames.AGGREGATED_NODE_UUID, "");
- TermDocs tDocs = reader.termDocs();
- try
- {
- ItemDataConsumer ism = getContext().getItemStateManager();
-                for (Iterator<String> it = removedNodeIds.iterator(); it.hasNext();)
- {
- String id = it.next();
- aggregateUUIDs = aggregateUUIDs.createTerm(id);
- tDocs.seek(aggregateUUIDs);
- while (tDocs.next())
- {
- Document doc = reader.document(tDocs.doc());
- String uuid = doc.get(FieldNames.UUID);
- ItemData itd = ism.getItemData(uuid);
- if (itd == null)
- continue;
- if (!itd.isNode())
-                      throw new RepositoryException("Item with id:" + uuid + " is not a node");
- map.put(uuid, (NodeData)itd);
- found++;
- }
- }
- }
- finally
- {
- tDocs.close();
- }
+ /**
+ * Retrieves the root of the indexing aggregate for
+ * <code>removedNodeIds</code> and puts it into <code>map</code>.
+ *
+ * @param removedNodeIds the ids of removed nodes.
+ * @param map aggregate roots are collected in this map. Key=NodeId,
+ * value=NodeState.
+ */
+ protected void retrieveAggregateRoot(Set<String> removedNodeIds, Map<String, NodeData> map) {
+ if (indexingConfig != null) {
+ AggregateRule aggregateRules[] = indexingConfig.getAggregateRules();
+ if (aggregateRules == null) {
+ return;
+ }
+ int found = 0;
+ long time = System.currentTimeMillis();
+ try {
+ CachingMultiIndexReader reader = index.getIndexReader();
+ try {
+ Term aggregateUUIDs = new Term(FieldNames.AGGREGATED_NODE_UUID, "");
+ TermDocs tDocs = reader.termDocs();
+ try {
+ ItemDataConsumer ism = getContext().getItemStateManager();
+ for (Iterator<String> it = removedNodeIds.iterator(); it.hasNext();) {
+ String id = it.next();
+ aggregateUUIDs = aggregateUUIDs.createTerm(id);
+ tDocs.seek(aggregateUUIDs);
+ while (tDocs.next()) {
+ Document doc = reader.document(tDocs.doc());
+ String uuid = doc.get(FieldNames.UUID);
+ ItemData itd = ism.getItemData(uuid);
+ if (itd == null)
+ continue;
+ if (!itd.isNode())
+              throw new RepositoryException("Item with id:" + uuid + " is not a node");
+ map.put(uuid, (NodeData) itd);
+ found++;
+ }
}
- finally
- {
- reader.close();
- }
- }
- catch (Exception e)
- {
- log.warn("Exception while retrieving aggregate roots", e);
- }
- time = System.currentTimeMillis() - time;
- log.debug("Retrieved " + new Integer(found) + " aggregate roots
in " + new Long(time) + " ms.");
+ } finally {
+ tDocs.close();
+ }
+ } finally {
+ reader.release();
+ }
+ } catch (Exception e) {
+ log.warn("Exception while retrieving aggregate roots", e);
}
- }
+ time = System.currentTimeMillis() - time;
+ log.debug("Retrieved " + new Integer(found) + " aggregate roots in
" + new Long(time)
+ + " ms.");
+ }
+ }
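
The lookup above follows the usual Lucene 2.x pattern of reusing a single TermDocs enumerator across many terms; stripped of the JCR specifics it looks like this (the uuids collection is assumed given):

    // Sketch: reuse one TermDocs enumerator, seeking it to each term in turn.
    TermDocs tDocs = reader.termDocs();
    try {
        for (String uuid : uuids) {
            tDocs.seek(new Term(FieldNames.AGGREGATED_NODE_UUID, uuid));
            while (tDocs.next()) {
                Document doc = reader.document(tDocs.doc());
            }
        }
    } finally {
        tDocs.close();
    }
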
- /**
- * Checks if this <code>SearchIndex</code> is open, otherwise throws an
- * <code>IOException</code>.
- *
- * @throws IOException if this <code>SearchIndex</code> had been closed.
- */
- private void checkOpen() throws IOException
- {
- if (closed)
- {
-          throw new IOException("query handler closed and cannot be used anymore.");
- }
- }
+ /**
+ * Checks if this <code>SearchIndex</code> is open, otherwise throws an
+ * <code>IOException</code>.
+ *
+ * @throws IOException if this <code>SearchIndex</code> had been closed.
+ */
+ private void checkOpen() throws IOException {
+ if (closed) {
+      throw new IOException("query handler closed and cannot be used anymore.");
+ }
+ }
- /**
- * Combines multiple {@link CachingMultiIndexReader} into a
- * <code>MultiReader</code> with {@link HierarchyResolver} support.
- */
- protected static final class CombinedIndexReader extends MultiReader implements HierarchyResolver, MultiIndexReader
- {
+ /**
+ * Combines multiple {@link CachingMultiIndexReader} into a
+ * <code>MultiReader</code> with {@link HierarchyResolver} support.
+ */
+ protected static final class CombinedIndexReader extends MultiReader implements
+ HierarchyResolver, MultiIndexReader {
- /**
- * Doc number starts for each sub reader
- */
- private int[] starts;
+ /**
+ * Doc number starts for each sub reader
+ */
+ private int[] starts;
- /**
- * The sub readers.
- */
- final private CachingMultiIndexReader[] subReaders;
+ /**
+ * The sub readers.
+ */
+ final private CachingMultiIndexReader[] subReaders;
-       public CombinedIndexReader(CachingMultiIndexReader[] indexReaders) throws IOException
- {
- super(indexReaders);
- this.subReaders = indexReaders;
- this.starts = new int[subReaders.length + 1];
+    public CombinedIndexReader(CachingMultiIndexReader[] indexReaders) throws IOException {
+ super(indexReaders);
+ this.subReaders = indexReaders;
+ this.starts = new int[subReaders.length + 1];
- int maxDoc = 0;
- for (int i = 0; i < subReaders.length; i++)
- {
- starts[i] = maxDoc;
- maxDoc += subReaders[i].maxDoc();
- }
- starts[subReaders.length] = maxDoc;
+ int maxDoc = 0;
+ for (int i = 0; i < subReaders.length; i++) {
+ starts[i] = maxDoc;
+ maxDoc += subReaders[i].maxDoc();
}
+ starts[subReaders.length] = maxDoc;
+ }
- /**
- * {@inheritDoc}
- */
- public ForeignSegmentDocId createDocId(String uuid) throws IOException
- {
- for (int i = 0; i < subReaders.length; i++)
- {
- CachingMultiIndexReader subReader = subReaders[i];
- ForeignSegmentDocId doc = subReader.createDocId(uuid);
- if (doc != null)
- {
- return doc;
- }
- }
- return null;
+ /**
+ * {@inheritDoc}
+ */
+ public ForeignSegmentDocId createDocId(String uuid) throws IOException {
+ for (int i = 0; i < subReaders.length; i++) {
+ CachingMultiIndexReader subReader = subReaders[i];
+ ForeignSegmentDocId doc = subReader.createDocId(uuid);
+ if (doc != null) {
+ return doc;
+ }
}
+ return null;
+ }
- // -------------------------< MultiIndexReader >-------------------------
+ // -------------------------< MultiIndexReader >-------------------------
- public boolean equals(Object obj)
- {
- if (obj instanceof CombinedIndexReader)
- {
- CombinedIndexReader other = (CombinedIndexReader)obj;
- return Arrays.equals(subReaders, other.subReaders);
- }
- return false;
+ public boolean equals(Object obj) {
+ if (obj instanceof CombinedIndexReader) {
+ CombinedIndexReader other = (CombinedIndexReader) obj;
+ return Arrays.equals(subReaders, other.subReaders);
}
+ return false;
+ }
- // ---------------------------< internal >-------------------------------
+ // ---------------------------< internal >-------------------------------
- /**
- * {@inheritDoc}
- */
- public int getDocumentNumber(ForeignSegmentDocId docId)
- {
- for (int i = 0; i < subReaders.length; i++)
- {
- CachingMultiIndexReader subReader = subReaders[i];
- int realDoc = subReader.getDocumentNumber(docId);
- if (realDoc >= 0)
- {
- return realDoc;
- }
- }
- return -1;
+ /**
+ * {@inheritDoc}
+ */
+ public int getDocumentNumber(ForeignSegmentDocId docId) {
+ for (int i = 0; i < subReaders.length; i++) {
+ CachingMultiIndexReader subReader = subReaders[i];
+ int realDoc = subReader.getDocumentNumber(docId);
+ if (realDoc >= 0) {
+ return realDoc;
+ }
}
+ return -1;
+ }
- /**
- * {@inheritDoc}
- */
- public IndexReader[] getIndexReaders()
- {
- IndexReader readers[] = new IndexReader[subReaders.length];
- System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
- return readers;
- }
+ /**
+ * {@inheritDoc}
+ */
+ public IndexReader[] getIndexReaders() {
+ IndexReader readers[] = new IndexReader[subReaders.length];
+ System.arraycopy(subReaders, 0, readers, 0, subReaders.length);
+ return readers;
+ }
- /**
- * @inheritDoc
- */
- public int getParent(int n) throws IOException
- {
- int i = readerIndex(n);
- DocId id = subReaders[i].getParentDocId(n - starts[i]);
- id = id.applyOffset(starts[i]);
- return id.getDocumentNumber(this);
+ /**
+ * {@inheritDoc}
+ */
+ public void release() throws IOException {
+ for (int i = 0; i < subReaders.length; i++) {
+ subReaders[i].release();
}
+ }
- public int hashCode()
- {
- int hash = 0;
- for (int i = 0; i < subReaders.length; i++)
- {
- hash = 31 * hash + subReaders[i].hashCode();
- }
- return hash;
+ /**
+ * @inheritDoc
+ */
+ public int getParent(int n) throws IOException {
+ int i = readerIndex(n);
+ DocId id = subReaders[i].getParentDocId(n - starts[i]);
+ id = id.applyOffset(starts[i]);
+ return id.getDocumentNumber(this);
+ }
+
+ public int hashCode() {
+ int hash = 0;
+ for (int i = 0; i < subReaders.length; i++) {
+ hash = 31 * hash + subReaders[i].hashCode();
}
+ return hash;
+ }
- /**
-       * Returns the reader index for document <code>n</code>. Implementation
- * copied from lucene MultiReader class.
- *
- * @param n document number.
- * @return the reader index.
- */
- private int readerIndex(int n)
- {
- int lo = 0; // search starts array
- int hi = subReaders.length - 1; // for first element less
+ /**
+ * Returns the reader index for document <code>n</code>. Implementation
+ * copied from lucene MultiReader class.
+ *
+ * @param n document number.
+ * @return the reader index.
+ */
+ private int readerIndex(int n) {
+ int lo = 0; // search starts array
+ int hi = subReaders.length - 1; // for first element less
- while (hi >= lo)
- {
- int mid = (lo + hi) >> 1;
- int midValue = starts[mid];
- if (n < midValue)
- {
- hi = mid - 1;
- }
- else if (n > midValue)
- {
- lo = mid + 1;
- }
- else
- { // found a match
-                while (mid + 1 < subReaders.length && starts[mid + 1] == midValue)
- {
- mid++; // scan to last match
- }
- return mid;
- }
- }
- return hi;
+ while (hi >= lo) {
+ int mid = (lo + hi) >> 1;
+ int midValue = starts[mid];
+ if (n < midValue) {
+ hi = mid - 1;
+ } else if (n > midValue) {
+ lo = mid + 1;
+ } else { // found a match
+          while (mid + 1 < subReaders.length && starts[mid + 1] == midValue) {
+ mid++; // scan to last match
+ }
+ return mid;
+ }
}
- }
+ return hi;
+ }
+ }
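
The new release() method ties the combined reader into the reference-counting scheme: releasing it forwards to every sub-reader instead of closing files directly. Illustrative only, since the class is an internal helper (wsReader and sysReader are assumed to have been obtained elsewhere):

    // Sketch: the combined reader's lifecycle under reference counting.
    CombinedIndexReader combined = new CombinedIndexReader(new CachingMultiIndexReader[] { wsReader, sysReader });
    try {
        // search across the workspace and /jcr:system in one view
    } finally {
        combined.release(); // forwards to wsReader.release() and sysReader.release()
    }
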
- public QueryHandlerEntryWrapper getQueryHandlerConfig()
- {
- return queryHandlerConfig;
- }
+ public QueryHandlerEntryWrapper getQueryHandlerConfig() {
+ return queryHandlerConfig;
+ }
- /**
- * Log unindexed changes into error.log
- *
- * @param removed set of removed node uuids
- * @param added map of added node states and uuids
- * @throws IOException
- */
- public void logErrorChanges(Set<String> removed, Set<String> added) throws IOException
- {
- // backup the remove and add iterators
- errorLog.writeChanges(removed, added);
- }
+ /**
+ * Log unindexed changes into error.log
+ *
+ * @param removed set of removed node uuids
+ * @param added map of added node states and uuids
+ * @throws IOException
+ */
+ public void logErrorChanges(Set<String> removed, Set<String> added) throws IOException {
+ // backup the remove and add iterators
+ errorLog.writeChanges(removed, added);
+ }
}
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedIndexReader.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedIndexReader.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedIndexReader.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,147 +16,74 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.util.BitSet;
-import java.util.IdentityHashMap;
-import java.util.Map;
-
-import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
+import java.io.IOException;
+import java.util.BitSet;
+
/**
- * Implements an <code>IndexReader</code>, that will close when all connected clients are
- * disconnected AND the <code>SharedIndexReader</code>s <code>close()</code> method itself has been
- * called.
+ * Implements an <code>IndexReader</code>, that will close when all connected
+ * clients are disconnected AND the <code>SharedIndexReader</code>s
+ * <code>close()</code> method itself has been called.
*/
-class SharedIndexReader
- extends FilterIndexReader
-{
+class SharedIndexReader extends RefCountingIndexReader {
- /**
- * Set to <code>true</code> if this index reader should be closed, when all connected clients are
- * disconnected.
- */
- private boolean closeRequested = false;
+ /**
+ * Creates a new <code>SharedIndexReader</code> which is based on
+ * <code>in</code>.
+ *
+ * @param in the underlying <code>IndexReader</code>.
+ */
+ public SharedIndexReader(CachingIndexReader in) {
+ super(in);
+ }
- /**
- * Map of all registered clients to this shared index reader. The Map is rather used as a Set,
- * because each value is the same Object as its associated key.
- */
- private final Map clients = new IdentityHashMap();
+ /**
+ * Returns the tick value when the underlying {@link CachingIndexReader} was
+ * created.
+ *
+ * @return the creation tick for the underlying reader.
+ */
+ long getCreationTick() {
+ return getBase().getCreationTick();
+ }
- /**
- * Creates a new <code>SharedIndexReader</code> which is based on <code>in</code>.
- *
- * @param in
- * the underlying <code>IndexReader</code>.
- */
- public SharedIndexReader(CachingIndexReader in)
- {
- super(in);
- }
+ /**
+ * Returns the <code>DocId</code> of the parent of <code>n</code> or
+ * {@link DocId#NULL} if <code>n</code> does not have a parent (<code>n</code>
+ * is the root node).
+ *
+ * @param n the document number.
+ * @param deleted the documents that should be regarded as deleted.
+ * @return the <code>DocId</code> of <code>n</code>'s parent.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public DocId getParent(int n, BitSet deleted) throws IOException {
+ return getBase().getParent(n, deleted);
+ }
- /**
- * Returns the tick value when the underlying {@link CachingIndexReader} was created.
- *
- * @return the creation tick for the underlying reader.
- */
- long getCreationTick()
- {
- return getBase().getCreationTick();
- }
+ /**
+ * Simply passes the call to the wrapped reader as is.<br/>
+ * If <code>term</code> is for a {@link FieldNames#UUID} field and this
+ * <code>SharedIndexReader</code> does not have such a document,
+ * {@link CachingIndexReader#EMPTY} is returned.
+ *
+ * @param term the term to enumerate the docs for.
+ * @return TermDocs for <code>term</code>.
+ * @throws IOException if an error occurs while reading from the index.
+ */
+ public TermDocs termDocs(Term term) throws IOException {
+ return in.termDocs(term);
+ }
- /**
- * Returns the <code>DocId</code> of the parent of <code>n</code> or {@link DocId#NULL} if
- * <code>n</code> does not have a parent (<code>n</code> is the root node).
- *
- * @param n
- * the document number.
- * @param deleted
- * the documents that should be regarded as deleted.
- * @return the <code>DocId</code> of <code>n</code>'s parent.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public DocId getParent(int n, BitSet deleted) throws IOException
- {
- return getBase().getParent(n, deleted);
- }
+ /**
+ * Returns the {@link CachingIndexReader} this reader is based on.
+ *
+ * @return the {@link CachingIndexReader} this reader is based on.
+ */
+ public CachingIndexReader getBase() {
+ return (CachingIndexReader) in;
+ }
- /**
- * Registeres <code>client</code> with this reader. As long as clients are registered, this shared
- * reader will not release resources on {@link #close()} and will not actually close but only
- * marks itself to close when the last client is unregistered.
- *
- * @param client
- * the client to register.
- */
- public synchronized void addClient(Object client)
- {
- clients.put(client, client);
- }
-
- /**
- * Unregisters the <code>client</code> from this index reader.
- *
- * @param client
- * a client of this reader.
- * @throws IOException
- * if an error occurs while detaching the client from this shared reader.
- */
- public synchronized void removeClient(Object client) throws IOException
- {
- clients.remove(client);
- if (clients.isEmpty() && closeRequested)
- {
- super.doClose();
- }
- }
-
- /**
- * Closes this index if no client is registered, otherwise this reader is marked to close when the
- * last client is disconnected.
- *
- * @throws IOException
- * if an error occurs while closing.
- */
- protected synchronized void doClose() throws IOException
- {
- if (clients.isEmpty())
- {
- super.doClose();
- }
- else
- {
- closeRequested = true;
- }
- }
-
- /**
- * Simply passes the call to the wrapped reader as is.<br/> If <code>term</code> is for a
- * {@link FieldNames#UUID} field and this <code>SharedIndexReader</code> does not have such a
- * document, {@link CachingIndexReader#EMPTY} is returned.
- *
- * @param term
- * the term to enumerate the docs for.
- * @return TermDocs for <code>term</code>.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- public TermDocs termDocs(Term term) throws IOException
- {
- return in.termDocs(term);
- }
-
- /**
- * Returns the {@link CachingIndexReader} this reader is based on.
- *
- * @return the {@link CachingIndexReader} this reader is based on.
- */
- public CachingIndexReader getBase()
- {
- return (CachingIndexReader) in;
- }
-
}
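
The client bookkeeping (addClient/removeClient plus the closeRequested flag) is gone entirely; SharedIndexReader now inherits its lifecycle from the new RefCountingIndexReader base class, so client registration becomes plain reference counting. Only release() is visible in this diff; the acquire-side naming below is an assumption:

    // Sketch (acquire() is assumed from RefCountingIndexReader; only release() appears in this diff).
    sharedReader.acquire();              // old: sharedReader.addClient(consumer)
    try {
        // read from the shared reader
    } finally {
        sharedReader.release();          // old: sharedReader.removeClient(consumer)
        // the underlying CachingIndexReader is closed once the count reaches zero
    }
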
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Util.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Util.java	2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/Util.java	2009-09-03 08:15:16 UTC (rev 133)
@@ -16,15 +16,18 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.exoplatform.services.jcr.datamodel.ValueData;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
import java.io.IOException;
import java.util.Enumeration;
-import org.exoplatform.services.log.Log;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import javax.jcr.PropertyType;
-import org.exoplatform.services.log.ExoLogger;
-
/**
* <code>Util</code> provides various static utility methods.
*/
@@ -37,17 +40,16 @@
private static final Log log = ExoLogger.getLogger(Util.class);
/**
- * Disposes the document <code>old</code>. Closes any potentially open readers held by the
- * document.
+ * Disposes the document <code>old</code>. Closes any potentially open readers
+ * held by the document.
*
- * @param old
- * the document to dispose.
+ * @param old the document to dispose.
*/
public static void disposeDocument(Document old)
{
for (Enumeration e = old.fields(); e.hasMoreElements();)
{
- Field f = (Field) e.nextElement();
+ Field f = (Field)e.nextElement();
if (f.readerValue() != null)
{
try
@@ -63,15 +65,55 @@
}
/**
- * Returns <code>true</code> if the document is ready to be added to the index. That is all text
- * extractors have finished their work.
+ * Returns <code>true</code> if the document is ready to be added to the
+ * index. That is all text extractors have finished their work.
*
- * @param doc
- * the document to check.
- * @return <code>true</code> if the document is ready; <code>false</code> otherwise.
+ * @param doc the document to check.
+ * @return <code>true</code> if the document is ready; <code>false</code>
+ * otherwise.
*/
public static boolean isDocumentReady(Document doc)
{
return true;
}
+
+ /**
+ * Depending on the type of the <code>reader</code> this method either closes
+ * or releases the reader. The reader is released if it implements
+ * {@link ReleaseableIndexReader}.
+ *
+ * @param reader the index reader to close or release.
+ * @throws IOException if an error occurs while closing or releasing the index
+ * reader.
+ */
+ public static void closeOrRelease(IndexReader reader) throws IOException
+ {
+ if (reader instanceof ReleaseableIndexReader)
+ {
+ ((ReleaseableIndexReader)reader).release();
+ }
+ else
+ {
+ reader.close();
+ }
+ }
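
Typical call site for the new helper, for code paths that may hold either a plain Lucene reader or one of the new reference-counted wrappers:

    // Sketch: free a reader uniformly, whatever its concrete type.
    IndexReader reader = searchIndex.getIndexReader();
    try {
        // use the reader
    } finally {
        Util.closeOrRelease(reader); // release() if ReleaseableIndexReader, close() otherwise
    }
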
+
+ /**
+ * Returns length of the internal value.
+ *
+ * @param value a value.
+ * @return the length of the internal value or <code>-1</code> if the length
+ * cannot be determined.
+ */
+ public static long getLength(ValueData value, int propertyType)
+ {
+ if (propertyType == PropertyType.NAME || propertyType == PropertyType.PATH)
+ {
+ return -1;
+ }
+ else
+ {
+ return value.getLength();
+ }
+ }
}
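
getLength() reports -1 for NAME and PATH values, presumably because those are persisted in an internal, namespace-mapped form whose length says nothing about the user-visible value; for all other types the stored length is returned as-is. For example ("value" is some ValueData instance):

    long stringLen = Util.getLength(value, PropertyType.STRING); // real stored length
    long nameLen = Util.getLength(value, PropertyType.NAME);     // -1: internal form differs
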
Modified:
jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/VolatileIndex.java
===================================================================
--- jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/VolatileIndex.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/VolatileIndex.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -16,167 +16,149 @@
*/
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.IOException;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
+import java.io.IOException;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
/**
* Implements an in-memory index with a pending buffer.
*/
-class VolatileIndex
- extends AbstractIndex
-{
+class VolatileIndex extends AbstractIndex {
- /**
- * Default value for {@link #bufferSize}.
- */
- private static final int DEFAULT_BUFFER_SIZE = 10;
+ /**
+ * Default value for {@link #bufferSize}.
+ */
+ private static final int DEFAULT_BUFFER_SIZE = 10;
- /**
- * Map of pending documents to add to the index
- */
- private final Map<String, Document> pending = new LinkedHashMap<String, Document>();
+ /**
+ * Map of pending documents to add to the index
+ */
+ private final Map<String, Document> pending = new LinkedHashMap<String, Document>();
- /**
- * Number of documents that are buffered before they are added to the index.
- */
- private int bufferSize = DEFAULT_BUFFER_SIZE;
+ /**
+ * Number of documents that are buffered before they are added to the index.
+ */
+ private int bufferSize = DEFAULT_BUFFER_SIZE;
- /**
- * The number of documents in this index.
- */
- private int numDocs = 0;
+ /**
+ * The number of documents in this index.
+ */
+ private int numDocs = 0;
- /**
- * Creates a new <code>VolatileIndex</code> using an <code>analyzer</code>.
- *
- * @param analyzer
- * the analyzer to use.
- * @param indexingQueue
- * the indexing queue.
- * @throws IOException
- * if an error occurs while opening the index.
- */
- VolatileIndex(Analyzer analyzer, IndexingQueue indexingQueue) throws IOException
- {
- super(analyzer, new RAMDirectory(), null, indexingQueue);
- }
+ /**
+ * Creates a new <code>VolatileIndex</code> using an <code>analyzer</code>.
+ *
+ * @param analyzer the analyzer to use.
+ * @param indexingQueue the indexing queue.
+ * @throws IOException if an error occurs while opening the index.
+ */
+ VolatileIndex(Analyzer analyzer, IndexingQueue indexingQueue) throws IOException {
+ super(analyzer, new RAMDirectory(), null, indexingQueue);
+ }
- /**
- * Overwrites the default implementation by adding the documents to a pending list and commits the
- * pending list if needed.
- *
- * @param docs
- * the documents to add to the index.
- * @throws IOException
- * if an error occurs while writing to the index.
- */
- void addDocuments(Document[] docs) throws IOException
- {
- for (int i = 0; i < docs.length; i++)
- {
- Document old = pending.put(docs[i].get(FieldNames.UUID), docs[i]);
- if (old != null)
- {
- Util.disposeDocument(old);
- }
- if (pending.size() >= bufferSize)
- {
- commitPending();
- }
- numDocs++;
+ /**
+ * Overwrites the default implementation by adding the documents to a pending
+ * list and commits the pending list if needed.
+ *
+ * @param docs the documents to add to the index.
+ * @throws IOException if an error occurs while writing to the index.
+ */
+ void addDocuments(Document[] docs) throws IOException {
+ for (int i = 0; i < docs.length; i++) {
+ Document old = pending.put(docs[i].get(FieldNames.UUID), docs[i]);
+ if (old != null) {
+ Util.disposeDocument(old);
}
- invalidateSharedReader();
- }
-
- /**
- * Overwrites the default implementation to remove the document from the pending list if it is
- * present or simply calls <code>super.removeDocument()</code>.
- *
- * @param idTerm
- * the uuid term of the document to remove.
- * @return the number of deleted documents
- * @throws IOException
- * if an error occurs while removing the document from the index.
- */
- int removeDocument(Term idTerm) throws IOException
- {
- Document doc = pending.remove(idTerm.text());
- int num;
- if (doc != null)
- {
- Util.disposeDocument(doc);
- // pending document has been removed
- num = 1;
+ if (pending.size() >= bufferSize) {
+ commitPending();
}
- else
- {
- // remove document from index
- num = super.getIndexReader().deleteDocuments(idTerm);
- }
- numDocs -= num;
- return num;
- }
+ numDocs++;
+ }
+ invalidateSharedReader();
+ }
- /**
- * Returns the number of valid documents in this index.
- *
- * @return the number of valid documents in this index.
- */
- int getNumDocuments() throws IOException
- {
- return numDocs;
- }
+ /**
+ * Overwrites the default implementation to remove the document from the
+ * pending list if it is present or simply calls
+ * <code>super.removeDocument()</code>.
+ *
+ * @param idTerm the uuid term of the document to remove.
+ * @return the number of deleted documents
+ * @throws IOException if an error occurs while removing the document from the
+ * index.
+ */
+ int removeDocument(Term idTerm) throws IOException {
+ Document doc = pending.remove(idTerm.text());
+ int num;
+ if (doc != null) {
+ Util.disposeDocument(doc);
+ // pending document has been removed
+ num = 1;
+ } else {
+ // remove document from index
+ num = super.getIndexReader().deleteDocuments(idTerm);
+ }
+ numDocs -= num;
+ return num;
+ }
- /**
- * Overwrites the implementation in {@link AbstractIndex} to trigger commit of pending documents
- * to index.
- *
- * @return the index reader for this index.
- * @throws IOException
- * if an error occurs building a reader.
- */
- protected synchronized CommittableIndexReader getIndexReader() throws IOException
- {
- commitPending();
- return super.getIndexReader();
- }
+ /**
+ * Returns the number of valid documents in this index.
+ *
+ * @return the number of valid documents in this index.
+ */
+ int getNumDocuments() throws IOException {
+ return numDocs;
+ }
- /**
- * Overwrites the implementation in {@link AbstractIndex} to commit pending documents.
- *
- * @param optimize
- * if <code>true</code> the index is optimized after the commit.
- */
- protected synchronized void commit(boolean optimize) throws IOException
- {
- commitPending();
- super.commit(optimize);
- }
+ /**
+ * Overwrites the implementation in {@link AbstractIndex} to trigger commit of
+ * pending documents to index.
+ *
+ * @return the index reader for this index.
+ * @throws IOException if an error occurs building a reader.
+ */
+ protected synchronized CommittableIndexReader getIndexReader() throws IOException {
+ commitPending();
+ return super.getIndexReader();
+ }
- /**
- * Sets a new buffer size for pending documents to add to the index. Higher values consume more
- * memory, but help to avoid multiple index cycles when a node is changed / saved multiple times.
- *
- * @param size
- * the new buffer size.
- */
- void setBufferSize(int size)
- {
- bufferSize = size;
- }
+ /**
+ * Overwrites the implementation in {@link AbstractIndex} to commit pending
+ * documents.
+ *
+ * @param optimize if <code>true</code> the index is optimized after the
+ * commit.
+ */
+ protected synchronized void commit(boolean optimize) throws IOException {
+ commitPending();
+ super.commit(optimize);
+ }
- /**
- * Commits pending documents to the index.
- */
- private void commitPending() throws IOException
- {
- super.addDocuments(pending.values().toArray(new Document[pending.size()]));
- pending.clear();
- }
+ /**
+ * Sets a new buffer size for pending documents to add to the index. Higher
+ * values consume more memory, but help to avoid multiple index cycles when a
+ * node is changed / saved multiple times.
+ *
+ * @param size the new buffer size.
+ */
+ void setBufferSize(int size) {
+ bufferSize = size;
+ }
+
+ /**
+ * Commits pending documents to the index.
+ */
+ private void commitPending() throws IOException {
+ if (pending.isEmpty()) {
+ return;
+ }
+ super.addDocuments(pending.values().toArray(new Document[pending.size()]));
+ pending.clear();
+ }
}
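
VolatileIndex keeps pending documents in a LinkedHashMap keyed by UUID, so several saves of the same node between flushes collapse into a single index write, and the isEmpty() check added above spares commitPending() an empty batch. A simplified, self-contained model of that buffering behaviour; PendingBufferSketch is hypothetical and only mirrors the map handling, not the real class (which extends AbstractIndex and writes Lucene Documents):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PendingBufferSketch
    {
       // Mirrors DEFAULT_BUFFER_SIZE in VolatileIndex.
       private static final int BUFFER_SIZE = 10;

       // Insertion-ordered buffer keyed by document UUID.
       private final Map<String, String> pending = new LinkedHashMap<String, String>();

       public void add(String uuid, String doc)
       {
          // A newer version of the same uuid replaces the buffered one,
          // so only the latest state is ever written to the index.
          pending.put(uuid, doc);
          if (pending.size() >= BUFFER_SIZE)
          {
             flush();
          }
       }

       private void flush()
       {
          if (pending.isEmpty())
          {
             return; // mirrors the early return added to commitPending()
          }
          // ... write pending.values() to the index in one batch ...
          pending.clear();
       }
    }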
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestNodeIndexer.java
===================================================================
--- jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestNodeIndexer.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestNodeIndexer.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -17,18 +17,8 @@
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.io.ByteArrayInputStream;
-import java.util.Calendar;
-import java.util.List;
-import java.util.Vector;
-
-import javax.jcr.Node;
-import javax.jcr.PropertyType;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-
import org.exoplatform.commons.utils.MimeTypeResolver;
import org.exoplatform.services.document.DocumentReaderService;
import org.exoplatform.services.document.impl.DocumentReaderServiceImpl;
@@ -38,15 +28,23 @@
import org.exoplatform.services.jcr.impl.core.NamespaceRegistryImpl;
import org.exoplatform.services.jcr.impl.core.NodeImpl;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.io.ByteArrayInputStream;
+import java.util.Calendar;
+import java.util.List;
+import java.util.Vector;
+
+import javax.jcr.Node;
+import javax.jcr.PropertyType;
+
/**
* Created by The eXo Platform SAS Author : Sergey Karpenko <sergey.karpenko@exoplatform.com.ua>
*
* @version $Id: TestNodeIndexer.java 11907 2008-03-13 15:36:21Z ksm $
*/
-public class TestNodeIndexer
- extends JcrImplBaseTest
+public class TestNodeIndexer extends JcrImplBaseTest
{
public static final Log logger = ExoLogger.getLogger(TestNodeIndexer.class);
@@ -89,16 +87,16 @@
ItemDataConsumer manager = this.session.getTransientNodesManager();
- node = (Node) this.session.getItem("/test");
+ node = (Node)this.session.getItem("/test");
- NodeData data = (NodeData) ((NodeImpl) node).getData();
+ NodeData data = (NodeData)((NodeImpl)node).getData();
assertNotNull(data);
DocumentReaderService extractor =
- (DocumentReaderService) container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
+ (DocumentReaderService)container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
NodeIndexer indexer =
- new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings(
- (NamespaceRegistryImpl) this.repository.getNamespaceRegistry()), extractor);
+ new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings((NamespaceRegistryImpl)this.repository
+ .getNamespaceRegistry()), extractor);
Document doc = indexer.createDoc();
@@ -128,12 +126,12 @@
assertEquals(2, props.size());
// :PROPERTIES jcr:primaryType "jcr:prop" + '\uFFFF' + "prop value"
- List<Field> prop1 = this.findField(list, FieldNames.PROPERTIES, "jcr:prop" + '\uFFFF' + "prop value");
+ List<Field> prop1 = this.findField(list, FieldNames.PROPERTIES, "jcr:prop" + '[' + "prop value");
assertNotNull(prop1);
assertEquals(1, prop1.size());
// :PROPERTIES jcr:primaryType "jcr:prop" + '\uFFFF' + "prop value"
- List<Field> prop2 = this.findField(list, FieldNames.PROPERTIES, "jcr:primaryType" + '\uFFFF' + "nt:unstructured");
+ List<Field> prop2 = this.findField(list, FieldNames.PROPERTIES, "jcr:primaryType" + '[' + "nt:unstructured");
assertNotNull(prop2);
assertEquals(1, prop2.size());
@@ -176,14 +174,14 @@
// ((NodeImpl)node).getData();
- NodeData data = (NodeData) ((NodeImpl) root).getData();
+ NodeData data = (NodeData)((NodeImpl)root).getData();
assertNotNull(data);
DocumentReaderService extractor =
- (DocumentReaderService) container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
+ (DocumentReaderService)container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
NodeIndexer indexer =
- new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings(
- (NamespaceRegistryImpl) this.repository.getNamespaceRegistry()), extractor);
+ new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings((NamespaceRegistryImpl)this.repository
+ .getNamespaceRegistry()), extractor);
Document doc = indexer.createDoc();
@@ -267,14 +265,14 @@
ItemDataConsumer manager = this.session.getTransientNodesManager();
- NodeData data = (NodeData) ((NodeImpl) node).getData();
+ NodeData data = (NodeData)((NodeImpl)node).getData();
assertNotNull(data);
DocumentReaderService extractor =
- (DocumentReaderService) container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
+ (DocumentReaderService)container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
NodeIndexer indexer =
- new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings(
- (NamespaceRegistryImpl) this.repository.getNamespaceRegistry()), extractor);
+ new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings((NamespaceRegistryImpl)this.repository
+ .getNamespaceRegistry()), extractor);
Document doc = indexer.createDoc();
@@ -338,14 +336,14 @@
ItemDataConsumer manager = this.session.getTransientNodesManager();
- NodeData data = (NodeData) ((NodeImpl) node).getData();
+ NodeData data = (NodeData)((NodeImpl)node).getData();
assertNotNull(data);
DocumentReaderService extractor =
- (DocumentReaderService) container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
+ (DocumentReaderService)container.getComponentInstanceOfType(DocumentReaderServiceImpl.class);
NodeIndexer indexer =
- new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings(
- (NamespaceRegistryImpl) this.repository.getNamespaceRegistry()), extractor);
+ new NodeIndexer(data, manager, new NSRegistryBasedNamespaceMappings((NamespaceRegistryImpl)this.repository
+ .getNamespaceRegistry()), extractor);
Document doc = indexer.createDoc();
@@ -378,13 +376,13 @@
assertEquals(2, props.size());
// :PROPERTIES jcr:primaryType "pathprop" + '\uFFFF' + "/wooo"
- List<Field> prop1 = this.findField(list, FieldNames.PROPERTIES, "pathprop" + '\uFFFF' + "/wooo");
+ List<Field> prop1 = this.findField(list, FieldNames.PROPERTIES, "pathprop" + '[' + "/wooo");
assertNotNull(prop1);
assertEquals(1, prop1.size());
// :PROPERTIES jcr:primaryType "jcr:primaryType" + '\uFFFF' + "jcr:primaryType" + '\uFFFF' +
// "nt:unstructured"
- List<Field> prop2 = this.findField(list, FieldNames.PROPERTIES, "jcr:primaryType" + '\uFFFF' + "nt:unstructured");
+ List<Field> prop2 = this.findField(list, FieldNames.PROPERTIES, "jcr:primaryType" + '[' + "nt:unstructured");
assertNotNull(prop2);
assertEquals(1, prop2.size());
@@ -415,7 +413,7 @@
for (int i = 0; i < list.size(); i++)
{
if ((list.get(i).name().equalsIgnoreCase(fieldName)) && (list.get(i).stringValue() != null)
- && (list.get(i).stringValue().equals(fieldStringValue)))
+ && (list.get(i).stringValue().equals(fieldStringValue)))
{
out.add(list.get(i));
}
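
The assertions above encode the new :PROPERTIES term layout: property name and value are now joined with '[' where the old index format used '\uFFFF'. A sketch of building such a term for a query; createNamedValue() here is a local stand-in, since the real composition presumably lives in the modified FieldNames.java:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.TermQuery;

    public class NamedValueSketch
    {
       // Stand-in for the name/value concatenation the tests now expect.
       private static String createNamedValue(String name, String value)
       {
          return name + '[' + value;
       }

       // Builds the exact term a property-value lookup matches against.
       public static TermQuery propertyQuery(String propName, String value)
       {
          return new TermQuery(new Term(FieldNames.PROPERTIES, createNamedValue(propName, value)));
       }
    }

For instance, propertyQuery("jcr:primaryType", "nt:unstructured") produces the same term the prop2 assertions above search for.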
Modified:
jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestSearchManagerIndexing.java
===================================================================
--- jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestSearchManagerIndexing.java 2009-09-03 08:14:09 UTC (rev 132)
+++ jcr/trunk/component/core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestSearchManagerIndexing.java 2009-09-03 08:15:16 UTC (rev 133)
@@ -17,13 +17,6 @@
package org.exoplatform.services.jcr.impl.core.query.lucene;
-import java.util.Properties;
-
-import javax.jcr.NamespaceException;
-import javax.jcr.Node;
-import javax.jcr.RepositoryException;
-
-import org.exoplatform.services.log.Log;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -32,21 +25,26 @@
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.BooleanClause.Occur;
-
import org.exoplatform.services.jcr.JcrImplBaseTest;
import org.exoplatform.services.jcr.datamodel.NodeData;
import org.exoplatform.services.jcr.impl.core.NodeImpl;
import org.exoplatform.services.jcr.impl.core.query.SearchManager;
import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import java.util.Properties;
+
+import javax.jcr.NamespaceException;
+import javax.jcr.Node;
+import javax.jcr.RepositoryException;
+
/**
* Created by The eXo Platform SAS Author : Sergey Karpenko <sergey.karpenko@exoplatform.com.ua>
*
* @version $Id: TestSearchManagerIndexing.java 13111 2008-04-11 08:22:13Z serg $
*/
-public class TestSearchManagerIndexing
- extends JcrImplBaseTest
+public class TestSearchManagerIndexing extends JcrImplBaseTest
{
public static final Log logger = ExoLogger.getLogger(TestSearchManagerIndexing.class);
@@ -55,7 +53,7 @@
public void testAdditionNode() throws Exception
{
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
ir.close();
@@ -82,17 +80,16 @@
// Test is next addings are in index
TermQuery name = new TermQuery(new Term(FieldNames.LABEL, getIndexPrefix(si, "") + "test"));
TermQuery prop1 =
- new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "") + "prop" + '\uFFFF'
- + "string value"));
+ new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "") + "prop" + '[' + "string value"));
TermQuery prop2 =
- new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "primaryType" + '\uFFFF'
- + getIndexPrefix(si, "nt") + "unstructured"));
+ new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "primaryType" + '['
+ + getIndexPrefix(si, "nt") + "unstructured"));
TermQuery full1 = new TermQuery(new Term(FieldNames.FULLTEXT, "string"));
TermQuery full2 = new TermQuery(new Term(FieldNames.FULLTEXT, "value"));
TermQuery fullprop1 =
- new TermQuery(new Term(FieldNames.createFullTextFieldName(getIndexPrefix(si, "") + "prop"), "string"));
+ new TermQuery(new Term(FieldNames.createFullTextFieldName(getIndexPrefix(si, "") + "prop"), "string"));
TermQuery fullprop2 =
- new TermQuery(new Term(FieldNames.createFullTextFieldName(getIndexPrefix(si, "") + "prop"), "value"));
+ new TermQuery(new Term(FieldNames.createFullTextFieldName(getIndexPrefix(si, "") + "prop"), "value"));
BooleanQuery compl = new BooleanQuery();
compl.add(name, Occur.MUST);
@@ -114,7 +111,7 @@
{
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
@@ -144,7 +141,7 @@
public void testMoveNode() throws Exception
{
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
ir.close();
@@ -157,7 +154,7 @@
assertEquals(docnum + 2, ir.numDocs());
IndexSearcher is = new IndexSearcher(ir);
- NodeData data = (NodeData) ((NodeImpl) node).getData();
+ NodeData data = (NodeData)((NodeImpl)node).getData();
TermQuery query = new TermQuery(new Term(FieldNames.UUID, data.getIdentifier()));
Hits hits = is.search(query);
@@ -165,8 +162,7 @@
Document doc = hits.doc(0);
// check that "node" parent is "mid"
- assertEquals(((NodeData) ((NodeImpl) mid).getData()).getIdentifier(), doc.getField(FieldNames.PARENT)
- .stringValue());
+ assertEquals(((NodeData)((NodeImpl)mid).getData()).getIdentifier(), doc.getField(FieldNames.PARENT).stringValue());
ir.close();
is.close();
@@ -184,8 +180,8 @@
doc = hits.doc(0);
// check that "node" parent is root-node
- assertEquals(((NodeData) ((NodeImpl) root).getData()).getIdentifier(), doc.getField(FieldNames.PARENT)
- .stringValue());
+ assertEquals(((NodeData)((NodeImpl)root).getData()).getIdentifier(), doc.getField(FieldNames.PARENT)
+ .stringValue());
ir.close();
is.close();
@@ -197,7 +193,7 @@
final String newNodeName = "newName";
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
ir.close();
@@ -235,7 +231,7 @@
public void testSameName() throws Exception
{
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
ir.close();
@@ -259,7 +255,7 @@
public void testAddMixinType() throws Exception
{
assertNotNull(manager);
- SearchIndex si = (SearchIndex) manager.getHandler();
+ SearchIndex si = (SearchIndex)manager.getHandler();
IndexReader ir = si.getIndex().getIndexReader();
int docnum = ir.numDocs();
ir.close();
@@ -274,8 +270,8 @@
TermQuery name = new TermQuery(new Term(FieldNames.LABEL, getIndexPrefix(si, "") + nodeName));
TermQuery prop1 =
- new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "primaryType" + '\uFFFF'
- + getIndexPrefix(si, "nt") + "unstructured"));
+ new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "primaryType" + '['
+ + getIndexPrefix(si, "nt") + "unstructured"));
BooleanQuery compl = new BooleanQuery();
compl.add(name, Occur.MUST);
compl.add(prop1, Occur.MUST);
@@ -292,8 +288,7 @@
assertEquals(docnum + 1, ir.numDocs());
TermQuery prop2 =
- new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "uuid" + '\uFFFF'
- + node.getUUID()));
+ new TermQuery(new Term(FieldNames.PROPERTIES, getIndexPrefix(si, "jcr") + "uuid" + '[' + node.getUUID()));
compl.add(prop2, Occur.MUST);
is = new IndexSearcher(ir);
@@ -304,7 +299,7 @@
public void setUp() throws Exception
{
super.setUp();
- manager = (SearchManager) this.session.getContainer().getComponentInstanceOfType(SearchManager.class);
+ manager = (SearchManager)this.session.getContainer().getComponentInstanceOfType(SearchManager.class);
}
private String getIndexPrefix(SearchIndex si, String stPref) throws RepositoryException, NamespaceException