[exo-jcr-commits] exo-jcr SVN: r1144 - in jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src: main/java/org/exoplatform/services/jcr/impl/core/query and 5 other directories.
do-not-reply at jboss.org
do-not-reply at jboss.org
Tue Dec 22 08:29:22 EST 2009
Author: skabashnyuk
Date: 2009-12-22 08:29:21 -0500 (Tue, 22 Dec 2009)
New Revision: 1144
Added:
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/DefaultChangesFilter.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerChangesFilter.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerIoMode.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/ChangesFilterListsWrapper.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerCacheLoader.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerSingletonStoreCacheLoader.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JbossCacheIndexChangesFilter.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/AbstractWriteOnlyCacheLoader.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/WriteOnlyCacheLoaderException.java
Modified:
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexingTree.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandlerContext.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchIndexConfigurationHelper.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java
Log:
EXOJCR-325: merge with JBC branch
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/config/QueryHandlerParams.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -92,6 +92,8 @@
public static final String PARAM_USE_COMPOUNDFILE = "use-compoundfile";
public static final String PARAM_VOLATILE_IDLE_TIME = "volatile-idle-time";
+
+ public static final String PARAM_MAX_VOLATILE_SIZE = "max-volatile-size";
//since https://jira.jboss.org/jira/browse/EXOJCR-17
@@ -99,4 +101,7 @@
public static final String PARAM_ANALYZER_CLASS = "analyzer";
+ public static final String PARAM_CHANGES_FILTER_CLASS = "changesfilter-class";
+
+ public static final String PARAM_CHANGES_FILTER_CONFIG_PATH = "changesfilter-config-path";
}
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -21,6 +21,7 @@
import javax.jcr.RepositoryException;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
import org.exoplatform.services.jcr.datamodel.NodeData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,6 +42,8 @@
*/
private QueryHandlerContext context;
+ protected boolean initialized = false;
+
/**
* The {@link OnWorkspaceInconsistency} handler. Defaults to 'fail'.
*/
@@ -58,16 +61,31 @@
private String idleTime;
/**
- * Initializes this query handler by setting all properties in this class
- * with appropriate parameter values.
- *
- * @param context the context for this query handler.
- * @throws RepositoryException
+ * Indexer io mode
*/
- public final void init(QueryHandlerContext context) throws IOException, RepositoryException
+ protected IndexerIoMode ioMode = IndexerIoMode.READ_ONLY;
+
+ public boolean isInitialized()
{
+ return initialized;
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.impl.core.query.QueryHandler#setContext(org.exoplatform.services.jcr.impl.core.query.QueryHandlerContext)
+ */
+ public void setContext(QueryHandlerContext context)
+ {
this.context = context;
+ }
+
+ /**
+ * Initializes QueryHandler with given IoMode (RW/RO)
+ */
+ public void init() throws IOException, RepositoryException, RepositoryConfigurationException
+ {
+ // TODO Auto-generated method stub
doInit();
+ initialized = true;
}
/**
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/DefaultChangesFilter.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/DefaultChangesFilter.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/DefaultChangesFilter.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query;
+
+import org.exoplatform.services.jcr.config.QueryHandlerEntry;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
+import java.io.IOException;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey Kabashnyuk</a>
+ * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ *
+ */
+public class DefaultChangesFilter extends IndexerChangesFilter
+{
+
+ /**
+ * @param searchManager
+ * @param parentSearchManager
+ * @param config
+ * @param indexingTree
+ * @param parentIndexingTree
+ * @param handler
+ * @param parentHandler
+ * @throws IOException
+ * @throws RepositoryConfigurationException
+ * @throws RepositoryException
+ */
+ public DefaultChangesFilter(SearchManager searchManager, SearchManager parentSearchManager,
+ QueryHandlerEntry config, IndexingTree indexingTree, IndexingTree parentIndexingTree, QueryHandler handler,
+ QueryHandler parentHandler) throws IOException, RepositoryConfigurationException, RepositoryException
+ {
+ super(searchManager, parentSearchManager, config, indexingTree, parentIndexingTree, handler, parentHandler);
+ handler.setIndexerIoMode(IndexerIoMode.READ_WRITE);
+ parentHandler.setIndexerIoMode(IndexerIoMode.READ_WRITE);
+
+ if (!parentHandler.isInitialized())
+ {
+ parentHandler.init();
+ }
+ if (!handler.isInitialized())
+ {
+ handler.init();
+ }
+ }
+
+ /**
+ * Logger instance for this class
+ */
+ private static final Log log = ExoLogger.getLogger(DefaultChangesFilter.class);
+
+ /**
+ * @param removedNodes
+ * @param addedNodes
+ * @see org.exoplatform.services.jcr.impl.core.query.IndexerChangesFilter#doUpdateIndex()
+ */
+ @Override
+ protected void doUpdateIndex(Set<String> removedNodes, Set<String> addedNodes, Set<String> parentRemovedNodes,
+ Set<String> parentAddedNodes)
+ {
+
+ try
+ {
+ searchManager.updateIndex(removedNodes, addedNodes);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ handler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+ log.warn("Exception occure when errorLog writed. Error log is not complete. " + ioe, ioe);
+ }
+ }
+
+ try
+ {
+ parentSearchManager.updateIndex(parentRemovedNodes, parentAddedNodes);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ parentHandler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+ log.warn("Exception occure when errorLog writed. Error log is not complete. " + ioe, ioe);
+ }
+ }
+
+ }
+
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/DefaultChangesFilter.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerChangesFilter.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerChangesFilter.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerChangesFilter.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query;
+
+import org.exoplatform.services.jcr.config.QueryHandlerEntry;
+import org.exoplatform.services.jcr.dataflow.ItemState;
+import org.exoplatform.services.jcr.dataflow.ItemStateChangesLog;
+import org.exoplatform.services.jcr.dataflow.persistent.ItemsPersistenceListener;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey Kabashnyuk</a>
+ * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ *
+ */
+public abstract class IndexerChangesFilter implements ItemsPersistenceListener
+{
+ /**
+ * Logger instance for this class
+ */
+ private static final Log log = ExoLogger.getLogger(DefaultChangesFilter.class);
+
+ protected final SearchManager searchManager;
+
+ protected final QueryHandlerEntry config;
+
+ protected final QueryHandler handler;
+
+ protected final QueryHandler parentHandler;
+
+ protected final IndexingTree indexingTree;
+
+ protected final SearchManager parentSearchManager;
+
+ protected final IndexingTree parentIndexingTree;
+
+ /**
+ * @param searchManager
+ * @param isSystem
+ * @param handler
+ * @param indexingTree
+ */
+ public IndexerChangesFilter(SearchManager searchManager, SearchManager parentSearchManager,
+ QueryHandlerEntry config, IndexingTree indexingTree, IndexingTree parentIndexingTree, QueryHandler handler,
+ QueryHandler parentHandler)
+ {
+ super();
+ this.searchManager = searchManager;
+ this.parentSearchManager = parentSearchManager;
+ this.config = config;
+ this.parentIndexingTree = parentIndexingTree;
+ this.indexingTree = indexingTree;
+ this.handler = handler;
+ this.parentHandler = parentHandler;
+ }
+
+ /**
+ * @return the handler
+ */
+ public QueryHandler getHandler()
+ {
+ return handler;
+ }
+
+ /**
+ * @return the indexingTree
+ */
+ public IndexingTree getIndexingTree()
+ {
+ return indexingTree;
+ }
+
+ /**
+ * @return the searchManager
+ */
+ public SearchManager getSearchManager()
+ {
+ return searchManager;
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.dataflow.persistent.ItemsPersistenceListener#onSaveItems(org.exoplatform.services.jcr.dataflow.ItemStateChangesLog)
+ */
+ public void onSaveItems(ItemStateChangesLog itemStates)
+ {
+
+ long time = System.currentTimeMillis();
+
+ // nodes that need to be removed from the index.
+ final Set<String> removedNodes = new HashSet<String>();
+ // nodes that need to be added to the index.
+ final Set<String> addedNodes = new HashSet<String>();
+ //updated
+ final Map<String, List<ItemState>> updatedNodes = new HashMap<String, List<ItemState>>();
+
+ final Set<String> parentRemovedNodes = new HashSet<String>();
+ // nodes that need to be added to the index.
+ final Set<String> parentAddedNodes = new HashSet<String>();
+ //updated
+ final Map<String, List<ItemState>> parentUpdatedNodes = new HashMap<String, List<ItemState>>();
+
+ for (Iterator<ItemState> iter = itemStates.getAllStates().iterator(); iter.hasNext();)
+ {
+ ItemState itemState = iter.next();
+
+ if (!indexingTree.isExcluded(itemState))
+ {
+ acceptChanges(removedNodes, addedNodes, updatedNodes, itemState);
+ }
+ else if (parentIndexingTree != null && !parentIndexingTree.isExcluded(itemState))
+ {
+ acceptChanges(parentRemovedNodes, parentAddedNodes, parentUpdatedNodes, itemState);
+ }
+ }
+
+ for (String uuid : updatedNodes.keySet())
+ {
+ removedNodes.add(uuid);
+ addedNodes.add(uuid);
+ }
+
+ for (String uuid : parentUpdatedNodes.keySet())
+ {
+ parentRemovedNodes.add(uuid);
+ parentAddedNodes.add(uuid);
+ }
+
+ doUpdateIndex(removedNodes, addedNodes, parentRemovedNodes, parentAddedNodes);
+ }
+
+ /**
+ * @param removedNodes
+ * @param addedNodes
+ * @param updatedNodes
+ * @param itemState
+ */
+ private void acceptChanges(final Set<String> removedNodes, final Set<String> addedNodes,
+ final Map<String, List<ItemState>> updatedNodes, ItemState itemState)
+ {
+ {
+ String uuid =
+ itemState.isNode() ? itemState.getData().getIdentifier() : itemState.getData().getParentIdentifier();
+
+ if (itemState.isAdded())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ if (!addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ }
+ else if (itemState.isRenamed())
+ {
+ if (itemState.isNode())
+ {
+ addedNodes.add(uuid);
+ }
+ else
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ else if (itemState.isUpdated())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ else if (itemState.isMixinChanged())
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ else if (itemState.isDeleted())
+ {
+ if (itemState.isNode())
+ {
+ if (addedNodes.contains(uuid))
+ {
+ addedNodes.remove(uuid);
+ removedNodes.remove(uuid);
+ }
+ else
+ {
+ removedNodes.add(uuid);
+ }
+ // remove all changes after node remove
+ updatedNodes.remove(uuid);
+ }
+ else
+ {
+ if (!removedNodes.contains(uuid) && !addedNodes.contains(uuid))
+ {
+ createNewOrAdd(uuid, itemState, updatedNodes);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Update index.
+ * @param removedNodes
+ * @param addedNodes
+ */
+ protected abstract void doUpdateIndex(Set<String> removedNodes, Set<String> addedNodes,
+ Set<String> parentRemovedNodes, Set<String> parentAddedNodes);
+
+ private void createNewOrAdd(String key, ItemState state, Map<String, List<ItemState>> updatedNodes)
+ {
+ List<ItemState> list = updatedNodes.get(key);
+ if (list == null)
+ {
+ list = new ArrayList<ItemState>();
+ updatedNodes.put(key, list);
+ }
+ list.add(state);
+
+ }
+
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerChangesFilter.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerIoMode.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerIoMode.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerIoMode.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query;
+
+/**
+ * @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey Kabashnyuk</a>
+ * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ *
+ */
+public enum IndexerIoMode {
+ /**
+ * Only query
+ */
+ READ_ONLY,
+ /**
+ * query on index and write changes
+ */
+ READ_WRITE
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexerIoMode.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexingTree.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexingTree.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/IndexingTree.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -29,90 +29,97 @@
/**
* @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey
* Kabashnyuk</a>
- * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ * @version $Id: IndexingTree.java 790 2009-11-20 13:45:40Z skabashnyuk $
*
*/
-public class IndexingTree {
- private final QPath indexingRootQpath;
- private final NodeData indexingRoot;
+public class IndexingTree
+{
+ private final QPath indexingRootQpath;
- private final List<QPath> excludedPaths;
+ private final NodeData indexingRoot;
- /**
- * @param indexingRoot
- * @param excludedPaths
- */
- public IndexingTree(NodeData indexingRoot, List<QPath> excludedPaths) {
- super();
- this.indexingRoot = indexingRoot;
- this.indexingRootQpath = indexingRoot.getQPath();
- this.excludedPaths = excludedPaths;
- }
+ private final List<QPath> excludedPaths;
- /**
- * @param indexingRoot
- * @param excludedPaths
- */
- public IndexingTree(NodeData indexingRoot) {
- super();
- this.indexingRoot = indexingRoot;
- this.indexingRootQpath = indexingRoot.getQPath();
- this.excludedPaths = new ArrayList<QPath>();
- }
+ /**
+ * @param indexingRoot
+ * @param excludedPaths
+ */
+ public IndexingTree(NodeData indexingRoot, List<QPath> excludedPaths)
+ {
+ super();
+ this.indexingRoot = indexingRoot;
+ this.indexingRootQpath = indexingRoot.getQPath();
+ this.excludedPaths = excludedPaths;
+ }
- /**
- * @return the excludedPaths
- */
- public List<QPath> getExcludedPaths() {
- return excludedPaths;
- }
+ /**
+ * @param indexingRoot
+ * @param excludedPaths
+ */
+ public IndexingTree(NodeData indexingRoot)
+ {
+ super();
+ this.indexingRoot = indexingRoot;
+ this.indexingRootQpath = indexingRoot.getQPath();
+ this.excludedPaths = new ArrayList<QPath>();
+ }
- /**
- * @return the indexingRoot
- */
- public NodeData getIndexingRoot() {
- return indexingRoot;
- }
+ /**
+ * @return the excludedPaths
+ */
+ public List<QPath> getExcludedPaths()
+ {
+ return excludedPaths;
+ }
- /**
- * Checks if the given event should be excluded based on the
- * {@link #excludePath} setting.
- *
- * @param event
- * observation event
- * @return <code>true</code> if the event should be excluded,
- * <code>false</code> otherwise
- */
- public boolean isExcluded(ItemState event) {
+ /**
+ * @return the indexingRoot
+ */
+ public NodeData getIndexingRoot()
+ {
+ return indexingRoot;
+ }
- for (QPath excludedPath : excludedPaths) {
- if (event.getData().getQPath().isDescendantOf(excludedPath)
- || event.getData().getQPath().equals(excludedPath))
- return true;
- }
+ /**
+ * Checks if the given event should be excluded based on the
+ * {@link #excludePath} setting.
+ *
+ * @param event
+ * observation event
+ * @return <code>true</code> if the event should be excluded,
+ * <code>false</code> otherwise
+ */
+ public boolean isExcluded(ItemState event)
+ {
- return !event.getData().getQPath().isDescendantOf(indexingRootQpath)
- && !event.getData().getQPath().equals(indexingRootQpath);
- }
+ for (QPath excludedPath : excludedPaths)
+ {
+ if (event.getData().getQPath().isDescendantOf(excludedPath) || event.getData().getQPath().equals(excludedPath))
+ return true;
+ }
- /**
- * Checks if the given event should be excluded based on the
- * {@link #excludePath} setting.
- *
- * @param event
- * observation event
- * @return <code>true</code> if the event should be excluded,
- * <code>false</code> otherwise
- */
- public boolean isExcluded(ItemData eventData) {
+ return !event.getData().getQPath().isDescendantOf(indexingRootQpath)
+ && !event.getData().getQPath().equals(indexingRootQpath);
+ }
- for (QPath excludedPath : excludedPaths) {
- if (eventData.getQPath().isDescendantOf(excludedPath)
- || eventData.getQPath().equals(excludedPath))
- return true;
- }
+ /**
+ * Checks if the given event should be excluded based on the
+ * {@link #excludePath} setting.
+ *
+ * @param event
+ * observation event
+ * @return <code>true</code> if the event should be excluded,
+ * <code>false</code> otherwise
+ */
+ public boolean isExcluded(ItemData eventData)
+ {
- return !eventData.getQPath().isDescendantOf(indexingRootQpath)
- && !eventData.getQPath().equals(indexingRootQpath);
- }
+ for (QPath excludedPath : excludedPaths)
+ {
+ if (eventData.getQPath().isDescendantOf(excludedPath) || eventData.getQPath().equals(excludedPath))
+ return true;
+ }
+
+ return !eventData.getQPath().isDescendantOf(indexingRootQpath) && !eventData.getQPath().equals(indexingRootQpath);
+ }
}
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -41,7 +41,7 @@
/**
* Returns the query handler context that passed in {@link
- * #init(QueryHandlerContext)}.
+ * #setContext(QueryHandlerContext)}.
*
* @return the query handler context.
*/
@@ -81,9 +81,31 @@
*/
void close();
- void init(QueryHandlerContext context) throws IOException, RepositoryException, RepositoryConfigurationException;
+ /**
+ * Sets QueryHandlerContext
+ * @param context
+ */
+ void setContext(QueryHandlerContext context);
/**
+ *
+ * initializes QueryHandler
+ *
+ * @param ioMode
+ * @throws IOException
+ * @throws RepositoryException
+ * @throws RepositoryConfigurationException
+ */
+ void init() throws IOException, RepositoryException, RepositoryConfigurationException;
+
+ /**
+ * Checks whether QueryHandler is initialized or not
+ *
+ * @return
+ */
+ boolean isInitialized();
+
+ /**
* Creates a new query by specifying the query statement itself and the
* language in which the query is stated. If the query statement is
* syntactically invalid, given the language specified, an
@@ -110,22 +132,7 @@
*/
void logErrorChanges(Set<String> removed, Set<String> added) throws IOException;
- // /**
- // * Creates a new query by specifying the query object model. If the query
- // * object model is considered invalid for the implementing class, an
- // * InvalidQueryException is thrown.
- // *
- // * @param session the session of the current user creating the query
- // * object.
- // * @param itemMgr the item manager of the current user.
- // * @param qomTree query query object model tree.
- // * @return A <code>Query</code> object.
- // * @throws InvalidQueryException if the query object model tree is invalid.
- // */
- // ExecutableQuery createExecutableQuery(SessionImpl session,
- // ItemManager itemMgr,
- // QueryObjectModelTree qomTree)
- // throws InvalidQueryException;
+ void setIndexerIoMode(IndexerIoMode ioMode) throws IOException;
/**
* @return the name of the query class to use.
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandlerContext.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandlerContext.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandlerContext.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -28,175 +28,184 @@
* stable. This class provides access to the environment where the query handler
* is running in.
*/
-public class QueryHandlerContext {
- /**
- * The persistent <code>ItemStateManager</code>
- */
- private final ItemDataConsumer stateMgr;
+public class QueryHandlerContext
+{
+ /**
+ * The persistent <code>ItemStateManager</code>
+ */
+ private final ItemDataConsumer stateMgr;
- /**
- * The node type registry of the repository
- */
- private final NodeTypeDataManager nodeTypeDataManager;
+ /**
+ * The node type registry of the repository
+ */
+ private final NodeTypeDataManager nodeTypeDataManager;
- /**
- * The namespace registry of the repository.
- */
- private final NamespaceRegistryImpl nsRegistry;
+ /**
+ * The namespace registry of the repository.
+ */
+ private final NamespaceRegistryImpl nsRegistry;
- /**
- * The id of the root node.
- */
- private final IndexingTree indexingTree;
+ /**
+ * The id of the root node.
+ */
+ private final IndexingTree indexingTree;
- /**
- * PropertyType registry to look up the type of a property with a given
- * name.
- */
- private final PropertyTypeRegistry propRegistry;
+ /**
+ * PropertyType registry to look up the type of a property with a given
+ * name.
+ */
+ private final PropertyTypeRegistry propRegistry;
- /**
- * The query handler for the jcr:system tree
- */
- private final QueryHandler parentHandler;
+ /**
+ * The query handler for the jcr:system tree
+ */
+ private final QueryHandler parentHandler;
- /**
- * Text extractor for extracting text content of binary properties.
- */
- private final DocumentReaderService extractor;
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ private final DocumentReaderService extractor;
- private final String indexDirectory;
+ private final String indexDirectory;
- private final boolean createInitialIndex;
+ private final boolean createInitialIndex;
- private final LuceneVirtualTableResolver virtualTableResolver;
+ private final LuceneVirtualTableResolver virtualTableResolver;
- /**
- * Creates a new context instance.
- *
- * @param fs
- * a this <code>QueryHandler</code> may use to store its index.
- * If no <code>FileSystem</code> has been configured
- * <code>fs</code> is <code>null</code>.
- * @param stateMgr
- * provides persistent item states.
- * @param rootId
- * the id of the root node.
- * @param ntRegistry
- * the node type registry.
- * @param nsRegistry
- * the namespace registry.
- * @param parentHandler
- * the parent query handler or <code>null</code> it there is no
- * parent handler.
- * @param virtualTableResolver
- * @param excludedNodeId
- * id of the node that should be excluded from indexing. Any
- * descendant of that node is also excluded from indexing.
- */
- public QueryHandlerContext(ItemDataConsumer stateMgr,
- IndexingTree indexingTree, NodeTypeDataManager nodeTypeDataManager,
- NamespaceRegistryImpl nsRegistry, QueryHandler parentHandler,
- String indexDirectory, DocumentReaderService extractor,
- boolean createInitialIndex,
- LuceneVirtualTableResolver virtualTableResolver) {
- this.stateMgr = stateMgr;
- this.indexingTree = indexingTree;
- this.nodeTypeDataManager = nodeTypeDataManager;
- this.nsRegistry = nsRegistry;
- this.indexDirectory = indexDirectory;
- this.extractor = extractor;
- this.createInitialIndex = createInitialIndex;
- this.virtualTableResolver = virtualTableResolver;
- this.propRegistry = new PropertyTypeRegistry(nodeTypeDataManager);
- this.parentHandler = parentHandler;
- ((NodeTypeDataManagerImpl) this.nodeTypeDataManager)
- .addListener(propRegistry);
- }
+ /**
+ * Creates a new context instance.
+ *
+ * @param stateMgr
+ * provides persistent item states.
+ * @param indexingTree
+ * the indexing tree, defining the indexing root and excluded paths.
+ * @param nodeTypeDataManager
+ * the node type data manager.
+ * @param nsRegistry
+ * the namespace registry.
+ * @param parentHandler
+ * the parent query handler or <code>null</code> if there is no
+ * parent handler.
+ * @param indexDirectory
+ * the directory where the index is stored.
+ * @param extractor
+ * text extractor for extracting text content of binary properties.
+ * @param createInitialIndex
+ * whether an initial index should be created.
+ * @param virtualTableResolver
+ * resolves node types to lucene queries.
+ */
+ public QueryHandlerContext(ItemDataConsumer stateMgr, IndexingTree indexingTree,
+ NodeTypeDataManager nodeTypeDataManager, NamespaceRegistryImpl nsRegistry, QueryHandler parentHandler,
+ String indexDirectory, DocumentReaderService extractor, boolean createInitialIndex,
+ LuceneVirtualTableResolver virtualTableResolver)
+ {
+ this.stateMgr = stateMgr;
+ this.indexingTree = indexingTree;
+ this.nodeTypeDataManager = nodeTypeDataManager;
+ this.nsRegistry = nsRegistry;
+ this.indexDirectory = indexDirectory;
+ this.extractor = extractor;
+ this.createInitialIndex = createInitialIndex;
+ this.virtualTableResolver = virtualTableResolver;
+ this.propRegistry = new PropertyTypeRegistry(nodeTypeDataManager);
+ this.parentHandler = parentHandler;
+ ((NodeTypeDataManagerImpl)this.nodeTypeDataManager).addListener(propRegistry);
+ }
- /**
- * @return the virtualTableResolver
- */
- public LuceneVirtualTableResolver getVirtualTableResolver() {
- return virtualTableResolver;
- }
+ /**
+ * @return the virtualTableResolver
+ */
+ public LuceneVirtualTableResolver getVirtualTableResolver()
+ {
+ return virtualTableResolver;
+ }
- /**
- * @return the createInitialIndex
- */
- public boolean isCreateInitialIndex() {
- return createInitialIndex;
- }
+ /**
+ * @return the createInitialIndex
+ */
+ public boolean isCreateInitialIndex()
+ {
+ return createInitialIndex;
+ }
- /**
- * Returns the persistent {@link ItemStateManager} of the workspace this
- * <code>QueryHandler</code> is based on.
- *
- * @return the persistent <code>ItemStateManager</code> of the current
- * workspace.
- */
- public ItemDataConsumer getItemStateManager() {
- return stateMgr;
- }
+ /**
+ * Returns the persistent {@link ItemStateManager} of the workspace this
+ * <code>QueryHandler</code> is based on.
+ *
+ * @return the persistent <code>ItemStateManager</code> of the current
+ * workspace.
+ */
+ public ItemDataConsumer getItemStateManager()
+ {
+ return stateMgr;
+ }
- /**
- * Returns the id of the root node.
- *
- * @return the idof the root node.
- */
- public IndexingTree getIndexingTree() {
- return indexingTree;
- }
+ /**
+ * Returns the indexing tree.
+ *
+ * @return the indexing tree of this workspace.
+ */
+ public IndexingTree getIndexingTree()
+ {
+ return indexingTree;
+ }
- /**
- * Returns the PropertyTypeRegistry for this repository.
- *
- * @return the PropertyTypeRegistry for this repository.
- */
- public PropertyTypeRegistry getPropertyTypeRegistry() {
- return propRegistry;
- }
+ /**
+ * Returns the PropertyTypeRegistry for this repository.
+ *
+ * @return the PropertyTypeRegistry for this repository.
+ */
+ public PropertyTypeRegistry getPropertyTypeRegistry()
+ {
+ return propRegistry;
+ }
- /**
- * Returns the NodeTypeRegistry for this repository.
- *
- * @return the NodeTypeRegistry for this repository.
- */
- public NodeTypeDataManager getNodeTypeDataManager() {
- return nodeTypeDataManager;
- }
+ /**
+ * Returns the NodeTypeRegistry for this repository.
+ *
+ * @return the NodeTypeRegistry for this repository.
+ */
+ public NodeTypeDataManager getNodeTypeDataManager()
+ {
+ return nodeTypeDataManager;
+ }
- /**
- * Returns the NamespaceRegistryImpl for this repository.
- *
- * @return the NamespaceRegistryImpl for this repository.
- */
- public NamespaceRegistryImpl getNamespaceRegistry() {
- return nsRegistry;
- }
+ /**
+ * Returns the NamespaceRegistryImpl for this repository.
+ *
+ * @return the NamespaceRegistryImpl for this repository.
+ */
+ public NamespaceRegistryImpl getNamespaceRegistry()
+ {
+ return nsRegistry;
+ }
- /**
- * Returns the parent query handler.
- *
- * @return the parent query handler.
- */
- public QueryHandler getParentHandler() {
- return parentHandler;
- }
+ /**
+ * Returns the parent query handler.
+ *
+ * @return the parent query handler.
+ */
+ public QueryHandler getParentHandler()
+ {
+ return parentHandler;
+ }
- /**
- * Destroys this context and releases resources.
- */
- public void destroy() {
- ((NodeTypeDataManagerImpl) this.nodeTypeDataManager)
- .removeListener(propRegistry);
- }
+ /**
+ * Destroys this context and releases resources.
+ */
+ public void destroy()
+ {
+ ((NodeTypeDataManagerImpl)this.nodeTypeDataManager).removeListener(propRegistry);
+ }
- public DocumentReaderService getExtractor() {
- return extractor;
- }
+ public DocumentReaderService getExtractor()
+ {
+ return extractor;
+ }
- public String getIndexDirectory() {
- return indexDirectory;
- }
+ public String getIndexDirectory()
+ {
+ return indexDirectory;
+ }
}
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchIndexConfigurationHelper.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchIndexConfigurationHelper.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchIndexConfigurationHelper.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -29,7 +29,7 @@
/**
* @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey
* Kabashnyuk</a>
- * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ * @version $Id: SearchIndexConfigurationHelper.java 1053 2009-12-15 09:27:30Z nzamosenchuk $
*
*/
public class SearchIndexConfigurationHelper
@@ -129,6 +129,8 @@
searchIndex.setUseCompoundFile(Boolean.parseBoolean(value));
else if (QueryHandlerParams.PARAM_VOLATILE_IDLE_TIME.equals(name))
searchIndex.setVolatileIdleTime(Integer.parseInt(value));
+ else if (QueryHandlerParams.PARAM_MAX_VOLATILE_SIZE.equals(name))
+ searchIndex.setMaxVolatileIndexSize(Integer.parseInt(value));
else if (QueryHandlerParams.PARAM_ANALYZER_CLASS.equals(name))
{
searchIndex.setAnalyzer(value);
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SearchManager.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -1,860 +1,858 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.exoplatform.services.jcr.impl.core.query;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
-import org.exoplatform.container.configuration.ConfigurationManager;
-import org.exoplatform.services.document.DocumentReaderService;
-import org.exoplatform.services.jcr.config.QueryHandlerEntry;
-import org.exoplatform.services.jcr.config.QueryHandlerParams;
-import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
-import org.exoplatform.services.jcr.core.nodetype.NodeTypeDataManager;
-import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
-import org.exoplatform.services.jcr.dataflow.ItemState;
-import org.exoplatform.services.jcr.dataflow.ItemStateChangesLog;
-import org.exoplatform.services.jcr.dataflow.persistent.MandatoryItemsPersistenceListener;
-import org.exoplatform.services.jcr.datamodel.InternalQName;
-import org.exoplatform.services.jcr.datamodel.ItemData;
-import org.exoplatform.services.jcr.datamodel.NodeData;
-import org.exoplatform.services.jcr.datamodel.PropertyData;
-import org.exoplatform.services.jcr.datamodel.QPath;
-import org.exoplatform.services.jcr.datamodel.ValueData;
-import org.exoplatform.services.jcr.impl.Constants;
-import org.exoplatform.services.jcr.impl.core.LocationFactory;
-import org.exoplatform.services.jcr.impl.core.NamespaceRegistryImpl;
-import org.exoplatform.services.jcr.impl.core.SessionDataManager;
-import org.exoplatform.services.jcr.impl.core.SessionImpl;
-import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
-import org.exoplatform.services.jcr.impl.core.query.lucene.LuceneVirtualTableResolver;
-import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
-import org.exoplatform.services.jcr.impl.core.query.lucene.ScoreNode;
-import org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex;
-import org.exoplatform.services.jcr.impl.core.value.NameValue;
-import org.exoplatform.services.jcr.impl.core.value.PathValue;
-import org.exoplatform.services.jcr.impl.core.value.ValueFactoryImpl;
-import org.exoplatform.services.jcr.impl.dataflow.AbstractValueData;
-import org.exoplatform.services.jcr.impl.dataflow.persistent.WorkspacePersistentDataManager;
-import org.exoplatform.services.log.ExoLogger;
-import org.exoplatform.services.log.Log;
-import org.picocontainer.Startable;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.StringTokenizer;
-
-import javax.jcr.Node;
-import javax.jcr.PropertyType;
-import javax.jcr.RepositoryException;
-import javax.jcr.Value;
-import javax.jcr.query.InvalidQueryException;
-import javax.jcr.query.Query;
-
-/**
- * Acts as a global entry point to execute queries and index nodes.
- */
-public class SearchManager implements Startable, MandatoryItemsPersistenceListener
-{
-
- /**
- * Logger instance for this class
- */
- private static final Log log = ExoLogger.getLogger(SearchManager.class);
-
- protected final QueryHandlerEntry config;
-
- /**
- * Text extractor for extracting text content of binary properties.
- */
- protected final DocumentReaderService extractor;
-
- /**
- * QueryHandler where query execution is delegated to
- */
- protected QueryHandler handler;
-
- /**
- * The shared item state manager instance for the workspace.
- */
- protected final ItemDataConsumer itemMgr;
-
- /**
- * The namespace registry of the repository.
- */
- protected final NamespaceRegistryImpl nsReg;
-
- /**
- * The node type registry.
- */
- protected final NodeTypeDataManager nodeTypeDataManager;
-
- /**
- * QueryHandler of the parent search manager or <code>null</code> if there
- * is none.
- */
- protected final SearchManager parentSearchManager;
-
- // protected QPath indexingRoot;
- //
- // protected List<QPath> excludedPaths = new ArrayList<QPath>();
-
- protected IndexingTree indexingTree;
-
- private final ConfigurationManager cfm;
-
- protected LuceneVirtualTableResolver virtualTableResolver;
-
- /**
- * Creates a new <code>SearchManager</code>.
- *
- * @param config
- * the search configuration.
- * @param nsReg
- * the namespace registry.
- * @param ntReg
- * the node type registry.
- * @param itemMgr
- * the shared item state manager.
- * @param rootNodeId
- * the id of the root node.
- * @param parentMgr
- * the parent search manager or <code>null</code> if there is no
- * parent search manager.
- * @param excludedNodeId
- * id of the node that should be excluded from indexing. Any
- * descendant of that node will also be excluded from indexing.
- * @throws RepositoryException
- * if the search manager cannot be initialized
- * @throws RepositoryConfigurationException
- */
- public SearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg, NodeTypeDataManager ntReg,
- WorkspacePersistentDataManager itemMgr, SystemSearchManagerHolder parentSearchManager,
- DocumentReaderService extractor, ConfigurationManager cfm, final RepositoryIndexSearcherHolder indexSearcherHolder)
- throws RepositoryException, RepositoryConfigurationException
- {
-
- this.extractor = extractor;
- indexSearcherHolder.addIndexSearcher(this);
- this.config = config;
- this.nodeTypeDataManager = ntReg;
- this.nsReg = nsReg;
- this.itemMgr = itemMgr;
- this.cfm = cfm;
- this.virtualTableResolver = new LuceneVirtualTableResolver(nodeTypeDataManager, nsReg);
- this.parentSearchManager = parentSearchManager != null ? parentSearchManager.get() : null;
- itemMgr.addItemPersistenceListener(this);
- }
-
- /**
- * Creates a query object from a node that can be executed on the workspace.
- *
- * @param session
- * the session of the user executing the query.
- * @param itemMgr
- * the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param node
- * a node of type nt:query.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException
- * if <code>absPath</code> is not a valid persisted query (that
- * is, a node of type nt:query)
- * @throws RepositoryException
- * if any other error occurs.
- */
- public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, Node node)
- throws InvalidQueryException, RepositoryException
- {
- AbstractQueryImpl query = createQueryInstance();
- query.init(session, sessionDataManager, handler, node);
- return query;
- }
-
- /**
- * Creates a query object that can be executed on the workspace.
- *
- * @param session
- * the session of the user executing the query.
- * @param itemMgr
- * the item manager of the user executing the query. Needed to
- * return <code>Node</code> instances in the result set.
- * @param statement
- * the actual query statement.
- * @param language
- * the syntax of the query statement.
- * @return a <code>Query</code> instance to execute.
- * @throws InvalidQueryException
- * if the query is malformed or the <code>language</code> is
- * unknown.
- * @throws RepositoryException
- * if any other error occurs.
- */
- public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, String statement,
- String language) throws InvalidQueryException, RepositoryException
- {
- AbstractQueryImpl query = createQueryInstance();
- query.init(session, sessionDataManager, handler, statement, language);
- return query;
- }
-
- /**
- * just for test use only
- */
- public QueryHandler getHandler()
- {
-
- return handler;
- }
-
- public void onSaveItems(ItemStateChangesLog changesLog)
- {
- if (handler == null)
- return;
-
- long time = System.currentTimeMillis();
-
- // nodes that need to be removed from the index.
- final Set<String> removedNodes = new HashSet<String>();
- // nodes that need to be added to the index.
- final Set<String> addedNodes = new HashSet<String>();
-
- final Map<String, List<ItemState>> updatedNodes = new HashMap<String, List<ItemState>>();
-
- for (Iterator<ItemState> iter = changesLog.getAllStates().iterator(); iter.hasNext();)
- {
- ItemState itemState = iter.next();
-
- if (!indexingTree.isExcluded(itemState))
- {
- String uuid =
- itemState.isNode() ? itemState.getData().getIdentifier() : itemState.getData().getParentIdentifier();
-
- if (itemState.isAdded())
- {
- if (itemState.isNode())
- {
- addedNodes.add(uuid);
- }
- else
- {
- if (!addedNodes.contains(uuid))
- {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- }
- }
- else if (itemState.isRenamed())
- {
- if (itemState.isNode())
- {
- addedNodes.add(uuid);
- }
- else
- {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- }
- else if (itemState.isUpdated())
- {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- else if (itemState.isMixinChanged())
- {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- else if (itemState.isDeleted())
- {
- if (itemState.isNode())
- {
- if (addedNodes.contains(uuid))
- {
- addedNodes.remove(uuid);
- removedNodes.remove(uuid);
- }
- else
- {
- removedNodes.add(uuid);
- }
- // remove all changes after node remove
- updatedNodes.remove(uuid);
- }
- else
- {
- if (!removedNodes.contains(uuid) && !addedNodes.contains(uuid))
- {
- createNewOrAdd(uuid, itemState, updatedNodes);
- }
- }
- }
- }
- }
- // TODO make quick changes
- for (String uuid : updatedNodes.keySet())
- {
- removedNodes.add(uuid);
- addedNodes.add(uuid);
- }
-
- Iterator<NodeData> addedStates = new Iterator<NodeData>()
- {
- private final Iterator<String> iter = addedNodes.iterator();
-
- public boolean hasNext()
- {
- return iter.hasNext();
- }
-
- public NodeData next()
- {
-
- // cycle till find a next or meet the end of set
- do
- {
- String id = iter.next();
- try
- {
- ItemData item = itemMgr.getItemData(id);
- if (item != null)
- {
- if (item.isNode())
- return (NodeData)item; // return node
- else
- log.warn("Node not found, but property " + id + ", " + item.getQPath().getAsString()
- + " found. ");
- }
- else
- log.warn("Unable to index node with id " + id + ", node does not exist.");
-
- }
- catch (RepositoryException e)
- {
- log.error("Can't read next node data " + id, e);
- }
- }
- while (iter.hasNext()); // get next if error or node not found
-
- return null; // we met the end of iterator set
- }
-
- public void remove()
- {
- throw new UnsupportedOperationException();
- }
- };
-
- Iterator<String> removedIds = new Iterator<String>()
- {
- private final Iterator<String> iter = removedNodes.iterator();
-
- public boolean hasNext()
- {
- return iter.hasNext();
- }
-
- public String next()
- {
- return nextNodeId();
- }
-
- public String nextNodeId() throws NoSuchElementException
- {
- return iter.next();
- }
-
- public void remove()
- {
- throw new UnsupportedOperationException();
-
- }
- };
-
- if (removedNodes.size() > 0 || addedNodes.size() > 0)
- {
- try
- {
- handler.updateNodes(removedIds, addedStates);
- }
- catch (RepositoryException e)
- {
- log.error("Error indexing changes " + e, e);
- }
- catch (IOException e)
- {
- log.error("Error indexing changes " + e, e);
- try
- {
- handler.logErrorChanges(removedNodes, addedNodes);
- }
- catch (IOException ioe)
- {
- log.warn("Exception occure when errorLog writed. Error log is not complete. " + ioe, ioe);
- }
- }
- }
-
- if (log.isDebugEnabled())
- {
- log.debug("onEvent: indexing finished in " + String.valueOf(System.currentTimeMillis() - time) + " ms.");
- }
- }
-
- public void createNewOrAdd(String key, ItemState state, Map<String, List<ItemState>> updatedNodes)
- {
- List<ItemState> list = updatedNodes.get(key);
- if (list == null)
- {
- list = new ArrayList<ItemState>();
- updatedNodes.put(key, list);
- }
- list.add(state);
-
- }
-
- public void start()
- {
-
- if (log.isDebugEnabled())
- log.debug("start");
- try
- {
- if (indexingTree == null)
- {
- List<QPath> excludedPath = new ArrayList<QPath>();
- // Calculating excluded node identifiers
- excludedPath.add(Constants.JCR_SYSTEM_PATH);
-
- //if (config.getExcludedNodeIdentifers() != null)
- String excludedNodeIdentifer =
- config.getParameterValue(QueryHandlerParams.PARAM_EXCLUDED_NODE_IDENTIFERS, null);
- if (excludedNodeIdentifer != null)
- {
- StringTokenizer stringTokenizer = new StringTokenizer(excludedNodeIdentifer);
- while (stringTokenizer.hasMoreTokens())
- {
-
- try
- {
- ItemData excludeData = itemMgr.getItemData(stringTokenizer.nextToken());
- if (excludeData != null)
- excludedPath.add(excludeData.getQPath());
- }
- catch (RepositoryException e)
- {
- log.warn(e.getLocalizedMessage());
- }
- }
- }
-
- NodeData indexingRootData = null;
- String rootNodeIdentifer = config.getParameterValue(QueryHandlerParams.PARAM_ROOT_NODE_ID, null);
- if (rootNodeIdentifer != null)
- {
- try
- {
- ItemData indexingRootDataItem = itemMgr.getItemData(rootNodeIdentifer);
- if (indexingRootDataItem != null && indexingRootDataItem.isNode())
- indexingRootData = (NodeData)indexingRootDataItem;
- }
- catch (RepositoryException e)
- {
- log.warn(e.getLocalizedMessage() + " Indexing root set to " + Constants.ROOT_PATH.getAsString());
-
- }
-
- }
- else
- {
- try
- {
- indexingRootData = (NodeData)itemMgr.getItemData(Constants.ROOT_UUID);
- }
- catch (RepositoryException e)
- {
- log.error("Fail to load root node data");
- }
- }
-
- indexingTree = new IndexingTree(indexingRootData, excludedPath);
- }
-
- initializeQueryHandler();
- }
- catch (RepositoryException e)
- {
- log.error(e.getLocalizedMessage());
- handler = null;
- throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
- }
- catch (RepositoryConfigurationException e)
- {
- log.error(e.getLocalizedMessage());
- handler = null;
- throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
- }
- }
-
- public void stop()
- {
- handler.close();
- log.info("Search manager stopped");
- }
-
- // /**
- // * Checks if the given event should be excluded based on the
- // * {@link #excludePath} setting.
- // *
- // * @param event
- // * observation event
- // * @return <code>true</code> if the event should be excluded,
- // * <code>false</code> otherwise
- // */
- // protected boolean isExcluded(ItemState event) {
- //
- // for (QPath excludedPath : excludedPaths) {
- // if (event.getData().getQPath().isDescendantOf(excludedPath)
- // || event.getData().getQPath().equals(excludedPath))
- // return true;
- // }
- //
- // return !event.getData().getQPath().isDescendantOf(indexingRoot)
- // && !event.getData().getQPath().equals(indexingRoot);
- // }
-
- protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
- throws RepositoryConfigurationException
- {
-
- QueryHandlerContext context =
- new QueryHandlerContext(itemMgr, indexingTree, nodeTypeDataManager, nsReg, parentHandler, getIndexDir(),
- extractor, true, virtualTableResolver);
- return context;
- }
-
- protected String getIndexDir() throws RepositoryConfigurationException
- {
- String dir = config.getParameterValue(QueryHandlerParams.PARAM_INDEX_DIR, null);
- if (dir == null)
- {
- log.warn(QueryHandlerParams.PARAM_INDEX_DIR + " parameter not found. Using outdated parameter name "
- + QueryHandlerParams.OLD_PARAM_INDEX_DIR);
- dir = config.getParameterValue(QueryHandlerParams.OLD_PARAM_INDEX_DIR);
- }
- return dir;
- }
-
- /**
- * Initializes the query handler.
- *
- * @throws RepositoryException
- * if the query handler cannot be initialized.
- * @throws RepositoryConfigurationException
- * @throws ClassNotFoundException
- */
- protected void initializeQueryHandler() throws RepositoryException, RepositoryConfigurationException
- {
- // initialize query handler
- String className = config.getType();
- if (className == null)
- throw new RepositoryConfigurationException("Content hanler configuration fail");
-
- try
- {
- Class qHandlerClass = Class.forName(className, true, this.getClass().getClassLoader());
- Constructor constuctor = qHandlerClass.getConstructor(QueryHandlerEntry.class, ConfigurationManager.class);
- handler = (QueryHandler)constuctor.newInstance(config, cfm);
- QueryHandler parentHandler = (this.parentSearchManager != null) ? parentSearchManager.getHandler() : null;
- QueryHandlerContext context = createQueryHandlerContext(parentHandler);
- handler.init(context);
-
- }
- catch (SecurityException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (IllegalArgumentException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (ClassNotFoundException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (NoSuchMethodException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (InstantiationException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (IllegalAccessException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (InvocationTargetException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- catch (IOException e)
- {
- throw new RepositoryException(e.getMessage(), e);
- }
- }
-
- /**
- * Creates a new instance of an {@link AbstractQueryImpl} which is not
- * initialized.
- *
- * @return an new query instance.
- * @throws RepositoryException
- * if an error occurs while creating a new query instance.
- */
- protected AbstractQueryImpl createQueryInstance() throws RepositoryException
- {
- try
- {
- String queryImplClassName = handler.getQueryClass();
- Object obj = Class.forName(queryImplClassName).newInstance();
- if (obj instanceof AbstractQueryImpl)
- {
- return (AbstractQueryImpl)obj;
- }
- else
- {
- throw new IllegalArgumentException(queryImplClassName + " is not of type "
- + AbstractQueryImpl.class.getName());
- }
- }
- catch (Throwable t)
- {
- throw new RepositoryException("Unable to create query: " + t.toString(), t);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- public Set<String> getFieldNames() throws IndexException
- {
- final Set<String> fildsSet = new HashSet<String>();
- if (handler instanceof SearchIndex)
- {
- IndexReader reader = null;
- try
- {
- reader = ((SearchIndex)handler).getIndexReader();
- final Collection fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
- for (final Object field : fields)
- {
- fildsSet.add((String)field);
- }
- }
- catch (IOException e)
- {
- throw new IndexException(e.getLocalizedMessage(), e);
- }
- finally
- {
- try
- {
- if (reader != null)
- reader.close();
- }
- catch (IOException e)
- {
- throw new IndexException(e.getLocalizedMessage(), e);
- }
- }
-
- }
- return fildsSet;
- }
-
- public Set<String> getNodesByNodeType(final InternalQName nodeType) throws RepositoryException
- {
-
- return getNodes(virtualTableResolver.resolve(nodeType, true));
- }
-
- /**
- * Return set of uuid of nodes. Contains in names prefixes maped to the
- * given uri
- *
- * @param prefix
- * @return
- * @throws RepositoryException
- */
- public Set<String> getNodesByUri(final String uri) throws RepositoryException
- {
- Set<String> result;
- final int defaultClauseCount = BooleanQuery.getMaxClauseCount();
- try
- {
-
- // final LocationFactory locationFactory = new
- // LocationFactory(this);
- final ValueFactoryImpl valueFactory = new ValueFactoryImpl(new LocationFactory(nsReg));
- BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
- BooleanQuery query = new BooleanQuery();
-
- final String prefix = nsReg.getNamespacePrefixByURI(uri);
- query.add(new WildcardQuery(new Term(FieldNames.LABEL, prefix + ":*")), Occur.SHOULD);
- // name of the property
- query.add(new WildcardQuery(new Term(FieldNames.PROPERTIES_SET, prefix + ":*")), Occur.SHOULD);
-
- result = getNodes(query);
-
- // value of the property
-
- try
- {
- final Set<String> props = getFieldNames();
-
- query = new BooleanQuery();
- for (final String fieldName : props)
- {
- if (!FieldNames.PROPERTIES_SET.equals(fieldName))
- {
- query.add(new WildcardQuery(new Term(fieldName, "*" + prefix + ":*")), Occur.SHOULD);
- }
- }
- }
- catch (final IndexException e)
- {
- throw new RepositoryException(e.getLocalizedMessage(), e);
- }
-
- final Set<String> propSet = getNodes(query);
- // Manually check property values;
- for (final String uuid : propSet)
- {
- if (isPrefixMatch(valueFactory, uuid, prefix))
- {
- result.add(uuid);
- }
- }
- }
- finally
- {
- BooleanQuery.setMaxClauseCount(defaultClauseCount);
- }
-
- return result;
- }
-
- private boolean isPrefixMatch(final InternalQName value, final String prefix) throws RepositoryException
- {
- return value.getNamespace().equals(nsReg.getNamespaceURIByPrefix(prefix));
- }
-
- private boolean isPrefixMatch(final QPath value, final String prefix) throws RepositoryException
- {
- for (int i = 0; i < value.getEntries().length; i++)
- {
- if (isPrefixMatch(value.getEntries()[i], prefix))
- {
- return true;
- }
- }
- return false;
- }
-
- /**
- * @param valueFactory
- * @param dm
- * @param uuid
- * @param prefix
- * @throws RepositoryException
- */
- private boolean isPrefixMatch(final ValueFactoryImpl valueFactory, final String uuid, final String prefix)
- throws RepositoryException
- {
-
- final ItemData node = itemMgr.getItemData(uuid);
- if (node != null && node.isNode())
- {
- final List<PropertyData> props = itemMgr.getChildPropertiesData((NodeData)node);
- for (final PropertyData propertyData : props)
- {
- if (propertyData.getType() == PropertyType.PATH || propertyData.getType() == PropertyType.NAME)
- {
- for (final ValueData vdata : propertyData.getValues())
- {
- final Value val =
- valueFactory.loadValue(((AbstractValueData)vdata).createTransientCopy(), propertyData.getType());
- if (propertyData.getType() == PropertyType.PATH)
- {
- if (isPrefixMatch(((PathValue)val).getQPath(), prefix))
- {
- return true;
- }
- }
- else if (propertyData.getType() == PropertyType.NAME)
- {
- if (isPrefixMatch(((NameValue)val).getQName(), prefix))
- {
- return true;
- }
- }
- }
- }
- }
- }
- return false;
- }
-
- /**
- * @param query
- * @return
- * @throws RepositoryException
- */
- private Set<String> getNodes(final org.apache.lucene.search.Query query) throws RepositoryException
- {
- Set<String> result = new HashSet<String>();
- try
- {
- QueryHits hits = handler.executeQuery(query);
-
- ScoreNode sn;
-
- while ((sn = hits.nextScoreNode()) != null)
- {
- // Node node = session.getNodeById(sn.getNodeId());
- result.add(sn.getNodeId());
- }
- }
- catch (IOException e)
- {
- throw new RepositoryException(e.getLocalizedMessage(), e);
- }
- return result;
- }
-
-}
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.exoplatform.container.configuration.ConfigurationManager;
+import org.exoplatform.services.document.DocumentReaderService;
+import org.exoplatform.services.jcr.config.QueryHandlerEntry;
+import org.exoplatform.services.jcr.config.QueryHandlerParams;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
+import org.exoplatform.services.jcr.core.nodetype.NodeTypeDataManager;
+import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
+import org.exoplatform.services.jcr.dataflow.ItemState;
+import org.exoplatform.services.jcr.dataflow.ItemStateChangesLog;
+import org.exoplatform.services.jcr.dataflow.persistent.MandatoryItemsPersistenceListener;
+import org.exoplatform.services.jcr.datamodel.InternalQName;
+import org.exoplatform.services.jcr.datamodel.ItemData;
+import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.exoplatform.services.jcr.datamodel.PropertyData;
+import org.exoplatform.services.jcr.datamodel.QPath;
+import org.exoplatform.services.jcr.datamodel.ValueData;
+import org.exoplatform.services.jcr.impl.Constants;
+import org.exoplatform.services.jcr.impl.core.LocationFactory;
+import org.exoplatform.services.jcr.impl.core.NamespaceRegistryImpl;
+import org.exoplatform.services.jcr.impl.core.SessionDataManager;
+import org.exoplatform.services.jcr.impl.core.SessionImpl;
+import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
+import org.exoplatform.services.jcr.impl.core.query.lucene.LuceneVirtualTableResolver;
+import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
+import org.exoplatform.services.jcr.impl.core.query.lucene.ScoreNode;
+import org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex;
+import org.exoplatform.services.jcr.impl.core.value.NameValue;
+import org.exoplatform.services.jcr.impl.core.value.PathValue;
+import org.exoplatform.services.jcr.impl.core.value.ValueFactoryImpl;
+import org.exoplatform.services.jcr.impl.dataflow.AbstractValueData;
+import org.exoplatform.services.jcr.impl.dataflow.persistent.WorkspacePersistentDataManager;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.jboss.cache.factories.annotations.NonVolatile;
+import org.picocontainer.Startable;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import javax.jcr.Node;
+import javax.jcr.PropertyType;
+import javax.jcr.RepositoryException;
+import javax.jcr.Value;
+import javax.jcr.query.InvalidQueryException;
+import javax.jcr.query.Query;
+
+/**
+ * Created by The eXo Platform SAS.
+ *
+ * <br/>Date:
+ *
+ * @author <a href="karpenko.sergiy at gmail.com">Karpenko Sergiy</a>
+ * @version $Id: SearchManager.java 1008 2009-12-11 15:14:51Z nzamosenchuk $
+ */
+ at NonVolatile
+public class SearchManager implements Startable, MandatoryItemsPersistenceListener
+{
+
+ /**
+ * Logger instance for this class
+ */
+ private static final Log log = ExoLogger.getLogger(SearchManager.class);
+
+ protected final QueryHandlerEntry config;
+
+ /**
+ * Text extractor for extracting text content of binary properties.
+ */
+ protected final DocumentReaderService extractor;
+
+ /**
+ * QueryHandler where query execution is delegated to
+ */
+
+ protected QueryHandler handler;
+
+ /**
+ * The shared item state manager instance for the workspace.
+ */
+ protected final ItemDataConsumer itemMgr;
+
+ /**
+ * The namespace registry of the repository.
+ */
+ protected final NamespaceRegistryImpl nsReg;
+
+ /**
+ * The node type registry.
+ */
+ protected final NodeTypeDataManager nodeTypeDataManager;
+
+ /**
+ * QueryHandler of the parent search manager or <code>null</code> if there
+ * is none.
+ */
+ protected final SearchManager parentSearchManager;
+
+ protected IndexingTree indexingTree;
+
+ private final ConfigurationManager cfm;
+
+ protected LuceneVirtualTableResolver virtualTableResolver;
+
+ protected IndexerChangesFilter changesFilter;
+
+ /**
+ * ChangesLog Buffer (used for saves before start).
+ */
+ private List<ItemStateChangesLog> changesLogBuffer = new ArrayList<ItemStateChangesLog>();
+
+ /**
+ * Creates a new <code>SearchManager</code>.
+ *
+ * @param config
+ * the search configuration.
+ * @param nsReg
+ * the namespace registry.
+ * @param ntReg
+ * the node type registry.
+ * @param itemMgr
+ * the shared item state manager.
+ * @param rootNodeId
+ * the id of the root node.
+ * @param parentMgr
+ * the parent search manager or <code>null</code> if there is no
+ * parent search manager.
+ * @param excludedNodeId
+ * id of the node that should be excluded from indexing. Any
+ * descendant of that node will also be excluded from indexing.
+ * @throws RepositoryException
+ * if the search manager cannot be initialized
+ * @throws RepositoryConfigurationException
+ */
+
+ public SearchManager(QueryHandlerEntry config, NamespaceRegistryImpl nsReg, NodeTypeDataManager ntReg,
+ WorkspacePersistentDataManager itemMgr, SystemSearchManagerHolder parentSearchManager,
+ DocumentReaderService extractor, ConfigurationManager cfm, final RepositoryIndexSearcherHolder indexSearcherHolder)
+ throws RepositoryException, RepositoryConfigurationException
+ {
+
+ this.extractor = extractor;
+ indexSearcherHolder.addIndexSearcher(this);
+ this.config = config;
+ this.nodeTypeDataManager = ntReg;
+ this.nsReg = nsReg;
+ this.itemMgr = itemMgr;
+ this.cfm = cfm;
+ this.virtualTableResolver = new LuceneVirtualTableResolver(nodeTypeDataManager, nsReg);
+ this.parentSearchManager = parentSearchManager != null ? parentSearchManager.get() : null;
+ if (parentSearchManager != null)
+ {
+ ((WorkspacePersistentDataManager)this.itemMgr).addItemPersistenceListener(this);
+ }
+ }
+
+ public void createNewOrAdd(String key, ItemState state, Map<String, List<ItemState>> updatedNodes)
+ {
+ List<ItemState> list = updatedNodes.get(key);
+ if (list == null)
+ {
+ list = new ArrayList<ItemState>();
+ updatedNodes.put(key, list);
+ }
+ list.add(state);
+
+ }
+
+ /**
+ * Creates a query object from a node that can be executed on the workspace.
+ *
+ * @param session
+ * the session of the user executing the query.
+ * @param itemMgr
+ * the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param node
+ * a node of type nt:query.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException
+ * if <code>absPath</code> is not a valid persisted query (that
+ * is, a node of type nt:query)
+ * @throws RepositoryException
+ * if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, Node node)
+ throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = createQueryInstance();
+ query.init(session, sessionDataManager, handler, node);
+ return query;
+ }
+
+ /**
+ * Creates a query object that can be executed on the workspace.
+ *
+ * @param session
+ * the session of the user executing the query.
+ * @param itemMgr
+ * the item manager of the user executing the query. Needed to
+ * return <code>Node</code> instances in the result set.
+ * @param statement
+ * the actual query statement.
+ * @param language
+ * the syntax of the query statement.
+ * @return a <code>Query</code> instance to execute.
+ * @throws InvalidQueryException
+ * if the query is malformed or the <code>language</code> is
+ * unknown.
+ * @throws RepositoryException
+ * if any other error occurs.
+ */
+ public Query createQuery(SessionImpl session, SessionDataManager sessionDataManager, String statement,
+ String language) throws InvalidQueryException, RepositoryException
+ {
+ AbstractQueryImpl query = createQueryInstance();
+ query.init(session, sessionDataManager, handler, statement, language);
+ return query;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Set<String> getFieldNames() throws IndexException
+ {
+ final Set<String> fildsSet = new HashSet<String>();
+ if (handler instanceof SearchIndex)
+ {
+ IndexReader reader = null;
+ try
+ {
+ reader = ((SearchIndex)handler).getIndexReader();
+ final Collection fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
+ for (final Object field : fields)
+ {
+ fildsSet.add((String)field);
+ }
+ }
+ catch (IOException e)
+ {
+ throw new IndexException(e.getLocalizedMessage(), e);
+ }
+ finally
+ {
+ try
+ {
+ if (reader != null)
+ reader.close();
+ }
+ catch (IOException e)
+ {
+ throw new IndexException(e.getLocalizedMessage(), e);
+ }
+ }
+
+ }
+ return fildsSet;
+ }
+
+ /**
+ * just for test use only
+ */
+ public QueryHandler getHandler()
+ {
+
+ return handler;
+ }
+
+ public Set<String> getNodesByNodeType(final InternalQName nodeType) throws RepositoryException
+ {
+
+ return getNodes(virtualTableResolver.resolve(nodeType, true));
+ }
+
+   /**
+    * Returns the set of UUIDs of nodes whose names or property names use
+    * prefixes mapped to the given namespace URI.
+    *
+    * @param uri the namespace URI to search for
+    * @return set of UUIDs of matching nodes
+    * @throws RepositoryException
+    */
+ public Set<String> getNodesByUri(final String uri) throws RepositoryException
+ {
+ Set<String> result;
+ final int defaultClauseCount = BooleanQuery.getMaxClauseCount();
+ try
+ {
+
+ // final LocationFactory locationFactory = new
+ // LocationFactory(this);
+ final ValueFactoryImpl valueFactory = new ValueFactoryImpl(new LocationFactory(nsReg));
+ BooleanQuery.setMaxClauseCount(Integer.MAX_VALUE);
+ BooleanQuery query = new BooleanQuery();
+
+ final String prefix = nsReg.getNamespacePrefixByURI(uri);
+ query.add(new WildcardQuery(new Term(FieldNames.LABEL, prefix + ":*")), Occur.SHOULD);
+ // name of the property
+ query.add(new WildcardQuery(new Term(FieldNames.PROPERTIES_SET, prefix + ":*")), Occur.SHOULD);
+
+ result = getNodes(query);
+
+ // value of the property
+
+ try
+ {
+ final Set<String> props = getFieldNames();
+
+ query = new BooleanQuery();
+ for (final String fieldName : props)
+ {
+ if (!FieldNames.PROPERTIES_SET.equals(fieldName))
+ {
+ query.add(new WildcardQuery(new Term(fieldName, "*" + prefix + ":*")), Occur.SHOULD);
+ }
+ }
+ }
+ catch (final IndexException e)
+ {
+ throw new RepositoryException(e.getLocalizedMessage(), e);
+ }
+
+ final Set<String> propSet = getNodes(query);
+ // Manually check property values;
+ for (final String uuid : propSet)
+ {
+ if (isPrefixMatch(valueFactory, uuid, prefix))
+ {
+ result.add(uuid);
+ }
+ }
+ }
+ finally
+ {
+ BooleanQuery.setMaxClauseCount(defaultClauseCount);
+ }
+
+ return result;
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.dataflow.persistent.ItemsPersistenceListener#onSaveItems(org.exoplatform.services.jcr.dataflow.ItemStateChangesLog)
+ */
+ public void onSaveItems(ItemStateChangesLog itemStates)
+ {
+ //skip empty
+ if (itemStates.getSize() > 0)
+ {
+ //Check if SearchManager started and filter configured
+ if (changesFilter == null)
+ {
+ changesLogBuffer.add(itemStates);
+ }
+ else
+ {
+ changesFilter.onSaveItems(itemStates);
+ }
+ }
+ }
+
+ public void start()
+ {
+
+ if (log.isDebugEnabled())
+ log.debug("start");
+ try
+ {
+ if (indexingTree == null)
+ {
+ List<QPath> excludedPath = new ArrayList<QPath>();
+ // Calculating excluded node identifiers
+ excludedPath.add(Constants.JCR_SYSTEM_PATH);
+
+ //if (config.getExcludedNodeIdentifers() != null)
+ String excludedNodeIdentifer =
+ config.getParameterValue(QueryHandlerParams.PARAM_EXCLUDED_NODE_IDENTIFERS, null);
+ if (excludedNodeIdentifer != null)
+ {
+ StringTokenizer stringTokenizer = new StringTokenizer(excludedNodeIdentifer);
+ while (stringTokenizer.hasMoreTokens())
+ {
+
+ try
+ {
+ ItemData excludeData = itemMgr.getItemData(stringTokenizer.nextToken());
+ if (excludeData != null)
+ excludedPath.add(excludeData.getQPath());
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage());
+ }
+ }
+ }
+
+ NodeData indexingRootData = null;
+ String rootNodeIdentifer = config.getParameterValue(QueryHandlerParams.PARAM_ROOT_NODE_ID, null);
+ if (rootNodeIdentifer != null)
+ {
+ try
+ {
+ ItemData indexingRootDataItem = itemMgr.getItemData(rootNodeIdentifer);
+ if (indexingRootDataItem != null && indexingRootDataItem.isNode())
+ indexingRootData = (NodeData)indexingRootDataItem;
+ }
+ catch (RepositoryException e)
+ {
+ log.warn(e.getLocalizedMessage() + " Indexing root set to " + Constants.ROOT_PATH.getAsString());
+
+ }
+
+ }
+ else
+ {
+ try
+ {
+ indexingRootData = (NodeData)itemMgr.getItemData(Constants.ROOT_UUID);
+ // indexingRootData =
+ // new TransientNodeData(Constants.ROOT_PATH, Constants.ROOT_UUID, 1, Constants.NT_UNSTRUCTURED,
+ // new InternalQName[0], 0, null, new AccessControlList());
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Fail to load root node data");
+ }
+ }
+
+ indexingTree = new IndexingTree(indexingRootData, excludedPath);
+ }
+ initializeQueryHandler();
+
+ if (changesLogBuffer.size() > 0)
+ {
+ for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer)
+ {
+ onSaveItems(bufferedChangesLog);
+ }
+ changesLogBuffer.clear();
+ changesLogBuffer = null;
+ }
+
+ }
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ throw new RuntimeException(e.getLocalizedMessage(), e.getCause());
+ }
+ }
+
+ public void stop()
+ {
+ handler.close();
+ log.info("Search manager stopped");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void updateIndex(final Set<String> removedNodes, final Set<String> addedNodes) throws RepositoryException,
+ IOException
+ {
+ if (handler != null)
+ {
+ Iterator<NodeData> addedStates = new Iterator<NodeData>()
+ {
+ private final Iterator<String> iter = addedNodes.iterator();
+
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
+
+ public NodeData next()
+ {
+
+ // cycle till find a next or meet the end of set
+ do
+ {
+ String id = iter.next();
+ try
+ {
+ ItemData item = itemMgr.getItemData(id);
+ if (item != null)
+ {
+ if (item.isNode())
+ {
+ if (!indexingTree.isExcluded(item))
+ return (NodeData)item;
+ }
+ else
+ log.warn("Node not found, but property " + id + ", " + item.getQPath().getAsString()
+ + " found. ");
+ }
+ else
+ log.warn("Unable to index node with id " + id + ", node does not exist.");
+
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Can't read next node data " + id, e);
+ }
+ }
+ while (iter.hasNext()); // get next if error or node not found
+
+ return null; // we met the end of iterator set
+ }
+
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ Iterator<String> removedIds = new Iterator<String>()
+ {
+ private final Iterator<String> iter = removedNodes.iterator();
+
+ public boolean hasNext()
+ {
+ return iter.hasNext();
+ }
+
+ public String next()
+ {
+ return nextNodeId();
+ }
+
+ public String nextNodeId() throws NoSuchElementException
+ {
+ return iter.next();
+ }
+
+ public void remove()
+ {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ if (removedNodes.size() > 0 || addedNodes.size() > 0)
+ {
+ handler.updateNodes(removedIds, addedStates);
+ }
+ }
+
+ }
+
+ protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
+ throws RepositoryConfigurationException
+ {
+
+ QueryHandlerContext context =
+ new QueryHandlerContext(itemMgr, indexingTree, nodeTypeDataManager, nsReg, parentHandler, getIndexDir(),
+ extractor, true, virtualTableResolver);
+ return context;
+ }
+
+ /**
+ * Creates a new instance of an {@link AbstractQueryImpl} which is not
+ * initialized.
+ *
+ * @return an new query instance.
+ * @throws RepositoryException
+ * if an error occurs while creating a new query instance.
+ */
+ protected AbstractQueryImpl createQueryInstance() throws RepositoryException
+ {
+ try
+ {
+ String queryImplClassName = handler.getQueryClass();
+ Object obj = Class.forName(queryImplClassName).newInstance();
+ if (obj instanceof AbstractQueryImpl)
+ {
+ return (AbstractQueryImpl)obj;
+ }
+ else
+ {
+ throw new IllegalArgumentException(queryImplClassName + " is not of type "
+ + AbstractQueryImpl.class.getName());
+ }
+ }
+ catch (Throwable t)
+ {
+ throw new RepositoryException("Unable to create query: " + t.toString(), t);
+ }
+ }
+
+ protected String getIndexDir() throws RepositoryConfigurationException
+ {
+ String dir = config.getParameterValue(QueryHandlerParams.PARAM_INDEX_DIR, null);
+ if (dir == null)
+ {
+ log.warn(QueryHandlerParams.PARAM_INDEX_DIR + " parameter not found. Using outdated parameter name "
+ + QueryHandlerParams.OLD_PARAM_INDEX_DIR);
+ dir = config.getParameterValue(QueryHandlerParams.OLD_PARAM_INDEX_DIR);
+ }
+ return dir;
+ }
+
+ /**
+ * @return the indexingTree
+ */
+ protected IndexingTree getIndexingTree()
+ {
+ return indexingTree;
+ }
+
+   /**
+    * Initializes the changes filter declared in the configuration
+    * (PARAM_CHANGES_FILTER_CLASS, defaulting to DefaultChangesFilter).
+    * Reflection failures are wrapped into RepositoryException.
+    *
+    * @throws RepositoryException if the filter instance cannot be created
+    * @throws RepositoryConfigurationException on configuration errors
+    */
+ protected IndexerChangesFilter initializeChangesFilter() throws RepositoryException,
+ RepositoryConfigurationException
+
+ {
+ IndexerChangesFilter newChangesFilter = null;
+ Class<? extends IndexerChangesFilter> changesFilterClass = DefaultChangesFilter.class;
+ String changesFilterClassName = config.getParameterValue(QueryHandlerParams.PARAM_CHANGES_FILTER_CLASS, null);
+ try
+ {
+ if (changesFilterClassName != null)
+ {
+ changesFilterClass =
+ (Class<? extends IndexerChangesFilter>)Class.forName(changesFilterClassName, true, this.getClass()
+ .getClassLoader());
+ }
+ Constructor<? extends IndexerChangesFilter> constuctor =
+ changesFilterClass.getConstructor(SearchManager.class, SearchManager.class, QueryHandlerEntry.class,
+ IndexingTree.class, IndexingTree.class, QueryHandler.class, QueryHandler.class);
+ if (parentSearchManager != null)
+ {
+ newChangesFilter =
+ constuctor.newInstance(this, parentSearchManager, config, indexingTree, parentSearchManager
+ .getIndexingTree(), handler, parentSearchManager.getHandler());
+ }
+ }
+ catch (SecurityException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalArgumentException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (ClassNotFoundException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (NoSuchMethodException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InstantiationException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalAccessException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InvocationTargetException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ return newChangesFilter;
+ }
+
+   /**
+    * Initializes the query handler declared in the configuration and, when a
+    * parent search manager is present, the indexer changes filter as well.
+    *
+    * @throws RepositoryException
+    *            if the query handler cannot be initialized.
+    * @throws RepositoryConfigurationException if the handler type is missing
+    */
+ protected void initializeQueryHandler() throws RepositoryException, RepositoryConfigurationException
+ {
+ // initialize query handler
+ String className = config.getType();
+ if (className == null)
+ throw new RepositoryConfigurationException("Content hanler configuration fail");
+
+ try
+ {
+ Class qHandlerClass = Class.forName(className, true, this.getClass().getClassLoader());
+ Constructor constuctor = qHandlerClass.getConstructor(QueryHandlerEntry.class, ConfigurationManager.class);
+ handler = (QueryHandler)constuctor.newInstance(config, cfm);
+ QueryHandler parentHandler = (this.parentSearchManager != null) ? parentSearchManager.getHandler() : null;
+ QueryHandlerContext context = createQueryHandlerContext(parentHandler);
+ handler.setContext(context);
+
+ if (parentSearchManager != null)
+ {
+ changesFilter = initializeChangesFilter();
+ }
+ }
+ catch (SecurityException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalArgumentException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (ClassNotFoundException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (NoSuchMethodException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InstantiationException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (IllegalAccessException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ catch (InvocationTargetException e)
+ {
+ throw new RepositoryException(e.getMessage(), e);
+ }
+ }
+
+   /**
+    * Executes the given Lucene query and collects matching node ids.
+    * @param query the Lucene query to execute
+    * @return set of UUIDs of nodes matching the query
+    */
+ private Set<String> getNodes(final org.apache.lucene.search.Query query) throws RepositoryException
+ {
+ Set<String> result = new HashSet<String>();
+ try
+ {
+ QueryHits hits = handler.executeQuery(query);
+
+ ScoreNode sn;
+
+ while ((sn = hits.nextScoreNode()) != null)
+ {
+ // Node node = session.getNodeById(sn.getNodeId());
+ result.add(sn.getNodeId());
+ }
+ }
+ catch (IOException e)
+ {
+ throw new RepositoryException(e.getLocalizedMessage(), e);
+ }
+ return result;
+ }
+
+ private boolean isPrefixMatch(final InternalQName value, final String prefix) throws RepositoryException
+ {
+ return value.getNamespace().equals(nsReg.getNamespaceURIByPrefix(prefix));
+ }
+
+ private boolean isPrefixMatch(final QPath value, final String prefix) throws RepositoryException
+ {
+ for (int i = 0; i < value.getEntries().length; i++)
+ {
+ if (isPrefixMatch(value.getEntries()[i], prefix))
+ {
+ return true;
+ }
+ }
+ return false;
+ }
+
+   /**
+    * @param valueFactory used to materialize property values for inspection
+    * @param uuid id of the node whose PATH/NAME properties are checked
+    * @param prefix namespace prefix to match against
+    * @return true if any PATH or NAME property value uses the prefix
+    * @throws RepositoryException
+    */
+ private boolean isPrefixMatch(final ValueFactoryImpl valueFactory, final String uuid, final String prefix)
+ throws RepositoryException
+ {
+
+ final ItemData node = itemMgr.getItemData(uuid);
+ if (node != null && node.isNode())
+ {
+ final List<PropertyData> props = itemMgr.getChildPropertiesData((NodeData)node);
+ for (final PropertyData propertyData : props)
+ {
+ if (propertyData.getType() == PropertyType.PATH || propertyData.getType() == PropertyType.NAME)
+ {
+ for (final ValueData vdata : propertyData.getValues())
+ {
+ final Value val =
+ valueFactory.loadValue(((AbstractValueData)vdata).createTransientCopy(), propertyData.getType());
+ if (propertyData.getType() == PropertyType.PATH)
+ {
+ if (isPrefixMatch(((PathValue)val).getQPath(), prefix))
+ {
+ return true;
+ }
+ }
+ else if (propertyData.getType() == PropertyType.NAME)
+ {
+ if (isPrefixMatch(((NameValue)val).getQName(), prefix))
+ {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+}
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/SystemSearchManager.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -21,7 +21,6 @@
import org.exoplatform.services.jcr.config.QueryHandlerEntry;
import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
import org.exoplatform.services.jcr.core.nodetype.NodeTypeDataManager;
-import org.exoplatform.services.jcr.dataflow.ItemStateChangesLog;
import org.exoplatform.services.jcr.datamodel.NodeData;
import org.exoplatform.services.jcr.datamodel.QPath;
import org.exoplatform.services.jcr.impl.Constants;
@@ -55,10 +54,16 @@
*/
private boolean isStarted = false;
- /**
- * ChangesLog Buffer (used for saves before start).
- */
- private List<ItemStateChangesLog> changesLogBuffer = new ArrayList<ItemStateChangesLog>();
+ //
+ // /**
+ // * ChangesLog Buffer (used for saves before start).
+ // */
+ // private List<ItemStateChangesLog> changesLogBuffer = new ArrayList<ItemStateChangesLog>();
+ //
+ // /**
+ // * ChangesLog Buffer (used for saves before start).
+ // */
+ // private List<List<WriteCommand>> writeCommandBuffer = new ArrayList<List<WriteCommand>>();
public static final String INDEX_DIR_SUFFIX = "system";
@@ -69,69 +74,166 @@
super(config, nsReg, ntReg, itemMgr, null, service, cfm, indexSearcherHolder);
}
+ // @Override
+ // public void onSaveItems(ItemStateChangesLog changesLog)
+ // {
+ // if (!isStarted)
+ // {
+ // changesLogBuffer.add(changesLog);
+ // }
+ // else
+ // {
+ // super.onSaveItems(changesLog);
+ // }
+ // }
+
@Override
- public void onSaveItems(ItemStateChangesLog changesLog)
+ public void start()
{
if (!isStarted)
{
- changesLogBuffer.add(changesLog);
- }
- else
- {
- super.onSaveItems(changesLog);
- }
- }
- @Override
- public void start()
- {
-
- isStarted = true;
- try
- {
- if (indexingTree == null)
+ try
{
- List<QPath> excludedPaths = new ArrayList<QPath>();
+ if (indexingTree == null)
+ {
+ List<QPath> excludedPaths = new ArrayList<QPath>();
- NodeData indexingRootNodeData = (NodeData)itemMgr.getItemData(Constants.SYSTEM_UUID);
+ NodeData indexingRootNodeData = (NodeData)itemMgr.getItemData(Constants.SYSTEM_UUID);
- indexingTree = new IndexingTree(indexingRootNodeData, excludedPaths);
+ indexingTree = new IndexingTree(indexingRootNodeData, excludedPaths);
+ }
+ initializeQueryHandler();
+
}
- initializeQueryHandler();
+ catch (RepositoryException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ //freeBuffers();
+ throw new RuntimeException(e);
+ }
+ catch (RepositoryConfigurationException e)
+ {
+ log.error(e.getLocalizedMessage());
+ handler = null;
+ //freeBuffers();
+ throw new RuntimeException(e);
+ }
+ isStarted = true;
}
-
- catch (RepositoryException e)
- {
- log.error(e.getLocalizedMessage());
- handler = null;
- changesLogBuffer.clear();
- changesLogBuffer = null;
- throw new RuntimeException(e);
- }
- catch (RepositoryConfigurationException e)
- {
- log.error(e.getLocalizedMessage());
- handler = null;
- changesLogBuffer.clear();
- changesLogBuffer = null;
- throw new RuntimeException(e);
- }
- for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer)
- {
- super.onSaveItems(bufferedChangesLog);
- }
- changesLogBuffer.clear();
- changesLogBuffer = null;
+ // if (changesLogBuffer.size() > 0)
+ // {
+ // for (ItemStateChangesLog bufferedChangesLog : changesLogBuffer)
+ // {
+ // super.onSaveItems(bufferedChangesLog);
+ // }
+ //
+ // }
+ //
+ // if (writeCommandBuffer.size() > 0)
+ // {
+ // try
+ // {
+ // for (List<WriteCommand> bufferedWriteLog : writeCommandBuffer)
+ // {
+ // super.onSaveItems(bufferedWriteLog);
+ // }
+ // }
+ // catch (RepositoryException e)
+ // {
+ // freeBuffers();
+ // throw new RuntimeException(e);
+ //
+ // }
+ // }
+ // freeBuffers();
}
+ // /**
+ // * @see org.exoplatform.services.jcr.impl.core.query.SearchManager#initializeChangesFilter()
+ // */
+ // @Override
+ // protected void initializeChangesFilter() throws RepositoryException, RepositoryConfigurationException
+ // {
+ // Class<? extends IndexerChangesFilter> changesFilterClass = DefaultChangesFilter.class;
+ // String changesFilterClassName = config.getParameterValue(QueryHandlerParams.PARAM_CHANGES_FILTER_CLASS, null);
+ // try
+ // {
+ // if (changesFilterClassName != null)
+ // {
+ // changesFilterClass =
+ // (Class<? extends IndexerChangesFilter>)Class.forName(changesFilterClassName, true, this.getClass()
+ // .getClassLoader());
+ // }
+ // Constructor<? extends IndexerChangesFilter> constuctor =
+ // changesFilterClass.getConstructor(SearchManager.class, QueryHandlerEntry.class, Boolean.class,
+ // IndexingTree.class);
+ // changesFilter = constuctor.newInstance(this, config, true, indexingTree);
+ // }
+ // catch (SecurityException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (IllegalArgumentException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (ClassNotFoundException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (NoSuchMethodException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (InstantiationException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (IllegalAccessException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // catch (InvocationTargetException e)
+ // {
+ // throw new RepositoryException(e.getMessage(), e);
+ // }
+ // }
+
+ // private void freeBuffers()
+ // {
+ // changesLogBuffer.clear();
+ // changesLogBuffer = null;
+ // writeCommandBuffer.clear();
+ // writeCommandBuffer = null;
+ // }
+
@Override
protected QueryHandlerContext createQueryHandlerContext(QueryHandler parentHandler)
throws RepositoryConfigurationException
{
QueryHandlerContext context =
new QueryHandlerContext(itemMgr, indexingTree, nodeTypeDataManager, nsReg, parentHandler, getIndexDir() + "_"
- + INDEX_DIR_SUFFIX, extractor, changesLogBuffer.size() > 0 && !isStarted, virtualTableResolver);
+ + INDEX_DIR_SUFFIX, extractor, true, virtualTableResolver);
return context;
}
+ //
+ // /* (non-Javadoc)
+ // * @see org.exoplatform.services.jcr.impl.core.query.SearchManager#onSaveItems(java.util.List)
+ // */
+ // @Override
+ // public void onSaveItems(List<WriteCommand> modifications) throws RepositoryException
+ // {
+ // if (!isStarted)
+ // {
+ // writeCommandBuffer.add(modifications);
+ // }
+ // else
+ // {
+ // super.onSaveItems(modifications);
+ // }
+ //
+ // }
}
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/ChangesFilterListsWrapper.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/ChangesFilterListsWrapper.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/ChangesFilterListsWrapper.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query.jbosscache;
+
+import java.io.Serializable;
+import java.util.Set;
+
+/**
+ * FOR TESTING PURPOSES ONLY. Used to avoid batching usage in indexer cache.
+ *
+ * @author <a href="mailto:nikolazius at gmail.com">Nikolay Zamosenchuk</a>
+ * @version $Id: ChangesFilterListsWrapper.java 34360 2009-07-22 23:58:59Z nzamosenchuk $
+ *
+ */
+public class ChangesFilterListsWrapper implements Serializable
+{
+ private static final long serialVersionUID = 1L;
+
+ private Set<String> addedNodes;
+
+ private Set<String> removedNodes;
+
+ private Set<String> parentAddedNodes;
+
+ private Set<String> parentRemovedNodes;
+
+ /**
+ * Creates ChangesFilterListsWrapper data class, containing given lists.
+ *
+ * @param addedNodes
+ * @param removedNodes
+ * @param parentAddedNodes
+ * @param parentRemovedNodes
+ */
+ public ChangesFilterListsWrapper(Set<String> addedNodes, Set<String> removedNodes, Set<String> parentAddedNodes,
+ Set<String> parentRemovedNodes)
+ {
+ this.addedNodes = addedNodes;
+ this.removedNodes = removedNodes;
+ this.parentAddedNodes = parentAddedNodes;
+ this.parentRemovedNodes = parentRemovedNodes;
+ }
+
+ public Set<String> getAddedNodes()
+ {
+ return addedNodes;
+ }
+
+ public Set<String> getRemovedNodes()
+ {
+ return removedNodes;
+ }
+
+ public Set<String> getParentAddedNodes()
+ {
+ return parentAddedNodes;
+ }
+
+ public Set<String> getParentRemovedNodes()
+ {
+ return parentRemovedNodes;
+ }
+
+ public String dump()
+ {
+ StringBuffer buffer = new StringBuffer();
+ buffer.append("\n");
+ buffer.append("Added=").append(addedNodes.toString()).append("\n");
+ buffer.append("Removed=").append(removedNodes.toString()).append("\n");
+ buffer.append("ParentAdded=").append(parentAddedNodes.toString()).append("\n");
+ buffer.append("ParentRemoved=").append(parentRemovedNodes.toString());
+ return buffer.toString();
+ }
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/ChangesFilterListsWrapper.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerCacheLoader.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerCacheLoader.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerCacheLoader.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2003-2009 eXo Platform SAS.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Affero General Public License
+ * as published by the Free Software Foundation; either version 3
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see<http://www.gnu.org/licenses/>.
+ */
+package org.exoplatform.services.jcr.impl.core.query.jbosscache;
+
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
+import org.exoplatform.services.jcr.impl.core.query.QueryHandler;
+import org.exoplatform.services.jcr.impl.core.query.SearchManager;
+import org.exoplatform.services.jcr.impl.storage.jbosscache.AbstractWriteOnlyCacheLoader;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Modification;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * @author <a href="mailto:nikolazius at gmail.com">Nikolay Zamosenchuk</a>
+ * @version $Id: IndexerCacheLoader.java 34360 2009-07-22 23:58:59Z nzamosenchuk $
+ *
+ */
+public class IndexerCacheLoader extends AbstractWriteOnlyCacheLoader
+{
+ private final Log log = ExoLogger.getLogger(this.getClass().getName());
+
+ private SearchManager searchManager;
+
+ private SearchManager parentSearchManager;
+
+ private QueryHandler handler;
+
+ private QueryHandler parentHandler;
+
+ /**
+ * Inject dependencies needed for CacheLoader: SearchManagers and QueryHandlers.
+ *
+ * @param searchManager
+ * @param parentSearchManager
+ * @param handler
+ * @param parentHandler
+ * @throws RepositoryConfigurationException
+ */
+ public void init(SearchManager searchManager, SearchManager parentSearchManager, QueryHandler handler,
+ QueryHandler parentHandler) throws RepositoryConfigurationException
+ {
+ this.searchManager = searchManager;
+ this.parentSearchManager = parentSearchManager;
+ this.handler = handler;
+ this.parentHandler = parentHandler;
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.impl.storage.jbosscache.AbstractWriteOnlyCacheLoader#put(org.jboss.cache.Fqn, java.lang.Object, java.lang.Object)
+ */
+ @Override
+ public Object put(Fqn arg0, Object key, Object val) throws Exception
+ {
+ if (key.equals(JbossCacheIndexChangesFilter.LISTWRAPPER) && val instanceof ChangesFilterListsWrapper)
+ {
+ if (log.isDebugEnabled())
+ {
+            log.debug("Received list wrapper, start indexing...");
+ }
+ ChangesFilterListsWrapper wrapper = (ChangesFilterListsWrapper)val;
+ updateIndex(wrapper.getAddedNodes(), wrapper.getRemovedNodes(), wrapper.getParentAddedNodes(), wrapper
+ .getParentRemovedNodes());
+ }
+ return null;
+ }
+
+ /**
+ * Flushes lists of added/removed nodes to SearchManagers, starting indexing.
+ *
+ * @param addedNodes
+ * @param removedNodes
+ * @param parentAddedNodes
+ * @param parentRemovedNodes
+ */
+ protected void updateIndex(Set<String> addedNodes, Set<String> removedNodes, Set<String> parentAddedNodes,
+ Set<String> parentRemovedNodes)
+ {
+ // pass lists to search manager
+ if (searchManager != null && (addedNodes.size() > 0 || removedNodes.size() > 0))
+ {
+ try
+ {
+ searchManager.updateIndex(removedNodes, addedNodes);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ handler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+               log.warn("Exception occurred while writing the error log. Error log is not complete. " + ioe, ioe);
+ }
+ }
+ }
+ // pass lists to parent search manager
+ if (parentSearchManager != null && (parentAddedNodes.size() > 0 || parentRemovedNodes.size() > 0))
+ {
+ try
+ {
+ parentSearchManager.updateIndex(parentRemovedNodes, parentAddedNodes);
+ }
+ catch (RepositoryException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ }
+ catch (IOException e)
+ {
+ log.error("Error indexing changes " + e, e);
+ try
+ {
+ parentHandler.logErrorChanges(removedNodes, addedNodes);
+ }
+ catch (IOException ioe)
+ {
+               log.warn("Exception occurred while writing the error log. Error log is not complete. " + ioe, ioe);
+ }
+ }
+ }
+ }
+
+ /**
+ * Switches Indexer mode from RO to RW, or from RW to RO
+ *
+ * @param ioMode
+ */
+ public void setMode(IndexerIoMode ioMode)
+ {
+ try
+ {
+ if (handler != null)
+ {
+ handler.setIndexerIoMode(ioMode);
+ }
+ if (parentHandler != null)
+ {
+ parentHandler.setIndexerIoMode(ioMode);
+ }
+ }
+ catch (IOException e)
+ {
+ log.error("Unable to set indexer mode to " + ioMode, e);
+ }
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.impl.storage.jbosscache.AbstractWriteOnlyCacheLoader#put(java.util.List)
+ */
+ @Override
+ public void put(List<Modification> modifications) throws Exception
+ {
+ // batching is not used
+ }
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerCacheLoader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerSingletonStoreCacheLoader.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerSingletonStoreCacheLoader.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerSingletonStoreCacheLoader.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query.jbosscache;
+
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
+import org.exoplatform.services.jcr.util.IdGenerator;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.NodeSPI;
+import org.jboss.cache.loader.SingletonStoreCacheLoader;
+import org.jboss.cache.notifications.annotation.CacheListener;
+import org.jboss.cache.notifications.annotation.CacheStarted;
+import org.jboss.cache.notifications.event.Event;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.Callable;
+
+/**
+ * @author <a href="mailto:nikolazius at gmail.com">Nikolay Zamosenchuk</a>
+ * @version $Id: IndexerSingletonStoreCacheLoader.java 1008 2009-12-11 15:14:51Z nzamosenchuk $
+ *
+ */
+public class IndexerSingletonStoreCacheLoader extends SingletonStoreCacheLoader
+{
+ private final Log log = ExoLogger.getLogger(this.getClass().getName());
+
+ /**
+ * @see org.jboss.cache.loader.SingletonStoreCacheLoader#activeStatusChanged(boolean)
+ */
+ @Override
+ protected void activeStatusChanged(boolean newActiveState) throws PushStateException
+ {
+ // at first change indexer mode
+ setIndexerMode(newActiveState);
+      // and then push states if needed
+ super.activeStatusChanged(newActiveState);
+ }
+
+ @Override
+ protected Callable<?> createPushStateTask()
+ {
+ return new Callable()
+ {
+ public Object call() throws Exception
+ {
+ final boolean debugEnabled = log.isDebugEnabled();
+
+ if (debugEnabled)
+ log.debug("start pushing in-memory state to cache cacheLoader collection");
+
+ final Set<String> removedNodes = new HashSet<String>();
+ final Set<String> addedNodes = new HashSet<String>();
+ final Set<String> parentRemovedNodes = new HashSet<String>();
+ final Set<String> parentAddedNodes = new HashSet<String>();
+ // merging all lists stored in memory
+ Collection<NodeSPI> children = cache.getRoot().getChildren();
+ for (NodeSPI aChildren : children)
+ {
+ Fqn<?> fqn = aChildren.getFqn();
+ Object value = cache.get(fqn, JbossCacheIndexChangesFilter.LISTWRAPPER);
+ if (value != null && value instanceof ChangesFilterListsWrapper)
+ {
+ // get wrapper object
+ ChangesFilterListsWrapper listsWrapper = (ChangesFilterListsWrapper)value;
+ // get search manager lists
+ addedNodes.addAll(listsWrapper.getAddedNodes());
+ removedNodes.addAll(listsWrapper.getRemovedNodes());
+ // parent search manager lists
+ parentAddedNodes.addAll(listsWrapper.getParentAddedNodes());
+                  parentRemovedNodes.addAll(listsWrapper.getParentRemovedNodes());
+ };
+ }
+            // TODO: recovery logic goes here; the merged lists are removedNodes and addedNodes
+ String id = IdGenerator.generate();
+ cache.put(id, JbossCacheIndexChangesFilter.LISTWRAPPER, new ChangesFilterListsWrapper(addedNodes,
+ removedNodes, parentAddedNodes, parentRemovedNodes));
+ if (debugEnabled)
+ log.debug("in-memory state passed to cache cacheLoader successfully");
+ return null;
+ }
+ };
+ }
+
+ /**
+ * Sets/changes indexer mode
+ *
+ * @param writeEnabled
+ */
+ protected void setIndexerMode(boolean writeEnabled)
+ {
+ // get base cache loader that is configured under SingletonStoreCacheLoader
+ // if it is IndexerCacheLoader need to call setMode(ioMode)
+ if (getCacheLoader() instanceof IndexerCacheLoader)
+ {
+ // if newActiveState is true IndexerCacheLoader is coordinator with write enabled;
+ ((IndexerCacheLoader)getCacheLoader()).setMode(writeEnabled ? IndexerIoMode.READ_WRITE
+ : IndexerIoMode.READ_ONLY);
+ log.info("Set indexer io mode to:" + (writeEnabled ? IndexerIoMode.READ_WRITE : IndexerIoMode.READ_ONLY));
+ }
+ }
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/IndexerSingletonStoreCacheLoader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JbossCacheIndexChangesFilter.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JbossCacheIndexChangesFilter.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JbossCacheIndexChangesFilter.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2009 eXo Platform SAS.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.exoplatform.services.jcr.impl.core.query.jbosscache;
+
+import org.exoplatform.services.jcr.config.QueryHandlerEntry;
+import org.exoplatform.services.jcr.config.QueryHandlerParams;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
+import org.exoplatform.services.jcr.impl.core.query.IndexerChangesFilter;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
+import org.exoplatform.services.jcr.impl.core.query.IndexingTree;
+import org.exoplatform.services.jcr.impl.core.query.QueryHandler;
+import org.exoplatform.services.jcr.impl.core.query.SearchManager;
+import org.exoplatform.services.jcr.util.IdGenerator;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheFactory;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.config.CacheLoaderConfig;
+import org.jboss.cache.config.CacheLoaderConfig.IndividualCacheLoaderConfig;
+import org.jboss.cache.config.CacheLoaderConfig.IndividualCacheLoaderConfig.SingletonStoreConfig;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.Properties;
+import java.util.Set;
+
+import javax.jcr.RepositoryException;
+
+/**
+ * @author <a href="mailto:Sergey.Kabashnyuk at exoplatform.org">Sergey Kabashnyuk</a>
+ * @version $Id: exo-jboss-codetemplates.xml 34360 2009-07-22 23:58:59Z ksm $
+ *
+ */
+public class JbossCacheIndexChangesFilter extends IndexerChangesFilter
+{
+ /**
+ * Logger instance for this class
+ */
+ private final Log log = ExoLogger.getLogger(JbossCacheIndexChangesFilter.class);
+
+ private final Cache<Serializable, Object> cache;
+
+ public static String ADDED = "$add".intern();
+
+ public static String REMOVED = "$remove".intern();
+
+ public static String PARENT_ADDED = "$padd".intern();
+
+ public static String PARENT_REMOVED = "$premove".intern();
+
+ public static String LISTWRAPPER = "$lists".intern();
+
+ /**
+ * @param searchManager
+ * @param config
+ * @param indexingTree
+ * @throws RepositoryConfigurationException
+ */
+ public JbossCacheIndexChangesFilter(SearchManager searchManager, SearchManager parentSearchManager,
+ QueryHandlerEntry config, IndexingTree indexingTree, IndexingTree parentIndexingTree, QueryHandler handler,
+ QueryHandler parentHandler) throws IOException, RepositoryException, RepositoryConfigurationException
+ {
+ super(searchManager, parentSearchManager, config, indexingTree, parentIndexingTree, handler, parentHandler);
+ String jbcConfig = config.getParameterValue(QueryHandlerParams.PARAM_CHANGES_FILTER_CONFIG_PATH);
+ CacheFactory<Serializable, Object> factory = new DefaultCacheFactory<Serializable, Object>();
+ log.info("JBoss Cache configuration used: " + jbcConfig);
+ this.cache = factory.createCache(jbcConfig, false);
+
+ // initialize IndexerCacheLoader
+ IndexerCacheLoader indexerCacheLoader = new IndexerCacheLoader();
+ // inject dependencies
+ indexerCacheLoader.init(searchManager, parentSearchManager, handler, parentHandler);
+      // set SingletonStoreCacheLoader
+ SingletonStoreConfig singletonStoreConfig = new SingletonStoreConfig();
+ singletonStoreConfig.setSingletonStoreClass(IndexerSingletonStoreCacheLoader.class.getName());
+ //singletonStoreConfig.setSingletonStoreClass(SingletonStoreCacheLoader.class.getName());
+ Properties singletonStoreProperties = new Properties();
+ singletonStoreProperties.setProperty("pushStateWhenCoordinator", "false");
+ singletonStoreProperties.setProperty("pushStateWhenCoordinatorTimeout", "10000");
+ singletonStoreConfig.setProperties(singletonStoreProperties);
+ singletonStoreConfig.setSingletonStoreEnabled(true);
+ // create CacheLoaderConfig
+ IndividualCacheLoaderConfig individualCacheLoaderConfig = new IndividualCacheLoaderConfig();
+ // set SingletonStoreConfig
+ individualCacheLoaderConfig.setSingletonStoreConfig(singletonStoreConfig);
+ // set CacheLoader
+ individualCacheLoaderConfig.setCacheLoader(indexerCacheLoader);
+ // set parameters
+ individualCacheLoaderConfig.setFetchPersistentState(false);
+ individualCacheLoaderConfig.setAsync(false);
+ individualCacheLoaderConfig.setIgnoreModifications(false);
+ individualCacheLoaderConfig.setPurgeOnStartup(false);
+ // create CacheLoaderConfig
+ CacheLoaderConfig cacheLoaderConfig = new CacheLoaderConfig();
+ cacheLoaderConfig.setShared(false);
+ cacheLoaderConfig.setPassivation(false);
+ cacheLoaderConfig.addIndividualCacheLoaderConfig(individualCacheLoaderConfig);
+ // insert CacheLoaderConfig
+ this.cache.getConfiguration().setCacheLoaderConfig(cacheLoaderConfig);
+ this.cache.create();
+ this.cache.start();
+ // start will invoke cache listener which will notify handler that mode is changed
+ IndexerIoMode ioMode =
+ ((CacheSPI)cache).getRPCManager().isCoordinator() ? IndexerIoMode.READ_WRITE : IndexerIoMode.READ_ONLY;
+
+ handler.setIndexerIoMode(ioMode);
+ parentHandler.setIndexerIoMode(ioMode);
+
+ if (!parentHandler.isInitialized())
+ {
+ parentHandler.init();
+ }
+ if (!handler.isInitialized())
+ {
+ handler.init();
+ }
+
+ }
+
+ /**
+ * @see org.exoplatform.services.jcr.impl.core.query.IndexerChangesFilter#doUpdateIndex(java.util.Set, java.util.Set, java.util.Set, java.util.Set)
+ */
+ @Override
+ protected void doUpdateIndex(Set<String> removedNodes, Set<String> addedNodes, Set<String> parentRemovedNodes,
+ Set<String> parentAddedNodes)
+ {
+ String id = IdGenerator.generate();
+ cache.put(id, LISTWRAPPER, new ChangesFilterListsWrapper(addedNodes, removedNodes, parentAddedNodes,
+ parentRemovedNodes));
+ }
+
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JbossCacheIndexChangesFilter.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -152,6 +152,15 @@
}
/**
+ * Returns a snapshot of all index names.
+ * @return the set of index names.
+ */
+ Set<String> getNames() {
+ return new HashSet<String>(indexes);
+ }
+
+ /**
* Returns the number of index names.
* @return the number of index names.
*/
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryBuilder.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -775,27 +775,18 @@
}, null);
QPath relPath = node.getRelativePath();
-
- InternalQName propName;
-
if (node.getOperation() == QueryConstants.OPERATION_SIMILAR)
{
// this is a bit ugly:
// add the name of a dummy property because relPath actually
// references a property. whereas the relPath of the similar
// operation references a node
- //relPath = QPath.makeChildPath(relPath, Constants.JCR_PRIMARYTYPE);
- propName = Constants.JCR_PRIMARYTYPE;
+ relPath = QPath.makeChildPath(relPath, Constants.JCR_PRIMARYTYPE);
}
- else
- {
- propName = relPath.getName();
- }
-
String field = "";
try
{
- field = resolver.createJCRName(propName).getAsString();
+ field = resolver.createJCRName(relPath.getName()).getAsString();
}
catch (NamespaceException e)
{
@@ -804,7 +795,7 @@
}
// support for fn:name()
- //InternalQName propName = relPath.getName();
+ InternalQName propName = relPath.getName();
if (propName.getNamespace().equals(NS_FN_URI) && propName.getName().equals("name()"))
{
if (node.getValueType() != QueryConstants.TYPE_STRING)
@@ -1062,7 +1053,7 @@
}
}
- if (relPath != null && relPath.getEntries().length > 1)
+ if (relPath.getEntries().length > 1)
{
// child axis in relation
QPathEntry[] elements = relPath.getEntries();
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -24,11 +24,13 @@
import org.exoplatform.services.jcr.datamodel.ItemData;
import org.exoplatform.services.jcr.datamodel.NodeData;
import org.exoplatform.services.jcr.impl.Constants;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
import org.exoplatform.services.jcr.impl.core.query.IndexingTree;
import org.exoplatform.services.jcr.impl.core.query.lucene.directory.DirectoryManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -73,2024 +75,2400 @@
* thread and reader threads is done using {@link #updateMonitor} and
* {@link #updateInProgress}.
*/
-public class MultiIndex {
+public class MultiIndex
+{
- /**
- * The logger instance for this class
- */
- private static final Logger log = LoggerFactory.getLogger(MultiIndex.class);
+ /**
+ * The logger instance for this class
+ */
+ private static final Logger log = LoggerFactory.getLogger(MultiIndex.class);
- /**
- * Names of active persistent index directories.
- */
- private final IndexInfos indexNames = new IndexInfos("indexes");
+ /**
+ * Names of active persistent index directories.
+ */
+ private IndexInfos indexNames = new IndexInfos("indexes");
- /**
- * Names of index directories that can be deleted.
- */
- private final Set deletable = new HashSet();
+ /**
+ * Names of index directories that can be deleted.
+ */
+ private final Set deletable = new HashSet();
- /**
- * List of open persistent indexes. This list may also contain an open
- * PersistentIndex owned by the IndexMerger daemon. Such an index is not
- * registered with indexNames and <b>must not</b> be used in regular index
- * operations (delete node, etc.)!
- */
- private final List indexes = new ArrayList();
+ /**
+ * List of open persistent indexes. This list may also contain an open
+ * PersistentIndex owned by the IndexMerger daemon. Such an index is not
+ * registered with indexNames and <b>must not</b> be used in regular index
+ * operations (delete node, etc.)!
+ */
+ private final List indexes = new ArrayList();
- /**
- * The internal namespace mappings of the query manager.
- */
- private final NamespaceMappings nsMappings;
+ /**
+ * The internal namespace mappings of the query manager.
+ */
+ private final NamespaceMappings nsMappings;
- /**
- * The directory manager.
- */
- private final DirectoryManager directoryManager;
+ /**
+ * The directory manager.
+ */
+ private final DirectoryManager directoryManager;
- /**
- * The base directory to store the index.
- */
- private final Directory indexDir;
+ /**
+ * The base directory to store the index.
+ */
+ private final Directory indexDir;
- /**
- * The query handler
- */
- private final SearchIndex handler;
+ /**
+ * The query handler
+ */
+ private final SearchIndex handler;
- /**
- * The volatile index.
- */
- private VolatileIndex volatileIndex;
+ /**
+ * The volatile index.
+ */
+ private VolatileIndex volatileIndex;
- /**
- * Flag indicating whether an update operation is in progress.
- */
- private boolean updateInProgress = false;
+ /**
+ * Flag indicating whether an update operation is in progress.
+ */
+ private boolean updateInProgress = false;
- /**
- * If not <code>null</code> points to a valid <code>IndexReader</code> that
- * reads from all indexes, including volatile and persistent indexes.
- */
- private CachingMultiIndexReader multiReader;
+ /**
+ * If not <code>null</code> points to a valid <code>IndexReader</code> that
+ * reads from all indexes, including volatile and persistent indexes.
+ */
+ private CachingMultiIndexReader multiReader;
- /**
- * Shared document number cache across all persistent indexes.
- */
- private final DocNumberCache cache;
+ /**
+ * Shared document number cache across all persistent indexes.
+ */
+ private final DocNumberCache cache;
- /**
- * Monitor to use to synchronize access to {@link #multiReader} and
- * {@link #updateInProgress}.
- */
- private final Object updateMonitor = new Object();
+ /**
+ * Monitor to use to synchronize access to {@link #multiReader} and
+ * {@link #updateInProgress}.
+ */
+ private final Object updateMonitor = new Object();
- /**
- * <code>true</code> if the redo log contained entries on startup.
- */
- private boolean redoLogApplied = false;
+ /**
+ * <code>true</code> if the redo log contained entries on startup.
+ */
+ private boolean redoLogApplied = false;
- /**
- * The time this index was last flushed or a transaction was committed.
- */
- private long lastFlushTime;
+ /**
+ * The time this index was last flushed or a transaction was committed.
+ */
+ private long lastFlushTime;
- /**
- * The <code>IndexMerger</code> for this <code>MultiIndex</code>.
- */
- private final IndexMerger merger;
+ /**
+ * The <code>IndexMerger</code> for this <code>MultiIndex</code>.
+ */
+ private final IndexMerger merger;
- /**
- * Timer to schedule flushes of this index after some idle time.
- */
- private static final Timer FLUSH_TIMER = new Timer(true);
+ /**
+ * Timer to schedule flushes of this index after some idle time.
+ */
+ private static final Timer FLUSH_TIMER = new Timer(true);
- /**
- * Task that is periodically called by {@link #FLUSH_TIMER} and checks if
- * index should be flushed.
- */
- private final TimerTask flushTask;
+ /**
+ * Task that is periodically called by {@link #FLUSH_TIMER} and checks if
+ * index should be flushed.
+ */
+ private TimerTask flushTask;
- /**
- * The RedoLog of this <code>MultiIndex</code>.
- */
- private final RedoLog redoLog;
+ /**
+ * The RedoLog of this <code>MultiIndex</code>.
+ */
+ private RedoLog redoLog = null;
- /**
- * The indexing queue with pending text extraction jobs.
- */
- private IndexingQueue indexingQueue;
+ /**
+ * The indexing queue with pending text extraction jobs.
+ */
+ private IndexingQueue indexingQueue;
- /**
- * Set<NodeId> of uuids that should not be indexed.
- */
- private final IndexingTree indexingTree;
+ /**
+ * Set<NodeId> of uuids that should not be indexed.
+ */
+ private final IndexingTree indexingTree;
- /**
- * The next transaction id.
- */
- private long nextTransactionId = 0;
+ /**
+ * The next transaction id.
+ */
+ private long nextTransactionId = 0;
- /**
- * The current transaction id.
- */
- private long currentTransactionId = -1;
+ /**
+ * The current transaction id.
+ */
+ private long currentTransactionId = -1;
- /**
- * Flag indicating whether re-indexing is running.
- */
- private boolean reindexing = false;
+ /**
+ * Flag indicating whether re-indexing is running.
+ */
+ private boolean reindexing = false;
- /**
- * The index format version of this multi index.
- */
- private final IndexFormatVersion version;
+ /**
+ * The index format version of this multi index.
+ */
+ private final IndexFormatVersion version;
- /**
- * Creates a new MultiIndex.
- *
- * @param handler
- * the search handler
- * @param excludedIDs
- * Set<NodeId> that contains uuids that should not be indexed
- * nor further traversed.
- * @throws IOException
- * if an error occurs
- */
- MultiIndex(SearchIndex handler, IndexingTree indexingTree)
- throws IOException {
- this.directoryManager = handler.getDirectoryManager();
- this.indexDir = directoryManager.getDirectory(".");
- this.handler = handler;
- this.cache = new DocNumberCache(handler.getCacheSize());
- this.redoLog = new RedoLog(indexDir);
- this.indexingTree = indexingTree;
- this.nsMappings = handler.getNamespaceMappings();
+ /**
+ * Indexer io mode
+ */
+ private IndexerIoMode ioMode = IndexerIoMode.READ_ONLY;
- if (indexNames.exists(indexDir)) {
- indexNames.read(indexDir);
- }
+ /**
+ * Creates a new MultiIndex in READ ONLY MODE! setIndexerIoMode(READ_WRITE) later.
+ *
+ * @param handler
+ * the search handler
+ * @param indexingTree
+ * tree describing which uuids should not be indexed
+ * nor further traversed.
+ * @throws IOException
+ * if an error occurs
+ */
+ MultiIndex(SearchIndex handler, IndexingTree indexingTree, IndexerIoMode ioMode) throws IOException
+ {
+ this.ioMode = ioMode;
+ this.directoryManager = handler.getDirectoryManager();
+ this.indexDir = directoryManager.getDirectory(".");
+ this.handler = handler;
+ this.cache = new DocNumberCache(handler.getCacheSize());
+ this.indexingTree = indexingTree;
+ this.nsMappings = handler.getNamespaceMappings();
+ this.flushTask = null;
+ if (indexNames.exists(indexDir))
+ {
+ indexNames.read(indexDir);
+ }
- // as of 1.5 deletable file is not used anymore
- removeDeletable();
+ // as of 1.5 deletable file is not used anymore
+ removeDeletable();
- // initialize IndexMerger
- merger = new IndexMerger(this);
- merger.setMaxMergeDocs(handler.getMaxMergeDocs());
- merger.setMergeFactor(handler.getMergeFactor());
- merger.setMinMergeDocs(handler.getMinMergeDocs());
+ // initialize IndexMerger
+ merger = new IndexMerger(this);
+ merger.setMaxMergeDocs(handler.getMaxMergeDocs());
+ merger.setMergeFactor(handler.getMergeFactor());
+ merger.setMinMergeDocs(handler.getMinMergeDocs());
- IndexingQueueStore store = new IndexingQueueStore(indexDir);
+ IndexingQueueStore store = new IndexingQueueStore(indexDir);
- // initialize indexing queue
- this.indexingQueue = new IndexingQueue(store);
+ // initialize indexing queue
+ this.indexingQueue = new IndexingQueue(store);
- // open persistent indexes
- for (int i = 0; i < indexNames.size(); i++) {
- String name = indexNames.getName(i);
- // only open if it still exists
- // it is possible that indexNames still contains a name for
- // an index that has been deleted, but indexNames has not been
- // written to disk.
- if (!directoryManager.hasDirectory(name)) {
- log.debug("index does not exist anymore: " + name);
- // move on to next index
- continue;
- }
- PersistentIndex index = new PersistentIndex(name, handler
- .getTextAnalyzer(), handler.getSimilarity(), cache,
- indexingQueue, directoryManager);
- index.setMaxFieldLength(handler.getMaxFieldLength());
- index.setUseCompoundFile(handler.getUseCompoundFile());
- index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
- indexes.add(index);
- merger.indexAdded(index.getName(), index.getNumDocuments());
- }
+ // open persistent indexes
+ for (int i = 0; i < indexNames.size(); i++)
+ {
+ String name = indexNames.getName(i);
+ // only open if it still exists
+ // it is possible that indexNames still contains a name for
+ // an index that has been deleted, but indexNames has not been
+ // written to disk.
+ if (!directoryManager.hasDirectory(name))
+ {
+ log.debug("index does not exist anymore: " + name);
+ // move on to next index
+ continue;
+ }
+ PersistentIndex index =
+ new PersistentIndex(name, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+ directoryManager);
+ index.setMaxFieldLength(handler.getMaxFieldLength());
+ index.setUseCompoundFile(handler.getUseCompoundFile());
+ index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
+ indexes.add(index);
+ merger.indexAdded(index.getName(), index.getNumDocuments());
+ }
- // init volatile index
- resetVolatileIndex();
+ // init volatile index
+ resetVolatileIndex();
- // set index format version and at the same time
- // initialize hierarchy cache if requested.
- CachingMultiIndexReader reader = getIndexReader(handler
- .isInitializeHierarchyCache());
- try {
- version = IndexFormatVersion.getVersion(reader);
- } finally {
- reader.release();
- }
+ // set index format version and at the same time
+ // initialize hierarchy cache if requested.
+ CachingMultiIndexReader reader = getIndexReader(handler.isInitializeHierarchyCache());
+ try
+ {
+ version = IndexFormatVersion.getVersion(reader);
+ }
+ finally
+ {
+ reader.release();
+ }
+ indexingQueue.initialize(this);
+ if (ioMode == IndexerIoMode.READ_WRITE)
+ {
+ setReadWrite();
+ }
+ }
- indexingQueue.initialize(this);
+ /**
+ * Returns the number of documents in this index.
+ *
+ * @return the number of documents in this index.
+ * @throws IOException
+ * if an error occurs while reading from the index.
+ */
+ int numDocs() throws IOException
+ {
+ if (indexNames.size() == 0)
+ {
+ return volatileIndex.getNumDocuments();
+ }
+ else
+ {
+ CachingMultiIndexReader reader = getIndexReader();
+ try
+ {
+ return reader.numDocs();
+ }
+ finally
+ {
+ reader.release();
+ }
+ }
+ }
- redoLogApplied = redoLog.hasEntries();
+ /**
+ * @return the index format version for this multi index.
+ */
+ IndexFormatVersion getIndexFormatVersion()
+ {
+ return version;
+ }
- // run recovery
- Recovery.run(this, redoLog);
+ /**
+ * Creates an initial index by traversing the node hierarchy starting at the
+ * node with <code>rootId</code>.
+ *
+ * @param stateMgr
+ * the item state manager.
+ * @param rootId
+ * the id of the node from where to start.
+ * @param rootPath
+ * the path of the node from where to start.
+ * @throws IOException
+ * if an error occurs while indexing the workspace.
+ * @throws IllegalStateException
+ * if this index is not empty.
+ */
+ void createInitialIndex(ItemDataConsumer stateMgr) throws IOException
+ {
+ // only do an initial index if there are no indexes at all
+ if (indexNames.size() == 0)
+ {
+ reindexing = true;
+ try
+ {
+ long count = 0;
+ // traverse and index workspace
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ // NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
+ count = createIndex(indexingTree.getIndexingRoot(), stateMgr, count);
+ executeAndLog(new Commit(getTransactionId()));
+ log.info("Created initial index for {} nodes", new Long(count));
+ releaseMultiReader();
+ scheduleFlushTask();
+ }
+ catch (Exception e)
+ {
+ String msg = "Error indexing workspace";
+ IOException ex = new IOException(msg);
+ ex.initCause(e);
+ throw ex;
+ }
+ finally
+ {
+ reindexing = false;
+ }
+ }
+ else
+ {
+ throw new IllegalStateException("Index already present");
+ }
+ }
- // enqueue unused segments for deletion
- enqueueUnusedSegments();
- attemptDelete();
+ /**
+ * Atomically updates the index by removing some documents and adding
+ * others.
+ *
+ * @param remove
+ * collection of <code>UUID</code>s that identify documents to
+ * remove
+ * @param add
+ * collection of <code>Document</code>s to add. Some of the
+ * elements in this collection may be <code>null</code>, to
+ * indicate that a node could not be indexed successfully.
+ * @throws IOException
+ * if an error occurs while updating the index.
+ */
+ synchronized void update(Collection remove, Collection add) throws IOException
+ {
+ // make sure a reader is available during long updates
+ if (add.size() > handler.getBufferSize())
+ {
+ try
+ {
+ getIndexReader().release();
+ }
+ catch (IOException e)
+ {
+ // do not fail if an exception is thrown here
+ log.warn("unable to prepare index reader " + "for queries during update", e);
+ }
+ }
- // now that we are ready, start index merger
- merger.start();
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ long transactionId = nextTransactionId++;
+ executeAndLog(new Start(transactionId));
- if (redoLogApplied) {
- // wait for the index merge to finish pending jobs
- try {
- merger.waitUntilIdle();
- } catch (InterruptedException e) {
- // move on
- }
- flush();
- }
+ boolean flush = false;
+ for (Iterator it = remove.iterator(); it.hasNext();)
+ {
+ executeAndLog(new DeleteNode(transactionId, (String)it.next()));
+ }
+ for (Iterator it = add.iterator(); it.hasNext();)
+ {
+ Document doc = (Document)it.next();
+ if (doc != null)
+ {
+ executeAndLog(new AddNode(transactionId, doc));
+ // commit volatile index if needed
+ flush |= checkVolatileCommit();
+ }
+ }
+ executeAndLog(new Commit(transactionId));
- flushTask = new TimerTask() {
- public void run() {
- // check if there are any indexing jobs finished
- checkIndexingQueue();
- // check if volatile index should be flushed
- checkFlush();
- }
- };
+ // flush whole index when volatile index has been committed.
+ if (flush)
+ {
+ flush();
+ }
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
- if (indexNames.size() > 0) {
- scheduleFlushTask();
- }
- }
+ /**
+ * Adds a document to the index.
+ *
+ * @param doc
+ * the document to add.
+ * @throws IOException
+ * if an error occurs while adding the document to the index.
+ */
+ void addDocument(Document doc) throws IOException
+ {
+ update(Collections.EMPTY_LIST, Arrays.asList(new Document[]{doc}));
+ }
- /**
- * Returns the number of documents in this index.
- *
- * @return the number of documents in this index.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- int numDocs() throws IOException {
- if (indexNames.size() == 0) {
- return volatileIndex.getNumDocuments();
- } else {
- CachingMultiIndexReader reader = getIndexReader();
- try {
- return reader.numDocs();
- } finally {
- reader.release();
- }
- }
- }
+ /**
+ * Deletes the first document that matches the <code>uuid</code>.
+ *
+ * @param uuid
+ * document that match this <code>uuid</code> will be deleted.
+ * @throws IOException
+ * if an error occurs while deleting the document.
+ */
+ void removeDocument(String uuid) throws IOException
+ {
+ update(Arrays.asList(new String[]{uuid}), Collections.EMPTY_LIST);
+ }
- /**
- * @return the index format version for this multi index.
- */
- IndexFormatVersion getIndexFormatVersion() {
- return version;
- }
+ /**
+ * Deletes all documents that match the <code>uuid</code>.
+ *
+ * @param uuid
+ * documents that match this <code>uuid</code> will be deleted.
+ * @return the number of deleted documents.
+ * @throws IOException
+ * if an error occurs while deleting documents.
+ */
+ synchronized int removeAllDocuments(String uuid) throws IOException
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ int num;
+ try
+ {
+ Term idTerm = new Term(FieldNames.UUID, uuid.toString());
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ num = volatileIndex.removeDocument(idTerm);
+ if (num > 0)
+ {
+ redoLog.append(new DeleteNode(getTransactionId(), uuid));
+ }
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ PersistentIndex index = (PersistentIndex)indexes.get(i);
+ // only remove documents from registered indexes
+ if (indexNames.contains(index.getName()))
+ {
+ int removed = index.removeDocument(idTerm);
+ if (removed > 0)
+ {
+ redoLog.append(new DeleteNode(getTransactionId(), uuid));
+ }
+ num += removed;
+ }
+ }
+ executeAndLog(new Commit(getTransactionId()));
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ return num;
+ }
- /**
- * Creates an initial index by traversing the node hierarchy starting at the
- * node with <code>rootId</code>.
- *
- * @param stateMgr
- * the item state manager.
- * @param rootId
- * the id of the node from where to start.
- * @param rootPath
- * the path of the node from where to start.
- * @throws IOException
- * if an error occurs while indexing the workspace.
- * @throws IllegalStateException
- * if this index is not empty.
- */
- void createInitialIndex(ItemDataConsumer stateMgr) throws IOException {
- // only do an initial index if there are no indexes at all
- if (indexNames.size() == 0) {
- reindexing = true;
- try {
- long count = 0;
- // traverse and index workspace
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- // NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
- count = createIndex(indexingTree.getIndexingRoot(), stateMgr,
- count);
- executeAndLog(new Commit(getTransactionId()));
- log.info("Created initial index for {} nodes", new Long(count));
- releaseMultiReader();
- scheduleFlushTask();
- } catch (Exception e) {
- String msg = "Error indexing workspace";
- IOException ex = new IOException(msg);
- ex.initCause(e);
- throw ex;
- } finally {
- reindexing = false;
- }
- } else {
- throw new IllegalStateException("Index already present");
- }
- }
+ /**
+ * Returns <code>IndexReader</code>s for the indexes named
+ * <code>indexNames</code>. An <code>IndexListener</code> is registered and
+ * notified when documents are deleted from one of the indexes in
+ * <code>indexNames</code>.
+ * <p/>
+ * Note: the number of <code>IndexReaders</code> returned by this method is
+ * not necessarily the same as the number of index names passed. An index
+ * might have been deleted and is not reachable anymore.
+ *
+ * @param indexNames
+ * the names of the indexes for which to obtain readers.
+ * @param listener
+ * the listener to notify when documents are deleted.
+ * @return the <code>IndexReaders</code>.
+ * @throws IOException
+ * if an error occurs acquiring the index readers.
+ */
+ synchronized IndexReader[] getIndexReaders(String[] indexNames, IndexListener listener) throws IOException
+ {
+ Set names = new HashSet(Arrays.asList(indexNames));
+ Map indexReaders = new HashMap();
- /**
- * Atomically updates the index by removing some documents and adding
- * others.
- *
- * @param remove
- * collection of <code>UUID</code>s that identify documents to
- * remove
- * @param add
- * collection of <code>Document</code>s to add. Some of the
- * elements in this collection may be <code>null</code>, to
- * indicate that a node could not be indexed successfully.
- * @throws IOException
- * if an error occurs while updating the index.
- */
- synchronized void update(Collection remove, Collection add)
- throws IOException {
- // make sure a reader is available during long updates
- if (add.size() > handler.getBufferSize()) {
- try {
- getIndexReader().release();
- } catch (IOException e) {
- // do not fail if an exception is thrown here
- log.warn("unable to prepare index reader "
- + "for queries during update", e);
- }
- }
+ try
+ {
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex index = (PersistentIndex)it.next();
+ if (names.contains(index.getName()))
+ {
+ indexReaders.put(index.getReadOnlyIndexReader(listener), index);
+ }
+ }
+ }
+ catch (IOException e)
+ {
+ // release readers obtained so far
+ for (Iterator it = indexReaders.entrySet().iterator(); it.hasNext();)
+ {
+ Map.Entry entry = (Map.Entry)it.next();
+ ReadOnlyIndexReader reader = (ReadOnlyIndexReader)entry.getKey();
+ try
+ {
+ reader.release();
+ }
+ catch (IOException ex)
+ {
+ log.warn("Exception releasing index reader: " + ex);
+ }
+ ((PersistentIndex)entry.getValue()).resetListener();
+ }
+ throw e;
+ }
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- long transactionId = nextTransactionId++;
- executeAndLog(new Start(transactionId));
+ return (IndexReader[])indexReaders.keySet().toArray(new IndexReader[indexReaders.size()]);
+ }
- boolean flush = false;
- for (Iterator it = remove.iterator(); it.hasNext();) {
- executeAndLog(new DeleteNode(transactionId, (String) it.next()));
- }
- for (Iterator it = add.iterator(); it.hasNext();) {
- Document doc = (Document) it.next();
- if (doc != null) {
- executeAndLog(new AddNode(transactionId, doc));
- // commit volatile index if needed
- flush |= checkVolatileCommit();
- }
- }
- executeAndLog(new Commit(transactionId));
+ /**
+ * Creates a new Persistent index. The new index is not registered with this
+ * <code>MultiIndex</code>.
+ *
+ * @param indexName
+ * the name of the index to open, or <code>null</code> if an
+ * index with a new name should be created.
+ * @return a new <code>PersistentIndex</code>.
+ * @throws IOException
+ * if a new index cannot be created.
+ */
+ synchronized PersistentIndex getOrCreateIndex(String indexName) throws IOException
+ {
+ // check existing
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ return idx;
+ }
+ }
- // flush whole index when volatile index has been commited.
- if (flush) {
- flush();
- }
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
+ if (ioMode == IndexerIoMode.READ_ONLY)
+ {
+ throw new UnsupportedOperationException("Can't create index in READ_ONLY mode.");
+ }
- /**
- * Adds a document to the index.
- *
- * @param doc
- * the document to add.
- * @throws IOException
- * if an error occurs while adding the document to the index.
- */
- void addDocument(Document doc) throws IOException {
- update(Collections.EMPTY_LIST, Arrays.asList(new Document[] { doc }));
- }
+ // otherwise open / create it
+ if (indexName == null)
+ {
+ do
+ {
+ indexName = indexNames.newName();
+ }
+ while (directoryManager.hasDirectory(indexName));
+ }
+ PersistentIndex index;
+ try
+ {
+ index =
+ new PersistentIndex(indexName, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+ directoryManager);
+ }
+ catch (IOException e)
+ {
+ // do some clean up
+ if (!directoryManager.delete(indexName))
+ {
+ deletable.add(indexName);
+ }
+ throw e;
+ }
+ index.setMaxFieldLength(handler.getMaxFieldLength());
+ index.setUseCompoundFile(handler.getUseCompoundFile());
+ index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
- /**
- * Deletes the first document that matches the <code>uuid</code>.
- *
- * @param uuid
- * document that match this <code>uuid</code> will be deleted.
- * @throws IOException
- * if an error occurs while deleting the document.
- */
- void removeDocument(String uuid) throws IOException {
- update(Arrays.asList(new String[] { uuid }), Collections.EMPTY_LIST);
- }
+ // add to list of open indexes and return it
+ indexes.add(index);
+ return index;
+ }
- /**
- * Deletes all documents that match the <code>uuid</code>.
- *
- * @param uuid
- * documents that match this <code>uuid</code> will be deleted.
- * @return the number of deleted documents.
- * @throws IOException
- * if an error occurs while deleting documents.
- */
- synchronized int removeAllDocuments(String uuid) throws IOException {
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- int num;
- try {
- Term idTerm = new Term(FieldNames.UUID, uuid.toString());
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- num = volatileIndex.removeDocument(idTerm);
- if (num > 0) {
- redoLog.append(new DeleteNode(getTransactionId(), uuid));
- }
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
- // only remove documents from registered indexes
- if (indexNames.contains(index.getName())) {
- int removed = index.removeDocument(idTerm);
- if (removed > 0) {
- redoLog
- .append(new DeleteNode(getTransactionId(), uuid));
- }
- num += removed;
- }
- }
- executeAndLog(new Commit(getTransactionId()));
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- return num;
- }
+ /**
+ * Returns <code>true</code> if this multi index has an index segment with
+ * the given name. This method even returns <code>true</code> if an index
+ * segments has not yet been loaded / initialized but exists on disk.
+ *
+ * @param indexName
+ * the name of the index segment.
+ * @return <code>true</code> if it exists; otherwise <code>false</code>.
+ * @throws IOException
+ * if an error occurs while checking existence of directory.
+ */
+ synchronized boolean hasIndex(String indexName) throws IOException
+ {
+ // check existing
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ return true;
+ }
+ }
+ // check if it exists on disk
+ return directoryManager.hasDirectory(indexName);
+ }
- /**
- * Returns <code>IndexReader</code>s for the indexes named
- * <code>indexNames</code>. An <code>IndexListener</code> is registered and
- * notified when documents are deleted from one of the indexes in
- * <code>indexNames</code>.
- * <p/>
- * Note: the number of <code>IndexReaders</code> returned by this method is
- * not necessarily the same as the number of index names passed. An index
- * might have been deleted and is not reachable anymore.
- *
- * @param indexNames
- * the names of the indexes for which to obtain readers.
- * @param listener
- * the listener to notify when documents are deleted.
- * @return the <code>IndexReaders</code>.
- * @throws IOException
- * if an error occurs acquiring the index readers.
- */
- synchronized IndexReader[] getIndexReaders(String[] indexNames,
- IndexListener listener) throws IOException {
- Set names = new HashSet(Arrays.asList(indexNames));
- Map indexReaders = new HashMap();
+ /**
+ * Replaces the indexes with names <code>obsoleteIndexes</code> with
+ * <code>index</code>. Documents that must be deleted in <code>index</code>
+ * can be identified with <code>Term</code>s in <code>deleted</code>.
+ *
+ * @param obsoleteIndexes
+ * the names of the indexes to replace.
+ * @param index
+ * the new index that is the result of a merge of the indexes to
+ * replace.
+ * @param deleted
+ * <code>Term</code>s that identify documents that must be
+ * deleted in <code>index</code>.
+ * @throws IOException
+ * if an exception occurs while replacing the indexes.
+ */
+ void replaceIndexes(String[] obsoleteIndexes, PersistentIndex index, Collection deleted) throws IOException
+ {
- try {
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex index = (PersistentIndex) it.next();
- if (names.contains(index.getName())) {
- indexReaders.put(index.getReadOnlyIndexReader(listener),
- index);
- }
- }
- } catch (IOException e) {
- // release readers obtained so far
- for (Iterator it = indexReaders.entrySet().iterator(); it.hasNext();) {
- Map.Entry entry = (Map.Entry) it.next();
- ReadOnlyIndexReader reader = (ReadOnlyIndexReader) entry
- .getKey();
- try {
- reader.release();
- } catch (IOException ex) {
- log.warn("Exception releasing index reader: " + ex);
- }
- ((PersistentIndex) entry.getValue()).resetListener();
- }
- throw e;
- }
+ if (handler.isInitializeHierarchyCache())
+ {
+ // force initializing of caches
+ long time = System.currentTimeMillis();
+ index.getReadOnlyIndexReader(true).release();
+ time = System.currentTimeMillis() - time;
+ log.debug("hierarchy cache initialized in {} ms", new Long(time));
+ }
- return (IndexReader[]) indexReaders.keySet().toArray(
- new IndexReader[indexReaders.size()]);
- }
+ synchronized (this)
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ // if we are reindexing there is already an active transaction
+ if (!reindexing)
+ {
+ executeAndLog(new Start(Action.INTERNAL_TRANS_REPL_INDEXES));
+ }
+ // delete obsolete indexes
+ Set names = new HashSet(Arrays.asList(obsoleteIndexes));
+ for (Iterator it = names.iterator(); it.hasNext();)
+ {
+ // do not try to delete indexes that are already gone
+ String indexName = (String)it.next();
+ if (indexNames.contains(indexName))
+ {
+ executeAndLog(new DeleteIndex(getTransactionId(), indexName));
+ }
+ }
- /**
- * Creates a new Persistent index. The new index is not registered with this
- * <code>MultiIndex</code>.
- *
- * @param indexName
- * the name of the index to open, or <code>null</code> if an
- * index with a new name should be created.
- * @return a new <code>PersistentIndex</code>.
- * @throws IOException
- * if a new index cannot be created.
- */
- synchronized PersistentIndex getOrCreateIndex(String indexName)
- throws IOException {
- // check existing
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- return idx;
- }
- }
+ // Index merger does not log an action when it creates the
+ // target
+ // index of the merge. We have to do this here.
+ executeAndLog(new CreateIndex(getTransactionId(), index.getName()));
- // otherwise open / create it
- if (indexName == null) {
- do {
- indexName = indexNames.newName();
- } while (directoryManager.hasDirectory(indexName));
- }
- PersistentIndex index;
- try {
- index = new PersistentIndex(indexName, handler.getTextAnalyzer(),
- handler.getSimilarity(), cache, indexingQueue,
- directoryManager);
- } catch (IOException e) {
- // do some clean up
- if (!directoryManager.delete(indexName)) {
- deletable.add(indexName);
- }
- throw e;
- }
- index.setMaxFieldLength(handler.getMaxFieldLength());
- index.setUseCompoundFile(handler.getUseCompoundFile());
- index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
+ executeAndLog(new AddIndex(getTransactionId(), index.getName()));
- // add to list of open indexes and return it
- indexes.add(index);
- return index;
- }
+ // delete documents in index
+ for (Iterator it = deleted.iterator(); it.hasNext();)
+ {
+ Term id = (Term)it.next();
+ index.removeDocument(id);
+ }
+ index.commit();
- /**
- * Returns <code>true</code> if this multi index has an index segment with
- * the given name. This method even returns <code>true</code> if an index
- * segments has not yet been loaded / initialized but exists on disk.
- *
- * @param indexName
- * the name of the index segment.
- * @return <code>true</code> if it exists; otherwise <code>false</code>.
- * @throws IOException
- * if an error occurs while checking existence of directory.
- */
- synchronized boolean hasIndex(String indexName) throws IOException {
- // check existing
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- return true;
- }
- }
- // check if it exists on disk
- return directoryManager.hasDirectory(indexName);
- }
+ if (!reindexing)
+ {
+ // only commit if we are not reindexing
+ // when reindexing the final commit is done at the very end
+ executeAndLog(new Commit(getTransactionId()));
+ }
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
+ if (reindexing)
+ {
+ // do some cleanup right away when reindexing
+ attemptDelete();
+ }
+ }
- /**
- * Replaces the indexes with names <code>obsoleteIndexes</code> with
- * <code>index</code>. Documents that must be deleted in <code>index</code>
- * can be identified with <code>Term</code>s in <code>deleted</code>.
- *
- * @param obsoleteIndexes
- * the names of the indexes to replace.
- * @param index
- * the new index that is the result of a merge of the indexes to
- * replace.
- * @param deleted
- * <code>Term</code>s that identify documents that must be
- * deleted in <code>index</code>.
- * @throws IOException
- * if an exception occurs while replacing the indexes.
- */
- void replaceIndexes(String[] obsoleteIndexes, PersistentIndex index,
- Collection deleted) throws IOException {
+ /**
+ * Returns a read-only <code>IndexReader</code> that spans all indexes of
+ * this <code>MultiIndex</code>.
+ *
+ * @return an <code>IndexReader</code>.
+ * @throws IOException
+ * if an error occurs constructing the <code>IndexReader</code>.
+ */
+ public CachingMultiIndexReader getIndexReader() throws IOException
+ {
+ return getIndexReader(false);
+ }
- if (handler.isInitializeHierarchyCache()) {
- // force initializing of caches
- long time = System.currentTimeMillis();
- index.getReadOnlyIndexReader(true).release();
- time = System.currentTimeMillis() - time;
- log.debug("hierarchy cache initialized in {} ms", new Long(time));
- }
+ /**
+ * Returns a read-only <code>IndexReader</code> that spans all indexes of
+ * this <code>MultiIndex</code>.
+ *
+ * @param initCache
+ * when set <code>true</code> the hierarchy cache is completely
+ * initialized before this call returns.
+ * @return an <code>IndexReader</code>.
+ * @throws IOException
+ * if an error occurs constructing the <code>IndexReader</code>.
+ */
+ public synchronized CachingMultiIndexReader getIndexReader(boolean initCache) throws IOException
+ {
+ synchronized (updateMonitor)
+ {
- synchronized (this) {
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- // if we are reindexing there is already an active transaction
- if (!reindexing) {
- executeAndLog(new Start(Action.INTERNAL_TRANS_REPL_INDEXES));
- }
- // delete obsolete indexes
- Set names = new HashSet(Arrays.asList(obsoleteIndexes));
- for (Iterator it = names.iterator(); it.hasNext();) {
- // do not try to delete indexes that are already gone
- String indexName = (String) it.next();
- if (indexNames.contains(indexName)) {
- executeAndLog(new DeleteIndex(getTransactionId(),
- indexName));
- }
- }
+ // TODO: re-implement in less aggressive way
+ if (ioMode == IndexerIoMode.READ_ONLY)
+ {
+ // this is a temporary and expensive solution to re-read the list of indexes.
+ refreshIndexesList();
+ }
+ if (multiReader != null)
+ {
+ multiReader.acquire();
+ return multiReader;
+ }
+ // no reader available
+ // wait until no update is in progress
+ while (updateInProgress)
+ {
+ try
+ {
+ updateMonitor.wait();
+ }
+ catch (InterruptedException e)
+ {
+ throw new IOException("Interrupted while waiting to aquire reader");
+ }
+ }
+ // some other read thread might have created the reader in the
+ // meantime -> check again
+ if (multiReader == null)
+ {
+ List readerList = new ArrayList();
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ PersistentIndex pIdx = (PersistentIndex)indexes.get(i);
+
+ if (indexNames.contains(pIdx.getName()))
+ {
+ try
+ {
+ readerList.add(pIdx.getReadOnlyIndexReader(initCache));
+ }
+ catch (FileNotFoundException e)
+ {
+ if(directoryManager.hasDirectory(pIdx.getName())){
+ throw e;
+ }
+ }
+ }
+ }
+ readerList.add(volatileIndex.getReadOnlyIndexReader());
+ ReadOnlyIndexReader[] readers =
+ (ReadOnlyIndexReader[])readerList.toArray(new ReadOnlyIndexReader[readerList.size()]);
+ multiReader = new CachingMultiIndexReader(readers, cache);
+ }
+ multiReader.acquire();
+ return multiReader;
+ }
+ }
- // Index merger does not log an action when it creates the
- // target
- // index of the merge. We have to do this here.
- executeAndLog(new CreateIndex(getTransactionId(), index
- .getName()));
+ /**
+ * Returns the volatile index.
+ *
+ * @return the volatile index.
+ */
+ VolatileIndex getVolatileIndex()
+ {
+ return volatileIndex;
+ }
- executeAndLog(new AddIndex(getTransactionId(), index.getName()));
+ /**
+ * Closes this <code>MultiIndex</code>.
+ */
+ void close()
+ {
- // delete documents in index
- for (Iterator it = deleted.iterator(); it.hasNext();) {
- Term id = (Term) it.next();
- index.removeDocument(id);
- }
- index.commit();
+ // stop index merger
+ // when calling this method we must not lock this MultiIndex, otherwise
+ // a deadlock might occur
+ merger.dispose();
- if (!reindexing) {
- // only commit if we are not reindexing
- // when reindexing the final commit is done at the very end
- executeAndLog(new Commit(getTransactionId()));
- }
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
- if (reindexing) {
- // do some cleanup right away when reindexing
- attemptDelete();
- }
- }
+ synchronized (this)
+ {
+ // stop timer
+ if (flushTask != null)
+ {
+ flushTask.cancel();
+ }
- /**
- * Returns an read-only <code>IndexReader</code> that spans alls indexes of
- * this <code>MultiIndex</code>.
- *
- * @return an <code>IndexReader</code>.
- * @throws IOException
- * if an error occurs constructing the <code>IndexReader</code>.
- */
- public CachingMultiIndexReader getIndexReader() throws IOException {
- return getIndexReader(false);
- }
+ // commit / close indexes
+ try
+ {
+ releaseMultiReader();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing search index.", e);
+ }
+ try
+ {
+ flush();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing search index.", e);
+ }
+ volatileIndex.close();
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ ((PersistentIndex)indexes.get(i)).close();
+ }
- /**
- * Returns an read-only <code>IndexReader</code> that spans alls indexes of
- * this <code>MultiIndex</code>.
- *
- * @param initCache
- * when set <code>true</code> the hierarchy cache is completely
- * initialized before this call returns.
- * @return an <code>IndexReader</code>.
- * @throws IOException
- * if an error occurs constructing the <code>IndexReader</code>.
- */
- public synchronized CachingMultiIndexReader getIndexReader(boolean initCache)
- throws IOException {
- synchronized (updateMonitor) {
- if (multiReader != null) {
- multiReader.acquire();
- return multiReader;
- }
- // no reader available
- // wait until no update is in progress
- while (updateInProgress) {
- try {
- updateMonitor.wait();
- } catch (InterruptedException e) {
- throw new IOException(
- "Interrupted while waiting to aquire reader");
- }
- }
- // some other read thread might have created the reader in the
- // meantime -> check again
- if (multiReader == null) {
- List readerList = new ArrayList();
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex pIdx = (PersistentIndex) indexes.get(i);
- if (indexNames.contains(pIdx.getName())) {
- readerList.add(pIdx.getReadOnlyIndexReader(initCache));
- }
- }
- readerList.add(volatileIndex.getReadOnlyIndexReader());
- ReadOnlyIndexReader[] readers = (ReadOnlyIndexReader[]) readerList
- .toArray(new ReadOnlyIndexReader[readerList.size()]);
- multiReader = new CachingMultiIndexReader(readers, cache);
- }
- multiReader.acquire();
- return multiReader;
- }
- }
+ // close indexing queue
+ indexingQueue.close();
- /**
- * Returns the volatile index.
- *
- * @return the volatile index.
- */
- VolatileIndex getVolatileIndex() {
- return volatileIndex;
- }
+ // finally close directory
+ try
+ {
+ indexDir.close();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing directory.", e);
+ }
+ }
+ }
- /**
- * Closes this <code>MultiIndex</code>.
- */
- void close() {
+ /**
+ * Returns the namespace mappings of this search index.
+ *
+ * @return the namespace mappings of this search index.
+ */
+ NamespaceMappings getNamespaceMappings()
+ {
+ return nsMappings;
+ }
- // stop index merger
- // when calling this method we must not lock this MultiIndex, otherwise
- // a deadlock might occur
- merger.dispose();
+ /**
+ * Returns the indexing queue for this multi index.
+ *
+ * @return the indexing queue for this multi index.
+ */
+ public IndexingQueue getIndexingQueue()
+ {
+ return indexingQueue;
+ }
- synchronized (this) {
- // stop timer
- flushTask.cancel();
+ /**
+ * Returns a lucene Document for the <code>node</code>.
+ *
+ * @param node
+ * the node to index.
+ * @return the index document.
+ * @throws RepositoryException
+ * if an error occurs while reading from the workspace.
+ */
+ Document createDocument(NodeData node) throws RepositoryException
+ {
+ return handler.createDocument(node, nsMappings, version);
+ }
- // commit / close indexes
- try {
- releaseMultiReader();
- } catch (IOException e) {
- log.error("Exception while closing search index.", e);
- }
- try {
- flush();
- } catch (IOException e) {
- log.error("Exception while closing search index.", e);
- }
- volatileIndex.close();
- for (int i = 0; i < indexes.size(); i++) {
- ((PersistentIndex) indexes.get(i)).close();
- }
+ /**
+ * Returns a lucene Document for the Node with <code>id</code>.
+ *
+ * @param id
+ * the id of the node to index.
+ * @return the index document.
+ * @throws RepositoryException
+ * if an error occurs while reading from the workspace or if
+ * there is no node with <code>id</code>.
+ */
+ Document createDocument(String id) throws RepositoryException
+ {
+ ItemData data = handler.getContext().getItemStateManager().getItemData(id);
+ if (data == null)
+ throw new ItemNotFoundException("Item id=" + id + " not found");
+ if (!data.isNode())
+ throw new RepositoryException("Item with id " + id + " is not a node");
+ return createDocument((NodeData)data);
- // close indexing queue
- indexingQueue.close();
+ }
- // finally close directory
- try {
- indexDir.close();
- } catch (IOException e) {
- log.error("Exception while closing directory.", e);
- }
- }
- }
+ /**
+ * Returns <code>true</code> if the redo log contained entries while this
+ * index was instantiated; <code>false</code> otherwise.
+ *
+ * @return <code>true</code> if the redo log contained entries.
+ */
+ boolean getRedoLogApplied()
+ {
+ return redoLogApplied;
+ }
- /**
- * Returns the namespace mappings of this search index.
- *
- * @return the namespace mappings of this search index.
- */
- NamespaceMappings getNamespaceMappings() {
- return nsMappings;
- }
+ /**
+ * Removes the <code>index</code> from the list of active sub indexes. The
+ * Index is not actually deleted right away, but postponed to the
+ * transaction commit.
+ * <p/>
+ * This method does not close the index, but rather expects that the index
+ * has already been closed.
+ *
+ * @param index
+ * the index to delete.
+ */
+ synchronized void deleteIndex(PersistentIndex index)
+ {
+ // remove it from the lists if index is registered
+ indexes.remove(index);
+ indexNames.removeName(index.getName());
+ synchronized (deletable)
+ {
+ log.debug("Moved " + index.getName() + " to deletable");
+ deletable.add(index.getName());
+ }
+ }
- /**
- * Returns the indexing queue for this multi index.
- *
- * @return the indexing queue for this multi index.
- */
- public IndexingQueue getIndexingQueue() {
- return indexingQueue;
- }
+ /**
+ * Flushes this <code>MultiIndex</code>. Persists all pending changes and
+ * resets the redo log.
+ *
+ * @throws IOException
+ * if the flush fails.
+ */
+ public void flush() throws IOException
+ {
+ synchronized (this)
+ {
+ // commit volatile index
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ commitVolatileIndex();
- /**
- * Returns a lucene Document for the <code>node</code>.
- *
- * @param node
- * the node to index.
- * @return the index document.
- * @throws RepositoryException
- * if an error occurs while reading from the workspace.
- */
- Document createDocument(NodeData node) throws RepositoryException {
- return handler.createDocument(node, nsMappings, version);
- }
+ // commit persistent indexes
+ for (int i = indexes.size() - 1; i >= 0; i--)
+ {
+ PersistentIndex index = (PersistentIndex)indexes.get(i);
+ // only commit indexes we own
+ // index merger also places PersistentIndex instances in
+ // indexes,
+ // but does not make them public by registering the name in
+ // indexNames
+ if (indexNames.contains(index.getName()))
+ {
+ index.commit();
+ // check if index still contains documents
+ if (index.getNumDocuments() == 0)
+ {
+ executeAndLog(new DeleteIndex(getTransactionId(), index.getName()));
+ }
+ }
+ }
+ executeAndLog(new Commit(getTransactionId()));
- /**
- * Returns a lucene Document for the Node with <code>id</code>.
- *
- * @param id
- * the id of the node to index.
- * @return the index document.
- * @throws RepositoryException
- * if an error occurs while reading from the workspace or if
- * there is no node with <code>id</code>.
- */
- Document createDocument(String id) throws RepositoryException {
- ItemData data = handler.getContext().getItemStateManager().getItemData(
- id);
- if (data == null)
- throw new ItemNotFoundException("Item id=" + id + " not found");
- if (!data.isNode())
- throw new RepositoryException("Item with id " + id
- + " is not a node");
- return createDocument((NodeData) data);
+ indexNames.write(indexDir);
- }
+ // reset redo log
+ redoLog.clear();
- /**
- * Returns <code>true</code> if the redo log contained entries while this
- * index was instantiated; <code>false</code> otherwise.
- *
- * @return <code>true</code> if the redo log contained entries.
- */
- boolean getRedoLogApplied() {
- return redoLogApplied;
- }
+ lastFlushTime = System.currentTimeMillis();
+ }
- /**
- * Removes the <code>index</code> from the list of active sub indexes. The
- * Index is not acutally deleted right away, but postponed to the
- * transaction commit.
- * <p/>
- * This method does not close the index, but rather expects that the index
- * has already been closed.
- *
- * @param index
- * the index to delete.
- */
- synchronized void deleteIndex(PersistentIndex index) {
- // remove it from the lists if index is registered
- indexes.remove(index);
- indexNames.removeName(index.getName());
- synchronized (deletable) {
- log.debug("Moved " + index.getName() + " to deletable");
- deletable.add(index.getName());
- }
- }
+ // delete obsolete indexes
+ attemptDelete();
+ }
- /**
- * Flushes this <code>MultiIndex</code>. Persists all pending changes and
- * resets the redo log.
- *
- * @throws IOException
- * if the flush fails.
- */
- public void flush() throws IOException {
- synchronized (this) {
- // commit volatile index
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- commitVolatileIndex();
+ /**
+ * Releases the {@link #multiReader} and sets it <code>null</code>. If the
+ * reader is already <code>null</code> this method does nothing. When this
+ * method returns {@link #multiReader} is guaranteed to be <code>null</code>
+ * even if an exception is thrown.
+ * <p/>
+ * Please note that this method does not take care of any synchronization. A
+ * caller must ensure that it is the only thread operating on this multi
+ * index, or that it holds the {@link #updateMonitor}.
+ *
+ * @throws IOException
+ * if an error occurs while releasing the reader.
+ */
+ void releaseMultiReader() throws IOException
+ {
+ if (multiReader != null)
+ {
+ try
+ {
+ multiReader.release();
+ }
+ finally
+ {
+ multiReader = null;
+ }
+ }
+ }
- // commit persistent indexes
- for (int i = indexes.size() - 1; i >= 0; i--) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
- // only commit indexes we own
- // index merger also places PersistentIndex instances in
- // indexes,
- // but does not make them public by registering the name in
- // indexNames
- if (indexNames.contains(index.getName())) {
- index.commit();
- // check if index still contains documents
- if (index.getNumDocuments() == 0) {
- executeAndLog(new DeleteIndex(getTransactionId(), index
- .getName()));
- }
- }
- }
- executeAndLog(new Commit(getTransactionId()));
+ // -------------------------< internal
+ // >-------------------------------------
- indexNames.write(indexDir);
+ /**
+ * Enqueues unused segments for deletion in {@link #deletable}. This method
+ * does not synchronize on {@link #deletable}! A caller must ensure that it
+ * is the only one acting on the {@link #deletable} map.
+ *
+ * @throws IOException
+ * if an error occurs while reading directories.
+ */
+ private void enqueueUnusedSegments() throws IOException
+ {
+ // walk through index segments
+ String[] dirNames = directoryManager.getDirectoryNames();
+ for (int i = 0; i < dirNames.length; i++)
+ {
+ if (dirNames[i].startsWith("_") && !indexNames.contains(dirNames[i]))
+ {
+ deletable.add(dirNames[i]);
+ }
+ }
+ }
- // reset redo log
- redoLog.clear();
+ /**
+ * Cancel flush task and add new one
+ */
+ private void scheduleFlushTask()
+ {
+ // cancel task
+ if (flushTask != null)
+ {
+ flushTask.cancel();
+ }
+ // clear canceled tasks
+ FLUSH_TIMER.purge();
+ // new flush task, because a canceled task can't be re-used
+ flushTask = new TimerTask()
+ {
+ public void run()
+ {
+ // check if there are any indexing jobs finished
+ checkIndexingQueue();
+ // check if volatile index should be flushed
+ checkFlush();
+ }
+ };
+ FLUSH_TIMER.schedule(flushTask, 0, 1000);
+ lastFlushTime = System.currentTimeMillis();
+ }
- lastFlushTime = System.currentTimeMillis();
- }
+ /**
+ * Resets the volatile index to a new instance.
+ */
+ private void resetVolatileIndex() throws IOException
+ {
+ volatileIndex = new VolatileIndex(handler.getTextAnalyzer(), handler.getSimilarity(), indexingQueue);
+ volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
+ volatileIndex.setMaxFieldLength(handler.getMaxFieldLength());
+ volatileIndex.setBufferSize(handler.getBufferSize());
+ }
- // delete obsolete indexes
- attemptDelete();
- }
+ /**
+ * Returns the current transaction id.
+ *
+ * @return the current transaction id.
+ */
+ private long getTransactionId()
+ {
+ return currentTransactionId;
+ }
- /**
- * Releases the {@link #multiReader} and sets it <code>null</code>. If the
- * reader is already <code>null</code> this method does nothing. When this
- * method returns {@link #multiReader} is guaranteed to be <code>null</code>
- * even if an exception is thrown.
- * <p/>
- * Please note that this method does not take care of any synchronization. A
- * caller must ensure that it is the only thread operating on this multi
- * index, or that it holds the {@link #updateMonitor}.
- *
- * @throws IOException
- * if an error occurs while releasing the reader.
- */
- void releaseMultiReader() throws IOException {
- if (multiReader != null) {
- try {
- multiReader.release();
- } finally {
- multiReader = null;
- }
- }
- }
+ /**
+ * Executes action <code>a</code> and appends the action to the redo log if
+ * successful.
+ *
+ * @param a
+ * the <code>Action</code> to execute.
+ * @return the executed action.
+ * @throws IOException
+ * if an error occurs while executing the action or appending
+ * the action to the redo log.
+ */
+ private Action executeAndLog(Action a) throws IOException
+ {
+ a.execute(this);
+ redoLog.append(a);
+ // please note that flushing the redo log is only required on
+ // commit, but we also want to keep track of new indexes for sure.
+ // otherwise it might happen that unused index folders are orphaned
+ // after a crash.
+ if (a.getType() == Action.TYPE_COMMIT || a.getType() == Action.TYPE_ADD_INDEX)
+ {
+ redoLog.flush();
+ }
+ return a;
+ }
- // -------------------------< internal
- // >-------------------------------------
+ /**
+ * Checks if it is needed to commit the volatile index according to
+ * {@link SearchIndex#getMaxVolatileIndexSize()}.
+ *
+ * @return <code>true</code> if the volatile index has been committed,
+ * <code>false</code> otherwise.
+ * @throws IOException
+ * if an error occurs while committing the volatile index.
+ */
+ private boolean checkVolatileCommit() throws IOException
+ {
+ if (volatileIndex.getRamSizeInBytes() >= handler.getMaxVolatileIndexSize())
+ {
+ commitVolatileIndex();
+ return true;
+ }
+ return false;
+ }
- /**
- * Enqueues unused segments for deletion in {@link #deletable}. This method
- * does not synchronize on {@link #deletable}! A caller must ensure that it
- * is the only one acting on the {@link #deletable} map.
- *
- * @throws IOException
- * if an error occurs while reading directories.
- */
- private void enqueueUnusedSegments() throws IOException {
- // walk through index segments
- String[] dirNames = directoryManager.getDirectoryNames();
- for (int i = 0; i < dirNames.length; i++) {
- if (dirNames[i].startsWith("_")
- && !indexNames.contains(dirNames[i])) {
- deletable.add(dirNames[i]);
- }
- }
- }
+ /**
+ * Commits the volatile index to a persistent index. The new persistent
+ * index is added to the list of indexes but not written to disk. When this
+ * method returns a new volatile index has been created.
+ *
+ * @throws IOException
+ * if an error occurs while writing the volatile index to disk.
+ */
+ private void commitVolatileIndex() throws IOException
+ {
- private void scheduleFlushTask() {
- lastFlushTime = System.currentTimeMillis();
- FLUSH_TIMER.schedule(flushTask, 0, 1000);
- }
+ // check if volatile index contains documents at all
+ if (volatileIndex.getNumDocuments() > 0)
+ {
- /**
- * Resets the volatile index to a new instance.
- */
- private void resetVolatileIndex() throws IOException {
- volatileIndex = new VolatileIndex(handler.getTextAnalyzer(), handler
- .getSimilarity(), indexingQueue);
- volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
- volatileIndex.setMaxFieldLength(handler.getMaxFieldLength());
- volatileIndex.setBufferSize(handler.getBufferSize());
- }
+ long time = System.currentTimeMillis();
+ // create index
+ CreateIndex create = new CreateIndex(getTransactionId(), null);
+ executeAndLog(create);
- /**
- * Returns the current transaction id.
- *
- * @return the current transaction id.
- */
- private long getTransactionId() {
- return currentTransactionId;
- }
+ // commit volatile index
+ executeAndLog(new VolatileCommit(getTransactionId(), create.getIndexName()));
- /**
- * Executes action <code>a</code> and appends the action to the redo log if
- * successful.
- *
- * @param a
- * the <code>Action</code> to execute.
- * @return the executed action.
- * @throws IOException
- * if an error occurs while executing the action or appending
- * the action to the redo log.
- */
- private Action executeAndLog(Action a) throws IOException {
- a.execute(this);
- redoLog.append(a);
- // please note that flushing the redo log is only required on
- // commit, but we also want to keep track of new indexes for sure.
- // otherwise it might happen that unused index folders are orphaned
- // after a crash.
- if (a.getType() == Action.TYPE_COMMIT
- || a.getType() == Action.TYPE_ADD_INDEX) {
- redoLog.flush();
- }
- return a;
- }
+ // add new index
+ AddIndex add = new AddIndex(getTransactionId(), create.getIndexName());
+ executeAndLog(add);
- /**
- * Checks if it is needed to commit the volatile index according to
- * {@link SearchIndex#getMaxVolatileIndexSize()}.
- *
- * @return <code>true</code> if the volatile index has been committed,
- * <code>false</code> otherwise.
- * @throws IOException
- * if an error occurs while committing the volatile index.
- */
- private boolean checkVolatileCommit() throws IOException {
- if (volatileIndex.getRamSizeInBytes() >= handler
- .getMaxVolatileIndexSize()) {
- commitVolatileIndex();
- return true;
- }
- return false;
- }
+ // create new volatile index
+ resetVolatileIndex();
- /**
- * Commits the volatile index to a persistent index. The new persistent
- * index is added to the list of indexes but not written to disk. When this
- * method returns a new volatile index has been created.
- *
- * @throws IOException
- * if an error occurs while writing the volatile index to disk.
- */
- private void commitVolatileIndex() throws IOException {
+ time = System.currentTimeMillis() - time;
+ log.debug("Committed in-memory index in " + time + "ms.");
+ }
+ }
- // check if volatile index contains documents at all
- if (volatileIndex.getNumDocuments() > 0) {
+ /**
+ * Recursively creates an index starting with the NodeState
+ * <code>node</code>.
+ *
+ * @param node
+ * the current NodeState.
+ * @param path
+ * the path of the current node.
+ * @param stateMgr
+ * the shared item state manager.
+ * @param count
+ * the number of nodes already indexed.
+ * @return the number of nodes indexed so far.
+ * @throws IOException
+ * if an error occurs while writing to the index.
+ * @throws ItemStateException
+ * if a node state cannot be found.
+ * @throws RepositoryException
+ * if any other error occurs
+ */
+ private long createIndex(NodeData node, ItemDataConsumer stateMgr, long count) throws IOException,
+ RepositoryException
+ {
+ // NodeId id = node.getNodeId();
- long time = System.currentTimeMillis();
- // create index
- CreateIndex create = new CreateIndex(getTransactionId(), null);
- executeAndLog(create);
+ if (indexingTree.isExcluded(node))
+ {
+ return count;
+ }
+ executeAndLog(new AddNode(getTransactionId(), node.getIdentifier()));
+ if (++count % 100 == 0)
+ {
- // commit volatile index
- executeAndLog(new VolatileCommit(getTransactionId(), create
- .getIndexName()));
+ log.info("indexing... {} ({})", node.getQPath().getAsString(), new Long(count));
+ }
+ if (count % 10 == 0)
+ {
+ checkIndexingQueue(true);
+ }
+ checkVolatileCommit();
+ List<NodeData> children = stateMgr.getChildNodesData(node);
+ for (NodeData nodeData : children)
+ {
- // add new index
- AddIndex add = new AddIndex(getTransactionId(), create
- .getIndexName());
- executeAndLog(add);
+ NodeData childState = (NodeData)stateMgr.getItemData(nodeData.getIdentifier());
+ if (childState == null)
+ {
+ handler.getOnWorkspaceInconsistencyHandler().handleMissingChildNode(
+ new ItemNotFoundException("Child not found "), handler, nodeData.getQPath(), node, nodeData);
+ }
- // create new volatile index
- resetVolatileIndex();
+ if (nodeData != null)
+ {
+ count = createIndex(nodeData, stateMgr, count);
+ }
+ }
- time = System.currentTimeMillis() - time;
- log.debug("Committed in-memory index in " + time + "ms.");
- }
- }
+ return count;
+ }
- /**
- * Recursively creates an index starting with the NodeState
- * <code>node</code>.
- *
- * @param node
- * the current NodeState.
- * @param path
- * the path of the current node.
- * @param stateMgr
- * the shared item state manager.
- * @param count
- * the number of nodes already indexed.
- * @return the number of nodes indexed so far.
- * @throws IOException
- * if an error occurs while writing to the index.
- * @throws ItemStateException
- * if an node state cannot be found.
- * @throws RepositoryException
- * if any other error occurs
- */
- private long createIndex(NodeData node, ItemDataConsumer stateMgr,
- long count) throws IOException, RepositoryException {
- // NodeId id = node.getNodeId();
+ /**
+ * Attempts to delete all files recorded in {@link #deletable}.
+ */
+ private void attemptDelete()
+ {
+ synchronized (deletable)
+ {
+ for (Iterator it = deletable.iterator(); it.hasNext();)
+ {
+ String indexName = (String)it.next();
+ if (directoryManager.delete(indexName))
+ {
+ it.remove();
+ }
+ else
+ {
+ log.info("Unable to delete obsolete index: " + indexName);
+ }
+ }
+ }
+ }
- if (indexingTree.isExcluded(node)) {
- return count;
- }
- executeAndLog(new AddNode(getTransactionId(), node.getIdentifier()));
- if (++count % 100 == 0) {
+ /**
+ * Removes the deletable file if it exists. The file is not used anymore in
+ * Jackrabbit versions >= 1.5.
+ */
+ private void removeDeletable()
+ {
+ String fileName = "deletable";
+ try
+ {
+ if (indexDir.fileExists(fileName))
+ {
+ indexDir.deleteFile(fileName);
+ }
+ }
+ catch (IOException e)
+ {
+ log.warn("Unable to remove file 'deletable'.", e);
+ }
+ }
- log.info("indexing... {} ({})", node.getQPath().getAsString(),
- new Long(count));
- }
- if (count % 10 == 0) {
- checkIndexingQueue(true);
- }
- checkVolatileCommit();
- List<NodeData> children = stateMgr.getChildNodesData(node);
- for (NodeData nodeData : children) {
+ /**
+ * Checks the duration between the last commit to this index and the current
+ * time and flushes the index (if there are changes at all) if the duration
+ * (idle time) is more than {@link SearchIndex#getVolatileIdleTime()}
+ * seconds.
+ */
+ private synchronized void checkFlush()
+ {
+ long idleTime = System.currentTimeMillis() - lastFlushTime;
+ // do not flush if volatileIdleTime is zero or negative
+ if (handler.getVolatileIdleTime() > 0 && idleTime > handler.getVolatileIdleTime() * 1000)
+ {
+ try
+ {
+ if (redoLog.hasEntries())
+ {
+ log.debug("Flushing index after being idle for " + idleTime + " ms.");
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ flush();
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
+ }
+ catch (IOException e)
+ {
+ log.error("Unable to commit volatile index", e);
+ }
+ }
+ }
- NodeData childState = (NodeData) stateMgr.getItemData(nodeData
- .getIdentifier());
- if (childState == null) {
- handler.getOnWorkspaceInconsistencyHandler()
- .handleMissingChildNode(
- new ItemNotFoundException("Child not found "),
- handler, nodeData.getQPath(), node, nodeData);
- }
+ /**
+ * Checks the indexing queue for finished text extractor jobs and updates the
+ * index accordingly if there are any new ones. This method is synchronized
+ * and should only be called by the timer task that periodically checks if
+ * there are documents ready in the indexing queue. A new transaction is
+ * used when documents are transferred from the indexing queue to the index.
+ */
+ private synchronized void checkIndexingQueue()
+ {
+ checkIndexingQueue(false);
+ }
- if (nodeData != null) {
- count = createIndex(nodeData, stateMgr, count);
- }
- }
+ /**
+ * Checks the indexing queue for finished text extractor jobs and updates the
+ * index accordingly if there are any new ones.
+ *
+ * @param transactionPresent
+ * whether a transaction is in progress and the current
+ * {@link #getTransactionId()} should be used. If
+ * <code>false</code> a new transaction is created when documents
+ * are transferred from the indexing queue to the index.
+ */
+ private void checkIndexingQueue(boolean transactionPresent)
+ {
+ Document[] docs = indexingQueue.getFinishedDocuments();
+ Map finished = new HashMap();
+ for (int i = 0; i < docs.length; i++)
+ {
+ String uuid = docs[i].get(FieldNames.UUID);
+ finished.put(uuid, docs[i]);
+ }
- return count;
- }
+ // now update index with the remaining ones if there are any
+ if (!finished.isEmpty())
+ {
+ log.info("updating index with {} nodes from indexing queue.", new Long(finished.size()));
- /**
- * Attempts to delete all files recorded in {@link #deletable}.
- */
- private void attemptDelete() {
- synchronized (deletable) {
- for (Iterator it = deletable.iterator(); it.hasNext();) {
- String indexName = (String) it.next();
- if (directoryManager.delete(indexName)) {
- it.remove();
- } else {
- log.info("Unable to delete obsolete index: " + indexName);
- }
- }
- }
- }
+ // remove documents from the queue
+ for (Iterator it = finished.keySet().iterator(); it.hasNext();)
+ {
+ indexingQueue.removeDocument(it.next().toString());
+ }
- /**
- * Removes the deletable file if it exists. The file is not used anymore in
- * Jackrabbit versions >= 1.5.
- */
- private void removeDeletable() {
- String fileName = "deletable";
- try {
- if (indexDir.fileExists(fileName)) {
- indexDir.deleteFile(fileName);
- }
- } catch (IOException e) {
- log.warn("Unable to remove file 'deletable'.", e);
- }
- }
+ try
+ {
+ if (transactionPresent)
+ {
+ for (Iterator it = finished.keySet().iterator(); it.hasNext();)
+ {
+ executeAndLog(new DeleteNode(getTransactionId(), (String)it.next()));
+ }
+ for (Iterator it = finished.values().iterator(); it.hasNext();)
+ {
+ executeAndLog(new AddNode(getTransactionId(), (Document)it.next()));
+ }
+ }
+ else
+ {
+ update(finished.keySet(), finished.values());
+ }
+ }
+ catch (IOException e)
+ {
+ // update failed
+ log.warn("Failed to update index with deferred text extraction", e);
+ }
+ }
+ }
- /**
- * Checks the duration between the last commit to this index and the current
- * time and flushes the index (if there are changes at all) if the duration
- * (idle time) is more than {@link SearchIndex#getVolatileIdleTime()}
- * seconds.
- */
- private synchronized void checkFlush() {
- long idleTime = System.currentTimeMillis() - lastFlushTime;
- // do not flush if volatileIdleTime is zero or negative
- if (handler.getVolatileIdleTime() > 0
- && idleTime > handler.getVolatileIdleTime() * 1000) {
- try {
- if (redoLog.hasEntries()) {
- log.debug("Flushing index after being idle for " + idleTime
- + " ms.");
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- flush();
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
- } catch (IOException e) {
- log.error("Unable to commit volatile index", e);
- }
- }
- }
+ // ------------------------< Actions
+ // >---------------------------------------
- /**
- * Checks the indexing queue for finished text extrator jobs and updates the
- * index accordingly if there are any new ones. This method is synchronized
- * and should only be called by the timer task that periodically checks if
- * there are documents ready in the indexing queue. A new transaction is
- * used when documents are transfered from the indexing queue to the index.
- */
- private synchronized void checkIndexingQueue() {
- checkIndexingQueue(false);
- }
+ /**
+ * Defines an action on a <code>MultiIndex</code>.
+ */
+ public abstract static class Action
+ {
- /**
- * Checks the indexing queue for finished text extrator jobs and updates the
- * index accordingly if there are any new ones.
- *
- * @param transactionPresent
- * whether a transaction is in progress and the current
- * {@link #getTransactionId()} should be used. If
- * <code>false</code> a new transaction is created when documents
- * are transfered from the indexing queue to the index.
- */
- private void checkIndexingQueue(boolean transactionPresent) {
- Document[] docs = indexingQueue.getFinishedDocuments();
- Map finished = new HashMap();
- for (int i = 0; i < docs.length; i++) {
- String uuid = docs[i].get(FieldNames.UUID);
- finished.put(uuid, docs[i]);
- }
+ /**
+ * Action identifier in redo log for transaction start action.
+ */
+ static final String START = "STR";
- // now update index with the remaining ones if there are any
- if (!finished.isEmpty()) {
- log.info("updating index with {} nodes from indexing queue.",
- new Long(finished.size()));
+ /**
+ * Action type for start action.
+ */
+ public static final int TYPE_START = 0;
- // remove documents from the queue
- for (Iterator it = finished.keySet().iterator(); it.hasNext();) {
- indexingQueue.removeDocument(it.next().toString());
- }
+ /**
+ * Action identifier in redo log for add node action.
+ */
+ static final String ADD_NODE = "ADD";
- try {
- if (transactionPresent) {
- for (Iterator it = finished.keySet().iterator(); it
- .hasNext();) {
- executeAndLog(new DeleteNode(getTransactionId(),
- (String) it.next()));
- }
- for (Iterator it = finished.values().iterator(); it
- .hasNext();) {
- executeAndLog(new AddNode(getTransactionId(),
- (Document) it.next()));
- }
- } else {
- update(finished.keySet(), finished.values());
- }
- } catch (IOException e) {
- // update failed
- log.warn(
- "Failed to update index with deferred text extraction",
- e);
- }
- }
- }
+ /**
+ * Action type for add node action.
+ */
+ public static final int TYPE_ADD_NODE = 1;
- // ------------------------< Actions
- // >---------------------------------------
+ /**
+ * Action identifier in redo log for node delete action.
+ */
+ static final String DELETE_NODE = "DEL";
- /**
- * Defines an action on an <code>MultiIndex</code>.
- */
- public abstract static class Action {
+ /**
+ * Action type for delete node action.
+ */
+ public static final int TYPE_DELETE_NODE = 2;
- /**
- * Action identifier in redo log for transaction start action.
- */
- static final String START = "STR";
+ /**
+ * Action identifier in redo log for transaction commit action.
+ */
+ static final String COMMIT = "COM";
- /**
- * Action type for start action.
- */
- public static final int TYPE_START = 0;
+ /**
+ * Action type for commit action.
+ */
+ public static final int TYPE_COMMIT = 3;
- /**
- * Action identifier in redo log for add node action.
- */
- static final String ADD_NODE = "ADD";
+ /**
+ * Action identifier in redo log for volatile index commit action.
+ */
+ static final String VOLATILE_COMMIT = "VOL_COM";
- /**
- * Action type for add node action.
- */
- public static final int TYPE_ADD_NODE = 1;
+ /**
+ * Action type for volatile index commit action.
+ */
+ public static final int TYPE_VOLATILE_COMMIT = 4;
- /**
- * Action identifier in redo log for node delete action.
- */
- static final String DELETE_NODE = "DEL";
+ /**
+ * Action identifier in redo log for index create action.
+ */
+ static final String CREATE_INDEX = "CRE_IDX";
- /**
- * Action type for delete node action.
- */
- public static final int TYPE_DELETE_NODE = 2;
+ /**
+ * Action type for create index action.
+ */
+ public static final int TYPE_CREATE_INDEX = 5;
- /**
- * Action identifier in redo log for transaction commit action.
- */
- static final String COMMIT = "COM";
+ /**
+ * Action identifier in redo log for index add action.
+ */
+ static final String ADD_INDEX = "ADD_IDX";
- /**
- * Action type for commit action.
- */
- public static final int TYPE_COMMIT = 3;
+ /**
+ * Action type for add index action.
+ */
+ public static final int TYPE_ADD_INDEX = 6;
- /**
- * Action identifier in redo log for volatile index commit action.
- */
- static final String VOLATILE_COMMIT = "VOL_COM";
+ /**
+ * Action identifier in redo log for delete index action.
+ */
+ static final String DELETE_INDEX = "DEL_IDX";
- /**
- * Action type for volatile index commit action.
- */
- public static final int TYPE_VOLATILE_COMMIT = 4;
+ /**
+ * Action type for delete index action.
+ */
+ public static final int TYPE_DELETE_INDEX = 7;
- /**
- * Action identifier in redo log for index create action.
- */
- static final String CREATE_INDEX = "CRE_IDX";
+ /**
+ * Transaction identifier for internal actions like volatile index
+ * commit triggered by timer thread.
+ */
+ static final long INTERNAL_TRANSACTION = -1;
- /**
- * Action type for create index action.
- */
- public static final int TYPE_CREATE_INDEX = 5;
+ /**
+ * Transaction identifier for internal action that replaces indexes.
+ */
+ static final long INTERNAL_TRANS_REPL_INDEXES = -2;
- /**
- * Action identifier in redo log for index add action.
- */
- static final String ADD_INDEX = "ADD_IDX";
+ /**
+ * The id of the transaction that executed this action.
+ */
+ private final long transactionId;
- /**
- * Action type for add index action.
- */
- public static final int TYPE_ADD_INDEX = 6;
+ /**
+ * The action type.
+ */
+ private final int type;
- /**
- * Action identifier in redo log for delete index action.
- */
- static final String DELETE_INDEX = "DEL_IDX";
+ /**
+ * Creates a new <code>Action</code>.
+ *
+ * @param transactionId
+ * the id of the transaction that executed this action.
+ * @param type
+ * the action type.
+ */
+ Action(long transactionId, int type)
+ {
+ this.transactionId = transactionId;
+ this.type = type;
+ }
- /**
- * Action type for delete index action.
- */
- public static final int TYPE_DELETE_INDEX = 7;
+ /**
+ * Returns the transaction id for this <code>Action</code>.
+ *
+ * @return the transaction id for this <code>Action</code>.
+ */
+ long getTransactionId()
+ {
+ return transactionId;
+ }
- /**
- * Transaction identifier for internal actions like volatile index
- * commit triggered by timer thread.
- */
- static final long INTERNAL_TRANSACTION = -1;
+ /**
+ * Returns the action type.
+ *
+ * @return the action type.
+ */
+ int getType()
+ {
+ return type;
+ }
- /**
- * Transaction identifier for internal action that replaces indexs.
- */
- static final long INTERNAL_TRANS_REPL_INDEXES = -2;
+ /**
+ * Executes this action on the <code>index</code>.
+ *
+ * @param index
+ * the index where to execute the action.
+ * @throws IOException
+ * if the action fails due to some I/O error in the index or
+ * some other error.
+ */
+ public abstract void execute(MultiIndex index) throws IOException;
- /**
- * The id of the transaction that executed this action.
- */
- private final long transactionId;
+ /**
+ * Executes the inverse operation of this action. That is, does an undo
+ * of this action. This default implementation does nothing, but returns
+ * silently.
+ *
+ * @param index
+ * the index where to undo the action.
+ * @throws IOException
+ * if the action cannot be undone.
+ */
+ public void undo(MultiIndex index) throws IOException
+ {
+ }
- /**
- * The action type.
- */
- private final int type;
+ /**
+ * Returns a <code>String</code> representation of this action that can
+ * be written to the {@link RedoLog}.
+ *
+ * @return a <code>String</code> representation of this action.
+ */
+ public abstract String toString();
- /**
- * Creates a new <code>Action</code>.
- *
- * @param transactionId
- * the id of the transaction that executed this action.
- * @param type
- * the action type.
- */
- Action(long transactionId, int type) {
- this.transactionId = transactionId;
- this.type = type;
- }
+ /**
+ * Parses a line in the redo log and creates an {@link Action}.
+ *
+ * @param line
+ * the line from the redo log.
+ * @return an <code>Action</code>.
+ * @throws IllegalArgumentException
+ * if the line is malformed.
+ */
+ static Action fromString(String line) throws IllegalArgumentException
+ {
+ int endTransIdx = line.indexOf(' ');
+ if (endTransIdx == -1)
+ {
+ throw new IllegalArgumentException(line);
+ }
+ long transactionId;
+ try
+ {
+ transactionId = Long.parseLong(line.substring(0, endTransIdx));
+ }
+ catch (NumberFormatException e)
+ {
+ throw new IllegalArgumentException(line);
+ }
+ int endActionIdx = line.indexOf(' ', endTransIdx + 1);
+ if (endActionIdx == -1)
+ {
+ // action does not have arguments
+ endActionIdx = line.length();
+ }
+ String actionLabel = line.substring(endTransIdx + 1, endActionIdx);
+ String arguments = "";
+ if (endActionIdx + 1 <= line.length())
+ {
+ arguments = line.substring(endActionIdx + 1);
+ }
+ Action a;
+ if (actionLabel.equals(Action.ADD_NODE))
+ {
+ a = AddNode.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.ADD_INDEX))
+ {
+ a = AddIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.COMMIT))
+ {
+ a = Commit.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.CREATE_INDEX))
+ {
+ a = CreateIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.DELETE_INDEX))
+ {
+ a = DeleteIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.DELETE_NODE))
+ {
+ a = DeleteNode.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.START))
+ {
+ a = Start.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.VOLATILE_COMMIT))
+ {
+ a = VolatileCommit.fromString(transactionId, arguments);
+ }
+ else
+ {
+ throw new IllegalArgumentException(line);
+ }
+ return a;
+ }
+ }
- /**
- * Returns the transaction id for this <code>Action</code>.
- *
- * @return the transaction id for this <code>Action</code>.
- */
- long getTransactionId() {
- return transactionId;
- }
+ /**
+ * Adds an index to the MultiIndex's active persistent index list.
+ */
+ private static class AddIndex extends Action
+ {
- /**
- * Returns the action type.
- *
- * @return the action type.
- */
- int getType() {
- return type;
- }
+ /**
+ * The name of the index to add.
+ */
+ private String indexName;
- /**
- * Executes this action on the <code>index</code>.
- *
- * @param index
- * the index where to execute the action.
- * @throws IOException
- * if the action fails due to some I/O error in the index or
- * some other error.
- */
- public abstract void execute(MultiIndex index) throws IOException;
+ /**
+ * Creates a new AddIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to add, or <code>null</code> if an
+ * index with a new name should be created.
+ */
+ AddIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_ADD_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * Executes the inverse operation of this action. That is, does an undo
- * of this action. This default implementation does nothing, but returns
- * silently.
- *
- * @param index
- * the index where to undo the action.
- * @throws IOException
- * if the action cannot be undone.
- */
- public void undo(MultiIndex index) throws IOException {
- }
+ /**
+ * Creates a new AddIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to add.
+ * @return the AddIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static AddIndex fromString(long transactionId, String arguments)
+ {
+ return new AddIndex(transactionId, arguments);
+ }
- /**
- * Returns a <code>String</code> representation of this action that can
- * be written to the {@link RedoLog}.
- *
- * @return a <code>String</code> representation of this action.
- */
- public abstract String toString();
+ /**
+ * Adds a sub index to <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ if (!index.indexNames.contains(indexName))
+ {
+ index.indexNames.addName(indexName);
+ // now that the index is in the active list let the merger know
+ // about it
+ index.merger.indexAdded(indexName, idx.getNumDocuments());
+ }
+ }
- /**
- * Parses an line in the redo log and created an {@link Action}.
- *
- * @param line
- * the line from the redo log.
- * @return an <code>Action</code>.
- * @throws IllegalArgumentException
- * if the line is malformed.
- */
- static Action fromString(String line) throws IllegalArgumentException {
- int endTransIdx = line.indexOf(' ');
- if (endTransIdx == -1) {
- throw new IllegalArgumentException(line);
- }
- long transactionId;
- try {
- transactionId = Long.parseLong(line.substring(0, endTransIdx));
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(line);
- }
- int endActionIdx = line.indexOf(' ', endTransIdx + 1);
- if (endActionIdx == -1) {
- // action does not have arguments
- endActionIdx = line.length();
- }
- String actionLabel = line.substring(endTransIdx + 1, endActionIdx);
- String arguments = "";
- if (endActionIdx + 1 <= line.length()) {
- arguments = line.substring(endActionIdx + 1);
- }
- Action a;
- if (actionLabel.equals(Action.ADD_NODE)) {
- a = AddNode.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.ADD_INDEX)) {
- a = AddIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.COMMIT)) {
- a = Commit.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.CREATE_INDEX)) {
- a = CreateIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.DELETE_INDEX)) {
- a = DeleteIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.DELETE_NODE)) {
- a = DeleteNode.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.START)) {
- a = Start.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.VOLATILE_COMMIT)) {
- a = VolatileCommit.fromString(transactionId, arguments);
- } else {
- throw new IllegalArgumentException(line);
- }
- return a;
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.ADD_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
+ }
- /**
- * Adds an index to the MultiIndex's active persistent index list.
- */
- private static class AddIndex extends Action {
+ /**
+ * Adds a node to the index.
+ */
+ private static class AddNode extends Action
+ {
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * The maximum length of an AddNode String.
+ */
+ private static final int ENTRY_LENGTH =
+ Long.toString(Long.MAX_VALUE).length() + Action.ADD_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
- /**
- * Creates a new AddIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to add, or <code>null</code> if an
- * index with a new name should be created.
- */
- AddIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_ADD_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * The uuid of the node to add.
+ */
+ private final String uuid;
- /**
- * Creates a new AddIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to add.
- * @return the AddIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static AddIndex fromString(long transactionId, String arguments) {
- return new AddIndex(transactionId, arguments);
- }
+ /**
+ * The document to add to the index, or <code>null</code> if not
+ * available.
+ */
+ private Document doc;
- /**
- * Adds a sub index to <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- if (!index.indexNames.contains(indexName)) {
- index.indexNames.addName(indexName);
- // now that the index is in the active list let the merger know
- // about it
- index.merger.indexAdded(indexName, idx.getNumDocuments());
- }
- }
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param uuid
+ * the uuid of the node to add.
+ */
+ AddNode(long transactionId, String uuid)
+ {
+ super(transactionId, Action.TYPE_ADD_NODE);
+ this.uuid = uuid;
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.ADD_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
- }
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param doc
+ * the document to add.
+ */
+ AddNode(long transactionId, Document doc)
+ {
+ this(transactionId, doc.get(FieldNames.UUID));
+ this.doc = doc;
+ }
- /**
- * Adds a node to the index.
- */
- private static class AddNode extends Action {
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the arguments to this action. The uuid of the node to add
+ * @return the AddNode action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed. Not a UUID.
+ */
+ static AddNode fromString(long transactionId, String arguments) throws IllegalArgumentException
+ {
+ // simple length check
+ if (arguments.length() != Constants.UUID_FORMATTED_LENGTH)
+ {
+ throw new IllegalArgumentException("arguments is not a uuid");
+ }
+ return new AddNode(transactionId, arguments);
+ }
- /**
- * The maximum length of a AddNode String.
- */
- private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE)
- .length()
- + Action.ADD_NODE.length()
- + Constants.UUID_FORMATTED_LENGTH
- + 2;
+ /**
+ * Adds a node to the index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ if (doc == null)
+ {
+ try
+ {
+ doc = index.createDocument(uuid);
+ }
+ catch (RepositoryException e)
+ {
+ // node does not exist anymore
+ log.debug(e.getMessage());
+ }
+ }
+ if (doc != null)
+ {
+ index.volatileIndex.addDocuments(new Document[]{doc});
+ }
+ }
- /**
- * The uuid of the node to add.
- */
- private final String uuid;
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.ADD_NODE);
+ logLine.append(' ');
+ logLine.append(uuid);
+ return logLine.toString();
+ }
+ }
- /**
- * The document to add to the index, or <code>null</code> if not
- * available.
- */
- private Document doc;
+ /**
+ * Commits a transaction.
+ */
+ private static class Commit extends Action
+ {
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param uuid
- * the uuid of the node to add.
- */
- AddNode(long transactionId, String uuid) {
- super(transactionId, Action.TYPE_ADD_NODE);
- this.uuid = uuid;
- }
+ /**
+ * Creates a new Commit action.
+ *
+ * @param transactionId
+ * the id of the transaction that is committed.
+ */
+ Commit(long transactionId)
+ {
+ super(transactionId, Action.TYPE_COMMIT);
+ }
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param doc
- * the document to add.
- */
- AddNode(long transactionId, Document doc) {
- this(transactionId, doc.get(FieldNames.UUID));
- this.doc = doc;
- }
+ /**
+ * Creates a new Commit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this method.
+ * @return the Commit action.
+ */
+ static Commit fromString(long transactionId, String arguments)
+ {
+ return new Commit(transactionId);
+ }
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the arguments to this action. The uuid of the node to add
- * @return the AddNode action.
- * @throws IllegalArgumentException
- * if the arguments are malformed. Not a UUID.
- */
- static AddNode fromString(long transactionId, String arguments)
- throws IllegalArgumentException {
- // simple length check
- if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
- throw new IllegalArgumentException("arguments is not a uuid");
- }
- return new AddNode(transactionId, arguments);
- }
+ /**
+ * Touches the last flush time (sets it to the current time).
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ index.lastFlushTime = System.currentTimeMillis();
+ }
- /**
- * Adds a node to the index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- if (doc == null) {
- try {
- doc = index.createDocument(uuid);
- } catch (RepositoryException e) {
- // node does not exist anymore
- log.debug(e.getMessage());
- }
- }
- if (doc != null) {
- index.volatileIndex.addDocuments(new Document[] { doc });
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ return Long.toString(getTransactionId()) + ' ' + Action.COMMIT;
+ }
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.ADD_NODE);
- logLine.append(' ');
- logLine.append(uuid);
- return logLine.toString();
- }
- }
+ /**
+ * Creates a new sub index but does not add it to the active persistent
+ * index list.
+ */
+ private static class CreateIndex extends Action
+ {
- /**
- * Commits a transaction.
- */
- private static class Commit extends Action {
+ /**
+ * The name of the index to create.
+ */
+ private String indexName;
- /**
- * Creates a new Commit action.
- *
- * @param transactionId
- * the id of the transaction that is committed.
- */
- Commit(long transactionId) {
- super(transactionId, Action.TYPE_COMMIT);
- }
+ /**
+ * Creates a new CreateIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to add, or <code>null</code> if an
+ * index with a new name should be created.
+ */
+ CreateIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_CREATE_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * Creates a new Commit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this method.
- * @return the Commit action.
- */
- static Commit fromString(long transactionId, String arguments) {
- return new Commit(transactionId);
- }
+ /**
+ * Creates a new CreateIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to create.
+ * @return the CreateIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static CreateIndex fromString(long transactionId, String arguments)
+ {
+ // when created from String, this action is executed as redo action
+ return new CreateIndex(transactionId, arguments);
+ }
- /**
- * Touches the last flush time (sets it to the current time).
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- index.lastFlushTime = System.currentTimeMillis();
- }
+ /**
+ * Creates a new index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ indexName = idx.getName();
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- return Long.toString(getTransactionId()) + ' ' + Action.COMMIT;
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public void undo(MultiIndex index) throws IOException
+ {
+ if (index.hasIndex(indexName))
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ idx.close();
+ index.deleteIndex(idx);
+ }
+ }
- /**
- * Creates an new sub index but does not add it to the active persistent
- * index list.
- */
- private static class CreateIndex extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.CREATE_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * Returns the index name that has been created. If this method is
+ * called before {@link #execute(MultiIndex)} it will return
+ * <code>null</code>.
+ *
+ * @return the name of the index that has been created.
+ */
+ String getIndexName()
+ {
+ return indexName;
+ }
+ }
- /**
- * Creates a new CreateIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to add, or <code>null</code> if an
- * index with a new name should be created.
- */
- CreateIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_CREATE_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * Closes and deletes an index that is no longer in use.
+ */
+ private static class DeleteIndex extends Action
+ {
- /**
- * Creates a new CreateIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to create.
- * @return the AddIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static CreateIndex fromString(long transactionId, String arguments) {
- // when created from String, this action is executed as redo action
- return new CreateIndex(transactionId, arguments);
- }
+ /**
+ * The name of the index to delete.
+ */
+ private String indexName;
- /**
- * Creates a new index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- indexName = idx.getName();
- }
+ /**
+ * Creates a new DeleteIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to delete.
+ */
+ DeleteIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_DELETE_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * @inheritDoc
- */
- public void undo(MultiIndex index) throws IOException {
- if (index.hasIndex(indexName)) {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- idx.close();
- index.deleteIndex(idx);
- }
- }
+ /**
+ * Creates a new DeleteIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to delete.
+ * @return the DeleteIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static DeleteIndex fromString(long transactionId, String arguments)
+ {
+ return new DeleteIndex(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.CREATE_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
+ /**
+ * Removes a sub index from <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ // get index if it exists
+ for (Iterator it = index.indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ idx.close();
+ index.deleteIndex(idx);
+ break;
+ }
+ }
+ }
- /**
- * Returns the index name that has been created. If this method is
- * called before {@link #execute(MultiIndex)} it will return
- * <code>null</code>.
- *
- * @return the name of the index that has been created.
- */
- String getIndexName() {
- return indexName;
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.DELETE_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
+ }
- /**
- * Closes and deletes an index that is no longer in use.
- */
- private static class DeleteIndex extends Action {
+ /**
+ * Deletes a node from the index.
+ */
+ private static class DeleteNode extends Action
+ {
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * The maximum length of a DeleteNode String.
+ */
+ private static final int ENTRY_LENGTH =
+ Long.toString(Long.MAX_VALUE).length() + Action.DELETE_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
- /**
- * Creates a new DeleteIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to delete.
- */
- DeleteIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_DELETE_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * The uuid of the node to remove.
+ */
+ private final String uuid;
- /**
- * Creates a new DeleteIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to delete.
- * @return the DeleteIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static DeleteIndex fromString(long transactionId, String arguments) {
- return new DeleteIndex(transactionId, arguments);
- }
+ /**
+ * Creates a new DeleteNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param uuid
+ * the uuid of the node to delete.
+ */
+ DeleteNode(long transactionId, String uuid)
+ {
+ super(transactionId, Action.TYPE_DELETE_NODE);
+ this.uuid = uuid;
+ }
- /**
- * Removes a sub index from <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- // get index if it exists
- for (Iterator it = index.indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- idx.close();
- index.deleteIndex(idx);
- break;
- }
- }
- }
+ /**
+ * Creates a new DeleteNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the uuid of the node to delete.
+ * @return the DeleteNode action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed. Not a UUID.
+ */
+ static DeleteNode fromString(long transactionId, String arguments)
+ {
+ // simple length check
+ if (arguments.length() != Constants.UUID_FORMATTED_LENGTH)
+ {
+ throw new IllegalArgumentException("arguments is not a uuid");
+ }
+ return new DeleteNode(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.DELETE_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
- }
+ /**
+ * Deletes a node from the index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ String uuidString = uuid.toString();
+ // check if indexing queue is still working on
+ // this node from a previous update
+ Document doc = index.indexingQueue.removeDocument(uuidString);
+ if (doc != null)
+ {
+ Util.disposeDocument(doc);
+ }
+ Term idTerm = new Term(FieldNames.UUID, uuidString);
+ // if the document cannot be deleted from the volatile index
+ // delete it from one of the persistent indexes.
+ int num = index.volatileIndex.removeDocument(idTerm);
+ if (num == 0)
+ {
+ for (int i = index.indexes.size() - 1; i >= 0; i--)
+ {
+ // only look in registered indexes
+ PersistentIndex idx = (PersistentIndex)index.indexes.get(i);
+ if (index.indexNames.contains(idx.getName()))
+ {
+ num = idx.removeDocument(idTerm);
+ if (num > 0)
+ {
+ return;
+ }
+ }
+ }
+ }
+ }
- /**
- * Deletes a node from the index.
- */
- private static class DeleteNode extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.DELETE_NODE);
+ logLine.append(' ');
+ logLine.append(uuid);
+ return logLine.toString();
+ }
+ }
- /**
- * The maximum length of a DeleteNode String.
- */
- private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE)
- .length()
- + Action.DELETE_NODE.length()
- + Constants.UUID_FORMATTED_LENGTH
- + 2;
+ /**
+ * Starts a transaction.
+ */
+ private static class Start extends Action
+ {
- /**
- * The uuid of the node to remove.
- */
- private final String uuid;
+ /**
+ * Creates a new Start transaction action.
+ *
+ * @param transactionId
+ * the id of the transaction that started.
+ */
+ Start(long transactionId)
+ {
+ super(transactionId, Action.TYPE_START);
+ }
- /**
- * Creates a new DeleteNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param uuid
- * the uuid of the node to delete.
- */
- DeleteNode(long transactionId, String uuid) {
- super(transactionId, Action.TYPE_DELETE_NODE);
- this.uuid = uuid;
- }
+ /**
+ * Creates a new Start action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this method.
+ * @return the Start action.
+ */
+ static Start fromString(long transactionId, String arguments)
+ {
+ return new Start(transactionId);
+ }
- /**
- * Creates a new DeleteNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the uuid of the node to delete.
- * @return the DeleteNode action.
- * @throws IllegalArgumentException
- * if the arguments are malformed. Not a UUID.
- */
- static DeleteNode fromString(long transactionId, String arguments) {
- // simple length check
- if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
- throw new IllegalArgumentException("arguments is not a uuid");
- }
- return new DeleteNode(transactionId, arguments);
- }
+ /**
+ * Sets the current transaction id on <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ index.currentTransactionId = getTransactionId();
+ }
- /**
- * Deletes a node from the index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- String uuidString = uuid.toString();
- // check if indexing queue is still working on
- // this node from a previous update
- Document doc = index.indexingQueue.removeDocument(uuidString);
- if (doc != null) {
- Util.disposeDocument(doc);
- }
- Term idTerm = new Term(FieldNames.UUID, uuidString);
- // if the document cannot be deleted from the volatile index
- // delete it from one of the persistent indexes.
- int num = index.volatileIndex.removeDocument(idTerm);
- if (num == 0) {
- for (int i = index.indexes.size() - 1; i >= 0; i--) {
- // only look in registered indexes
- PersistentIndex idx = (PersistentIndex) index.indexes
- .get(i);
- if (index.indexNames.contains(idx.getName())) {
- num = idx.removeDocument(idTerm);
- if (num > 0) {
- return;
- }
- }
- }
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ return Long.toString(getTransactionId()) + ' ' + Action.START;
+ }
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.DELETE_NODE);
- logLine.append(' ');
- logLine.append(uuid);
- return logLine.toString();
- }
- }
+ /**
+ * Commits the volatile index to disk.
+ */
+ private static class VolatileCommit extends Action
+ {
- /**
- * Starts a transaction.
- */
- private static class Start extends Action {
+ /**
+ * The name of the target index to commit to.
+ */
+ private final String targetIndex;
- /**
- * Creates a new Start transaction action.
- *
- * @param transactionId
- * the id of the transaction that started.
- */
- Start(long transactionId) {
- super(transactionId, Action.TYPE_START);
- }
+ /**
+ * Creates a new VolatileCommit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ */
+ VolatileCommit(long transactionId, String targetIndex)
+ {
+ super(transactionId, Action.TYPE_VOLATILE_COMMIT);
+ this.targetIndex = targetIndex;
+ }
- /**
- * Creates a new Start action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this method.
- * @return the Start action.
- */
- static Start fromString(long transactionId, String arguments) {
- return new Start(transactionId);
- }
+ /**
+ * Creates a new VolatileCommit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this implementation.
+ * @return the VolatileCommit action.
+ */
+ static VolatileCommit fromString(long transactionId, String arguments)
+ {
+ return new VolatileCommit(transactionId, arguments);
+ }
- /**
- * Sets the current transaction id on <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- index.currentTransactionId = getTransactionId();
- }
+ /**
+ * Commits the volatile index to disk.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ VolatileIndex volatileIndex = index.getVolatileIndex();
+ PersistentIndex persistentIndex = index.getOrCreateIndex(targetIndex);
+ persistentIndex.copyIndex(volatileIndex);
+ index.resetVolatileIndex();
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- return Long.toString(getTransactionId()) + ' ' + Action.START;
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.VOLATILE_COMMIT);
+ logLine.append(' ');
+ logLine.append(targetIndex);
+ return logLine.toString();
+ }
+ }
- /**
- * Commits the volatile index to disk.
- */
- private static class VolatileCommit extends Action {
+ /**
+ * Set indexer io mode.
+ * @param ioMode
+ * @throws IOException
+ */
+ public void setIndexerIoMode(IndexerIoMode ioMode) throws IOException
+ {
+ log.info("Indexer io mode=" + ioMode);
+ // do something if changed
+ if (!this.ioMode.equals(ioMode))
+ {
+ this.ioMode = ioMode;
+ switch (ioMode)
+ {
+ case READ_ONLY :
+ setReadOny();
+ break;
+ case READ_WRITE :
+ setReadWrite();
+ break;
+ }
+ }
- /**
- * The name of the target index to commit to.
- */
- private final String targetIndex;
+ }
- /**
- * Creates a new VolatileCommit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- */
- VolatileCommit(long transactionId, String targetIndex) {
- super(transactionId, Action.TYPE_VOLATILE_COMMIT);
- this.targetIndex = targetIndex;
- }
+ /**
+ * Sets mode to READ_ONLY, discarding flush task
+ */
+ protected void setReadOny()
+ {
+ // try to stop merger in safe way
+ merger.dispose();
+ flushTask.cancel();
+ FLUSH_TIMER.purge();
+ this.redoLog = null;
+ }
- /**
- * Creates a new VolatileCommit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this implementation.
- * @return the VolatileCommit action.
- */
- static VolatileCommit fromString(long transactionId, String arguments) {
- return new VolatileCommit(transactionId, arguments);
- }
+ /**
+ * Sets mode to READ_WRITE, initiating recovery process
+ *
+ * @throws IOException
+ */
+ protected void setReadWrite() throws IOException
+ {
+ this.redoLog = new RedoLog(indexDir);
+ redoLogApplied = redoLog.hasEntries();
- /**
- * Commits the volatile index to disk.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- VolatileIndex volatileIndex = index.getVolatileIndex();
- PersistentIndex persistentIndex = index
- .getOrCreateIndex(targetIndex);
- persistentIndex.copyIndex(volatileIndex);
- index.resetVolatileIndex();
- }
+ // run recovery
+ Recovery.run(this, redoLog);
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.VOLATILE_COMMIT);
- logLine.append(' ');
- logLine.append(targetIndex);
- return logLine.toString();
- }
- }
+ // enqueue unused segments for deletion
+ enqueueUnusedSegments();
+ attemptDelete();
+
+ // now that we are ready, start index merger
+ merger.start();
+ if (redoLogApplied)
+ {
+ // wait for the index merge to finish pending jobs
+ try
+ {
+ merger.waitUntilIdle();
+ }
+ catch (InterruptedException e)
+ {
+ // move on
+ }
+ flush();
+ }
+
+ if (indexNames.size() > 0)
+ {
+ scheduleFlushTask();
+ }
+ }
+
+ /**
+ * Temporary solution for indexer in cluster. This method re-reads list of indexes from FS.
+ * @throws IOException
+ */
+ protected void refreshIndexesList() throws IOException
+ {
+ log.info("Refreshing list of indexes...");
+ // release reader if any
+ releaseMultiReader();
+ // get new indexInfo
+ IndexInfos newIndexNames = new IndexInfos("indexes");
+ if (newIndexNames.exists(indexDir))
+ {
+ newIndexNames.read(indexDir);
+ }
+ // prepare added/removed sets
+ Set<String> removed = new HashSet<String>(indexNames.getNames());
+ removed.removeAll(newIndexNames.getNames());
+
+ Set<String> added = new HashSet<String>(newIndexNames.getNames());
+ added.removeAll(indexNames.getNames());
+
+ // remove removed indexes
+ Iterator<PersistentIndex> iterator = indexes.iterator();
+ while (iterator.hasNext())
+ {
+ PersistentIndex index = iterator.next();
+ if (removed.contains(((PersistentIndex)index).getName()))
+ {
+ ((PersistentIndex)index).close();
+ iterator.remove();
+ }
+ }
+ // add added indexes
+ for (String name : added)
+ {
+ // only open if it still exists
+ // it is possible that indexNames still contains a name for
+ // an index that has been deleted, but indexNames has not been
+ // written to disk.
+ if (!directoryManager.hasDirectory(name))
+ {
+ log.debug("index does not exist anymore: " + name);
+ // move on to next index
+ continue;
+ }
+ PersistentIndex index =
+ new PersistentIndex(name, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+ directoryManager);
+ index.setMaxFieldLength(handler.getMaxFieldLength());
+ index.setUseCompoundFile(handler.getUseCompoundFile());
+ index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
+ indexes.add(index);
+ }
+ indexNames = newIndexNames;
+ }
}
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -402,8 +402,9 @@
// WARN. DON'T USE access item BY PATH - it's may be a node in case of
// residual definitions in NT
List<ValueData> data =
- prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData)stateProvider.getItemData(prop
- .getIdentifier())).getValues();
+ prop.getValues().size() > 0 ? prop.getValues() : ((PropertyData)stateProvider.getItemData(
+ (NodeData)stateProvider.getItemData(prop.getParentIdentifier()), prop.getQPath().getEntries()[prop
+ .getQPath().getEntries().length - 1])).getValues();
if (data == null)
log.warn("null value found at property " + prop.getQPath().getAsString());
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -52,6 +52,7 @@
import org.exoplatform.services.jcr.impl.core.query.DefaultQueryNodeFactory;
import org.exoplatform.services.jcr.impl.core.query.ErrorLog;
import org.exoplatform.services.jcr.impl.core.query.ExecutableQuery;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
import org.exoplatform.services.jcr.impl.core.query.QueryHandler;
import org.exoplatform.services.jcr.impl.core.query.QueryHandlerContext;
import org.exoplatform.services.jcr.impl.core.query.SearchIndexConfigurationHelper;
@@ -514,42 +515,44 @@
indexingConfig = createIndexingConfiguration(nsMappings);
analyzer.setIndexingConfig(indexingConfig);
- index = new MultiIndex(this, context.getIndexingTree());
- if (index.numDocs() == 0 && context.isCreateInitialIndex())
+ index = new MultiIndex(this, context.getIndexingTree(), ioMode);
+ // if RW mode, create initial index and start check
+ if (ioMode == IndexerIoMode.READ_WRITE)
{
-
- index.createInitialIndex(context.getItemStateManager());
- }
- if (consistencyCheckEnabled && (index.getRedoLogApplied() || forceConsistencyCheck))
- {
- log.info("Running consistency check...");
- try
+ if (index.numDocs() == 0 && context.isCreateInitialIndex())
{
- ConsistencyCheck check = ConsistencyCheck.run(index, context.getItemStateManager());
- if (autoRepair)
+ index.createInitialIndex(context.getItemStateManager());
+ }
+ if (consistencyCheckEnabled && (index.getRedoLogApplied() || forceConsistencyCheck))
+ {
+ log.info("Running consistency check...");
+ try
{
- check.repair(true);
- }
- else
- {
- List<ConsistencyCheckError> errors = check.getErrors();
- if (errors.size() == 0)
+ ConsistencyCheck check = ConsistencyCheck.run(index, context.getItemStateManager());
+ if (autoRepair)
{
- log.info("No errors detected.");
+ check.repair(true);
}
- for (Iterator<ConsistencyCheckError> it = errors.iterator(); it.hasNext();)
+ else
{
- ConsistencyCheckError err = it.next();
- log.info(err.toString());
+ List<ConsistencyCheckError> errors = check.getErrors();
+ if (errors.size() == 0)
+ {
+ log.info("No errors detected.");
+ }
+ for (Iterator<ConsistencyCheckError> it = errors.iterator(); it.hasNext();)
+ {
+ ConsistencyCheckError err = it.next();
+ log.info(err.toString());
+ }
}
}
+ catch (Exception e)
+ {
+ log.warn("Failed to run consistency check on index: " + e);
+ }
}
- catch (Exception e)
- {
- log.warn("Failed to run consistency check on index: " + e);
- }
}
-
// initialize spell checker
spellChecker = createSpellChecker();
@@ -2639,4 +2642,30 @@
return new LuceneQueryHits(reader, searcher, query);
}
+ /**
+ * @throws IOException
+ * @see org.exoplatform.services.jcr.impl.core.query.QueryHandler#setIndexerIoMode(org.exoplatform.services.jcr.impl.core.query.IndexerIoMode)
+ */
+ public void setIndexerIoMode(IndexerIoMode ioMode) throws IOException
+ {
+ log.info("Indexer io mode=" + ioMode);
+ // do something if changed
+ if (!this.ioMode.equals(ioMode))
+ {
+ this.ioMode = ioMode;
+ if (index != null)
+ {
+ switch (ioMode)
+ {
+ case READ_ONLY :
+ index.setIndexerIoMode(ioMode);
+ break;
+ case READ_WRITE :
+ index.setIndexerIoMode(ioMode);
+ break;
+ }
+ }
+ }
+
+ }
}
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/AbstractWriteOnlyCacheLoader.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/AbstractWriteOnlyCacheLoader.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/AbstractWriteOnlyCacheLoader.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2003-2009 eXo Platform SAS.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Affero General Public License
+ * as published by the Free Software Foundation; either version 3
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see<http://www.gnu.org/licenses/>.
+ */
+package org.exoplatform.services.jcr.impl.storage.jbosscache;
+
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Modification;
+import org.jboss.cache.config.CacheLoaderConfig.IndividualCacheLoaderConfig;
+import org.jboss.cache.loader.AbstractCacheLoader;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Created by The eXo Platform SAS.
+ *
+ * <br/>
+ * Date: 06.11.2009
+ *
+ * @author <a href="mailto:alex.reshetnyak at exoplatform.com.ua">Alex Reshetnyak</a>
+ * @version $Id: AbstractWriteOnlyCacheLoader.java 480 2009-11-06 10:17:07Z pnedonosko $
+ */
+public abstract class AbstractWriteOnlyCacheLoader
+ extends AbstractCacheLoader
+{
+ private IndividualCacheLoaderConfig config;
+
+ /**
+ * {@inheritDoc}
+ */
+ public boolean exists(Fqn arg0) throws Exception
+ {
+ return false;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Map<Object, Object> get(Fqn arg0) throws Exception
+ {
+ return Collections.emptyMap();
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Set<Object> getChildrenNames(Fqn arg0) throws Exception
+ {
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public IndividualCacheLoaderConfig getConfig()
+ {
+ return config;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void put(Fqn arg0, Map<Object, Object> arg1) throws Exception
+ {
+ throw new WriteOnlyCacheLoaderException("The method 'put(Fqn arg0, Map<Object, Object> arg1))' should not be called.");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Object put(Fqn arg0, Object arg1, Object arg2) throws Exception
+ {
+ throw new WriteOnlyCacheLoaderException("The method 'put(Fqn arg0, Object arg1, Object arg2)' should not be called.");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void remove(Fqn arg0) throws Exception
+ {
+ throw new WriteOnlyCacheLoaderException("The method 'remove(Fqn arg0)' should not be called.");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public Object remove(Fqn arg0, Object arg1) throws Exception
+ {
+ throw new WriteOnlyCacheLoaderException("The method 'remove(Fqn arg0, Object arg1)' should not be called.");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void removeData(Fqn arg0) throws Exception
+ {
+ throw new WriteOnlyCacheLoaderException("The method 'removeData(Fqn arg0)' should not be called.");
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void setConfig(IndividualCacheLoaderConfig config)
+ {
+ this.config = config;
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public abstract void put(List<Modification> modifications) throws Exception ;
+
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/AbstractWriteOnlyCacheLoader.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Added: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/WriteOnlyCacheLoaderException.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/WriteOnlyCacheLoaderException.java (rev 0)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/WriteOnlyCacheLoaderException.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -0,0 +1,48 @@
+package org.exoplatform.services.jcr.impl.storage.jbosscache;
+
+public class WriteOnlyCacheLoaderException
+ extends Exception
+{
+ /**
+ * Constructs an Exception without a message.
+ */
+ public WriteOnlyCacheLoaderException()
+ {
+ super();
+ }
+
+ /**
+ * Constructs an Exception with a detailed message.
+ *
+ * @param message
+ * The message associated with the exception.
+ */
+ public WriteOnlyCacheLoaderException(String message)
+ {
+ super(message);
+ }
+
+ /**
+ * Constructs an Exception with a detailed message and base exception.
+ *
+ * @param message
+ * The message associated with the exception.
+ * @param cause
+ * Throwable, the base exception.
+ */
+ public WriteOnlyCacheLoaderException(String message, Throwable cause)
+ {
+ super(message, cause);
+ }
+
+ /**
+ * WriteOnlyCacheLoaderException constructor.
+ *
+ * @param cause
+ * Throwable, the base exception.
+ */
+ public WriteOnlyCacheLoaderException(Throwable cause)
+ {
+ super(cause);
+ }
+}
Property changes on: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jbosscache/WriteOnlyCacheLoaderException.java
___________________________________________________________________
Name: svn:mime-type
+ text/plain
Modified: jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java
===================================================================
--- jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java 2009-12-22 10:32:33 UTC (rev 1143)
+++ jcr/branches/1.12.0-JBCCACHE/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java 2009-12-22 13:29:21 UTC (rev 1144)
@@ -22,6 +22,7 @@
import org.exoplatform.services.jcr.impl.core.SessionImpl;
import org.exoplatform.services.jcr.impl.core.query.AbstractQueryHandler;
import org.exoplatform.services.jcr.impl.core.query.ExecutableQuery;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
import org.exoplatform.services.jcr.impl.core.query.QueryHandlerContext;
import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
@@ -78,7 +79,7 @@
}
- public void setContext(QueryHandlerContext context) throws IOException
+ public void setContext(QueryHandlerContext context)
{
// TODO Auto-generated method stub
@@ -93,4 +94,13 @@
return null;
}
+ /**
+ * @see org.exoplatform.services.jcr.impl.core.query.QueryHandler#setIndexerIoMode(org.exoplatform.services.jcr.impl.core.query.IndexerIoMode)
+ */
+ public void setIndexerIoMode(IndexerIoMode ioMode) throws IOException
+ {
+ // TODO Auto-generated method stub
+
+ }
+
}
More information about the exo-jcr-commits
mailing list