[exo-jcr-commits] exo-jcr SVN: r3297 - in jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query: jbosscache and 1 other directory.

do-not-reply at jboss.org
Fri Oct 15 10:52:05 EDT 2010


Author: nzamosenchuk
Date: 2010-10-15 10:52:04 -0400 (Fri, 15 Oct 2010)
New Revision: 3297

Modified:
   jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java
   jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
   jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JBossCacheIndexInfos.java
   jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChunkIndex.java
   jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
Log:
EXOJCR-987: refactoring.
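
This revision replaces the IndexInfos / IndexerIoModeHandler wiring in ChunkIndex with a
ChunkService that partitions the index into a fixed set of chunks and routes each document
by its node uuid. The interface below is only a sketch inferred from the call sites in this
revision (getChunkCount(), getAssignedChunks(), getChunkId(uuid)); the actual ChunkService
in org.exoplatform.services.jcr.impl.core.query may declare more members.

   package org.exoplatform.services.jcr.impl.core.query;

   import java.util.Set;

   public interface ChunkService
   {
      /** Total number of chunks the index is partitioned into. */
      int getChunkCount();

      /** Ids of the chunks this instance currently owns for writing. */
      Set<Integer> getAssignedChunks();

      /** Maps a node uuid to the id of the chunk that stores its document. */
      int getChunkId(String uuid);
   }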

Modified: jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java
===================================================================
--- jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java	2010-10-14 14:11:54 UTC (rev 3296)
+++ jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/AbstractQueryHandler.java	2010-10-15 14:52:04 UTC (rev 3297)
@@ -18,10 +18,10 @@
 
 import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
 import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.exoplatform.services.jcr.impl.core.query.lucene.DefaultIndexUpdateMonitor;
 import org.exoplatform.services.jcr.impl.core.query.lucene.IndexInfos;
 import org.exoplatform.services.jcr.impl.core.query.lucene.IndexUpdateMonitor;
-import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

Modified: jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java
===================================================================
--- jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java	2010-10-14 14:11:54 UTC (rev 3296)
+++ jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/QueryHandler.java	2010-10-15 14:52:04 UTC (rev 3297)
@@ -21,9 +21,9 @@
 import org.exoplatform.services.jcr.datamodel.NodeData;
 import org.exoplatform.services.jcr.impl.core.SessionDataManager;
 import org.exoplatform.services.jcr.impl.core.SessionImpl;
+import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.exoplatform.services.jcr.impl.core.query.lucene.IndexInfos;
 import org.exoplatform.services.jcr.impl.core.query.lucene.IndexUpdateMonitor;
-import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.exoplatform.services.jcr.impl.core.query.lucene.QueryHits;
 
 import java.io.IOException;

Modified: jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JBossCacheIndexInfos.java
===================================================================
--- jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JBossCacheIndexInfos.java	2010-10-14 14:11:54 UTC (rev 3296)
+++ jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/jbosscache/JBossCacheIndexInfos.java	2010-10-15 14:52:04 UTC (rev 3297)
@@ -21,8 +21,8 @@
 import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
 import org.exoplatform.services.jcr.impl.core.query.IndexerIoModeHandler;
 import org.exoplatform.services.jcr.impl.core.query.IndexerIoModeListener;
+import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.exoplatform.services.jcr.impl.core.query.lucene.IndexInfos;
-import org.exoplatform.services.jcr.impl.core.query.lucene.ChunkIndex;
 import org.exoplatform.services.jcr.impl.util.io.PrivilegedCacheHelper;
 import org.exoplatform.services.log.ExoLogger;
 import org.exoplatform.services.log.Log;

Modified: jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChunkIndex.java
===================================================================
--- jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChunkIndex.java	2010-10-14 14:11:54 UTC (rev 3296)
+++ jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChunkIndex.java	2010-10-15 14:52:04 UTC (rev 3297)
@@ -17,21 +17,18 @@
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
 import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
 import org.exoplatform.services.jcr.datamodel.ItemData;
 import org.exoplatform.services.jcr.datamodel.NodeData;
-import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
-import org.exoplatform.services.jcr.impl.core.query.IndexerIoModeHandler;
+import org.exoplatform.services.jcr.impl.core.query.ChunkService;
 import org.exoplatform.services.jcr.impl.core.query.IndexingTree;
 import org.exoplatform.services.jcr.impl.core.query.lucene.directory.DirectoryManager;
 import org.exoplatform.services.jcr.impl.util.SecurityHelper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -39,12 +36,10 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.Map.Entry;
 
 import javax.jcr.ItemNotFoundException;
 import javax.jcr.RepositoryException;
@@ -61,17 +56,12 @@
    private static final Logger log = LoggerFactory.getLogger("exo.jcr.component.core.MultiIndex");
 
    /**
-    * Names of active persistent index directories.
-    */
-   private IndexInfos indexNames;
-
-   /**
     * List of open persistent indexes. This list may also contain an open
-    * PersistentIndex owned by the IndexMerger daemon. Such an index is not
+    * PersistentIndexChunk owned by the IndexMerger daemon. Such an index is not
     * registered with indexNames and <b>must not</b> be used in regular index
     * operations (delete node, etc.)!
     */
-   private final List<PersistentIndex> indexes = new ArrayList<PersistentIndex>();
+   private final List<PersistentIndexChunk> indexes = new ArrayList<PersistentIndexChunk>();
 
    /**
     * The internal namespace mappings of the query manager.
@@ -124,10 +114,7 @@
     */
    private final IndexFormatVersion version;
 
-   /**
-    * The handler of the Indexer io mode
-    */
-   private final IndexerIoModeHandler modeHandler;
+   private final ChunkService chunkService;
 
    /**
     * Creates a new MultiIndex.
@@ -140,10 +127,8 @@
     * @throws IOException
     *             if an error occurs
     */
-   ChunkIndex(SearchIndex handler, IndexingTree indexingTree, IndexerIoModeHandler modeHandler, IndexInfos indexInfos,
-      IndexUpdateMonitor indexUpdateMonitor) throws IOException
+   ChunkIndex(SearchIndex handler, IndexingTree indexingTree) throws IOException
    {
-      this.modeHandler = modeHandler;
       this.directoryManager = handler.getDirectoryManager();
       // this method is run in privileged mode internally
       this.indexDir = directoryManager.getDirectory(".");
@@ -151,27 +136,20 @@
       this.cache = new DocNumberCache(handler.getCacheSize());
       this.indexingTree = indexingTree;
       this.nsMappings = handler.getNamespaceMappings();
-      this.indexNames = indexInfos;
-      this.indexNames.setDirectory(indexDir);
-      // this method is run in privileged mode internally
-      this.indexNames.read();
+      // TODO remove this stub
+      this.chunkService = new ChunkServiceImpl();
 
-      // TODO remove hard-coded piece
-      this.indexNames.addName("1");
-      this.indexNames.addName("2");
-      this.indexNames.addName("3");
-      this.indexNames.addName("4");
-
       // this method is run in privileged mode internally
       IndexingQueueStore store = new IndexingQueueStore(indexDir);
 
       // initialize indexing queue
       this.indexingQueue = new IndexingQueue(store);
-      // copy current index names
-      Set<String> currentNames = new HashSet<String>(indexNames.getNames());
 
       // open persistent indexes
-      for (String name : currentNames)
+
+      Set<Integer> assignedChunks = chunkService.getAssignedChunks();
+
+      for (int i = 0; i < chunkService.getChunkCount(); i++)
       {
          // only open if it still exists
          // it is possible that indexNames still contains a name for
@@ -185,9 +163,10 @@
          //            // move on to next index
          //            continue;
          //         }
-         PersistentIndex index =
-            new PersistentIndex(name, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
-               directoryManager);
+         PersistentIndexChunk index =
+            new PersistentIndexChunk(i, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+               directoryManager, !assignedChunks.contains(Integer.valueOf(i)));
+         
          index.setMaxFieldLength(handler.getMaxFieldLength());
          index.setUseCompoundFile(handler.getUseCompoundFile());
          index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
@@ -213,52 +192,105 @@
          });
       }
       indexingQueue.initialize(this);
-      this.indexNames.setMultiIndex(this);
    }
 
    /**
-    * Returns the number of documents in this index.
+    * Adds a document to the index.
     * 
-    * @return the number of documents in this index.
+    * @param doc
+    *            the document to add.
     * @throws IOException
-    *             if an error occurs while reading from the index.
+    *             if an error occurs while adding the document to the index.
     */
-   int numDocs() throws IOException
+   public void addDocument(Document doc) throws IOException
    {
-      if (indexNames.size() == 0)
+      update(Collections.EMPTY_LIST, Arrays.asList(new Document[]{doc}));
+   }
+
+   /**
+    * Closes this <code>ChunkIndex</code>.
+    */
+   public void close()
+   {
+
+      // stop index merger
+      // when calling this method we must not lock this MultiIndex, otherwise
+      // a deadlock might occur
+
+      synchronized (this)
       {
-         return 0;
-      }
-      else
-      {
-         final CachingMultiIndexReader reader = getIndexReader();
+
+         // commit / close indexes
          try
          {
-            return reader.numDocs();
+            releaseMultiReader();
          }
-         finally
+         catch (IOException e)
          {
-            SecurityHelper.doPriviledgedIOExceptionAction(new PrivilegedExceptionAction<Object>()
-            {
-               public Object run() throws Exception
-               {
-                  reader.release();
-                  return null;
-               }
-            });
+            log.error("Exception while closing search index.", e);
          }
+
+         // TODO Should they be committed before close?!
+         for (int i = 0; i < indexes.size(); i++)
+         {
+            (indexes.get(i)).close();
+         }
+
+         // close indexing queue
+         indexingQueue.close();
+
+         // finally close directory
+         try
+         {
+            indexDir.close();
+         }
+         catch (IOException e)
+         {
+            log.error("Exception while closing directory.", e);
+         }
       }
    }
 
    /**
-    * @return the index format version for this multi index.
+    * Returns a lucene Document for the <code>node</code>.
+    * 
+    * @param node
+    *            the node to index.
+    * @return the index document.
+    * @throws RepositoryException
+    *             if an error occurs while reading from the workspace.
     */
-   IndexFormatVersion getIndexFormatVersion()
+   public Document createDocument(NodeData node) throws RepositoryException
    {
-      return version;
+      return handler.createDocument(node, nsMappings, version);
    }
 
    /**
+    * Returns a lucene Document for the Node with <code>id</code>.
+    * 
+    * @param id
+    *            the id of the node to index.
+    * @return the index document.
+    * @throws RepositoryException
+    *             if an error occurs while reading from the workspace or if
+    *             there is no node with <code>id</code>.
+    */
+   public Document createDocument(String id) throws RepositoryException
+   {
+      ItemData data = handler.getContext().getItemStateManager().getItemData(id);
+      if (data == null)
+      {
+         throw new ItemNotFoundException("Item id=" + id + " not found");
+      }
+      if (!data.isNode())
+      {
+         throw new RepositoryException("Item with id " + id + " is not a node");
+      }
+      return createDocument((NodeData)data);
+
+   }
+
+   /**
     * Creates an initial index by traversing the node hierarchy starting at the
     * node with <code>rootId</code>.
     * 
@@ -273,9 +305,8 @@
     * @throws IllegalStateException
     *             if this index is not empty.
     */
-   void createInitialIndex(ItemDataConsumer stateMgr) throws IOException
+   public void createInitialIndex(ItemDataConsumer stateMgr) throws IOException
    {
-      // TODO: re-study check!!! 
       CachingMultiIndexReader reader = getIndexReader();
       int numDocs = reader.numDocs();
       reader.release();
@@ -286,8 +317,6 @@
          {
             long count = 0;
             // traverse and index workspace
-
-            // NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
             count = createIndex(indexingTree.getIndexingRoot(), stateMgr, count);
 
             log.info("Created initial index for {} nodes", new Long(count));
@@ -312,138 +341,24 @@
    }
 
    /**
-    * Atomically updates the index by removing some documents and adding
-    * others.
-    * 
-    * @param remove
-    *            collection of <code>UUID</code>s that identify documents to
-    *            remove
-    * @param add
-    *            collection of <code>Document</code>s to add. Some of the
-    *            elements in this collection may be <code>null</code>, to
-    *            indicate that a node could not be indexed successfully.
-    * @throws IOException
-    *             if an error occurs while updating the index.
+    * @return the index format version for this multi index.
     */
-   synchronized void update(final Collection<String> remove, final Collection<Document> add) throws IOException
+   public IndexFormatVersion getIndexFormatVersion()
    {
-      SecurityHelper.doPriviledgedIOExceptionAction(new PrivilegedExceptionAction<Object>()
-      {
-         public Object run() throws Exception
-         {
-            // make sure a reader is available during long updates
-            if (add.size() > handler.getBufferSize())
-            {
-               try
-               {
-                  getIndexReader().release();
-               }
-               catch (IOException e)
-               {
-                  // do not fail if an exception is thrown here
-                  log.warn("unable to prepare index reader " + "for queries during update", e);
-               }
-            }
-            try
-            {
-               for (Iterator<String> it = remove.iterator(); it.hasNext();)
-               {
-                  String uuidString = it.next();
-                  // check if indexing queue is still working on
-                  // this node from a previous update
-                  Document doc = indexingQueue.removeDocument(uuidString);
-                  if (doc != null)
-                  {
-                     Util.disposeDocument(doc);
-                  }
-                  Term idTerm = new Term(FieldNames.UUID, uuidString);
-                  getChunk(uuidString).removeDocument(idTerm);
-               }
-               for (Iterator<Document> it = add.iterator(); it.hasNext();)
-               {
-                  Document doc = it.next();
-                  if (doc != null)
-                  {
-                     if (doc != null)
-                     {
-                        String uuid = doc.get(FieldNames.UUID);
-                        getChunk(uuid).addDocuments(new Document[]{doc});
-                     }
-                  }
-               }
-               // TODO for owning indexes only
-               for (PersistentIndex idx : indexes)
-               {
-                  idx.commit();
-               }
-            }
-            finally
-            {
-               releaseMultiReader();
-            }
-            return null;
-         }
-      });
+      return version;
    }
 
    /**
-    * Adds a document to the index.
+    * Returns the indexing queue for this multi index.
     * 
-    * @param doc
-    *            the document to add.
-    * @throws IOException
-    *             if an error occurs while adding the document to the index.
+    * @return the indexing queue for this multi index.
     */
-   void addDocument(Document doc) throws IOException
+   public IndexingQueue getIndexingQueue()
    {
-      update(Collections.EMPTY_LIST, Arrays.asList(new Document[]{doc}));
+      return indexingQueue;
    }
 
    /**
-    * Deletes the first document that matches the <code>uuid</code>.
-    * 
-    * @param uuid
-    *            document that match this <code>uuid</code> will be deleted.
-    * @throws IOException
-    *             if an error occurs while deleting the document.
-    */
-   void removeDocument(String uuid) throws IOException
-   {
-      update(Arrays.asList(new String[]{uuid}), Collections.EMPTY_LIST);
-   }
-
-   /**
-    * Deletes all documents that match the <code>uuid</code>.
-    * 
-    * @param uuid
-    *            documents that match this <code>uuid</code> will be deleted.
-    * @return the number of deleted documents.
-    * @throws IOException
-    *             if an error occurs while deleting documents.
-    */
-   synchronized int removeAllDocuments(String uuid) throws IOException
-   {
-      int num;
-      try
-      {
-         Term idTerm = new Term(FieldNames.UUID, uuid.toString());
-         num = getChunk(uuid).removeDocument(idTerm);
-         for (int i = 0; i < indexes.size(); i++)
-         {
-            PersistentIndex index = indexes.get(i);
-            // TODO only remove documents from owning indexes
-            int removed = index.removeDocument(idTerm);
-            num += removed;
-         }
-      }
-      finally
-      {
-         releaseMultiReader();
-      }
-      return num;
-   }
-
-   /**
     * Returns a read-only <code>IndexReader</code> that spans all indexes of
     * this <code>MultiIndex</code>.
     * 
@@ -484,7 +399,7 @@
             if (multiReader == null)
             {
                List<ReadOnlyIndexReader> readerList = new ArrayList<ReadOnlyIndexReader>();
-               for (PersistentIndex idx : indexes)
+               for (PersistentIndexChunk idx : indexes)
                {
                   readerList.add(idx.getReadOnlyIndexReader(initCache));
                }
@@ -495,153 +410,244 @@
             return multiReader;
          }
       });
+   }
 
+   /**
+    * TODO solve this architecture issue
+    * This belongs only to a MultiIndex with a volatile index present.
+    * 
+    * @return always <code>false</code>, since ChunkIndex keeps no redo log
+    */
+   @Deprecated
+   public boolean getRedoLogApplied()
+   {
+      return false;
    }
 
    /**
-    * Closes this <code>MultiIndex</code>.
+    * Returns the number of documents in this index.
+    * 
+    * @return the number of documents in this index.
+    * @throws IOException
+    *             if an error occurs while reading from the index.
     */
-   void close()
+   public int numDocs() throws IOException
    {
-      if (modeHandler.getMode().equals(IndexerIoMode.READ_WRITE))
+      final CachingMultiIndexReader reader = getIndexReader();
+      try
       {
-
-         // stop index merger
-         // when calling this method we must not lock this MultiIndex, otherwise
-         // a deadlock might occur
-
-         synchronized (this)
+         return reader.numDocs();
+      }
+      finally
+      {
+         SecurityHelper.doPriviledgedIOExceptionAction(new PrivilegedExceptionAction<Object>()
          {
-
-            // commit / close indexes
-            try
+            public Object run() throws Exception
             {
-               releaseMultiReader();
+               reader.release();
+               return null;
             }
-            catch (IOException e)
-            {
-               log.error("Exception while closing search index.", e);
-            }
-
-            // TODO Should they be commited before close?!
-            for (int i = 0; i < indexes.size(); i++)
-            {
-               (indexes.get(i)).close();
-            }
-
-            // close indexing queue
-            indexingQueue.close();
-
-            // finally close directory
-            try
-            {
-               indexDir.close();
-            }
-            catch (IOException e)
-            {
-               log.error("Exception while closing directory.", e);
-            }
-         }
+         });
       }
    }
 
    /**
-    * Returns the namespace mappings of this search index.
-    * 
-    * @return the namespace mappings of this search index.
+    * TODO
     */
-   NamespaceMappings getNamespaceMappings()
+   public void reassignChunks()
    {
-      return nsMappings;
-   }
 
-   /**
-    * Returns the indexing queue for this multi index.
-    * 
-    * @return the indexing queue for this multi index.
-    */
-   public IndexingQueue getIndexingQueue()
-   {
-      return indexingQueue;
    }
 
    /**
-    * Returns a lucene Document for the <code>node</code>.
+    * Deletes all documents that match the <code>uuid</code>.
     * 
-    * @param node
-    *            the node to index.
-    * @return the index document.
-    * @throws RepositoryException
-    *             if an error occurs while reading from the workspace.
+    * @param uuid
+    *            documents that match this <code>uuid</code> will be deleted.
+    * @return the number of deleted documents.
+    * @throws IOException
+    *             if an error occurs while deleting documents.
     */
-   Document createDocument(NodeData node) throws RepositoryException
+   public synchronized int removeAllDocuments(String uuid) throws IOException
    {
-      return handler.createDocument(node, nsMappings, version);
+      int num;
+      try
+      {
+         Term idTerm = new Term(FieldNames.UUID, uuid.toString());
+         num = getChunk(uuid).removeDocument(idTerm);
+         for (int i = 0; i < indexes.size(); i++)
+         {
+            PersistentIndexChunk index = indexes.get(i);
+            // TODO only remove documents from owning indexes
+            int removed = index.removeDocument(idTerm);
+            num += removed;
+         }
+      }
+      finally
+      {
+         releaseMultiReader();
+      }
+      return num;
    }
 
    /**
-    * Returns a lucene Document for the Node with <code>id</code>.
+    * Deletes the first document that matches the <code>uuid</code>.
     * 
-    * @param id
-    *            the id of the node to index.
-    * @return the index document.
-    * @throws RepositoryException
-    *             if an error occurs while reading from the workspace or if
-    *             there is no node with <code>id</code>.
+    * @param uuid
+    *            document that matches this <code>uuid</code> will be deleted.
+    * @throws IOException
+    *             if an error occurs while deleting the document.
     */
-   Document createDocument(String id) throws RepositoryException
+   public void removeDocument(String uuid) throws IOException
    {
-      ItemData data = handler.getContext().getItemStateManager().getItemData(id);
-      if (data == null)
-      {
-         throw new ItemNotFoundException("Item id=" + id + " not found");
-      }
-      if (!data.isNode())
-      {
-         throw new RepositoryException("Item with id " + id + " is not a node");
-      }
-      return createDocument((NodeData)data);
-
+      update(Arrays.asList(new String[]{uuid}), Collections.EMPTY_LIST);
    }
 
    /**
-    * Releases the {@link #multiReader} and sets it <code>null</code>. If the
-    * reader is already <code>null</code> this method does nothing. When this
-    * method returns {@link #multiReader} is guaranteed to be <code>null</code>
-    * even if an exception is thrown.
-    * <p/>
-    * Please note that this method does not take care of any synchronization. A
-    * caller must ensure that it is the only thread operating on this multi
-    * index, or that it holds the {@link #updateMonitor}.
+    * Atomically updates the index by removing some documents and adding
+    * others.
     * 
+    * @param remove
+    *            collection of <code>UUID</code>s that identify documents to
+    *            remove
+    * @param add
+    *            collection of <code>Document</code>s to add. Some of the
+    *            elements in this collection may be <code>null</code>, to
+    *            indicate that a node could not be indexed successfully.
     * @throws IOException
-    *             if an error occurs while releasing the reader.
+    *             if an error occurs while updating the index.
     */
-   void releaseMultiReader() throws IOException
+   public synchronized void update(final Collection<String> remove, final Collection<Document> add) throws IOException
    {
       SecurityHelper.doPriviledgedIOExceptionAction(new PrivilegedExceptionAction<Object>()
       {
          public Object run() throws Exception
          {
-            if (multiReader != null)
+            // make sure a reader is available during long updates
+            if (add.size() > handler.getBufferSize())
             {
                try
                {
-                  multiReader.release();
+                  getIndexReader().release();
                }
-               finally
+               catch (IOException e)
                {
-                  multiReader = null;
+                  // do not fail if an exception is thrown here
+                  log.warn("unable to prepare index reader " + "for queries during update", e);
                }
             }
+            try
+            {
+               for (Iterator<String> it = remove.iterator(); it.hasNext();)
+               {
+                  String uuidString = it.next();
+                  // check if indexing queue is still working on
+                  // this node from a previous update
+                  Document doc = indexingQueue.removeDocument(uuidString);
+                  if (doc != null)
+                  {
+                     Util.disposeDocument(doc);
+                  }
+                  Term idTerm = new Term(FieldNames.UUID, uuidString);
+                  getChunk(uuidString).removeDocument(idTerm);
+               }
+               for (Iterator<Document> it = add.iterator(); it.hasNext();)
+               {
+                  Document doc = it.next();
+                  // entries in the add collection may be null, indicating a
+                  // node that could not be indexed successfully; skip those
+                  // and route the rest to their owning chunk
+                  if (doc != null)
+                  {
+                     String uuid = doc.get(FieldNames.UUID);
+                     getChunk(uuid).addDocuments(new Document[]{doc});
+                  }
+               }
+               // TODO for owning indexes only
+               for (PersistentIndexChunk idx : indexes)
+               {
+                  idx.commit();
+               }
+            }
+            finally
+            {
+               releaseMultiReader();
+            }
             return null;
          }
       });
    }
 
-   // -------------------------< internal
-   // >-------------------------------------
+   /**
+    * Checks the indexing queue for finished text extractor jobs and updates the
+    * index accordingly if there are any new ones.
+    * 
+    * @param transactionPresent
+    *            whether a transaction is in progress and the current
+    *            {@link #getTransactionId()} should be used. If
+    *            <code>false</code> a new transaction is created when documents
+    *            are transferred from the indexing queue to the index.
+    */
+   private void checkIndexingQueue(boolean transactionPresent)
+   {
+      Document[] docs = indexingQueue.getFinishedDocuments();
+      Map<String, Document> finished = new HashMap<String, Document>();
+      for (int i = 0; i < docs.length; i++)
+      {
+         String uuid = docs[i].get(FieldNames.UUID);
+         finished.put(uuid, docs[i]);
+      }
 
+      // now update index with the remaining ones if there are any
+      if (!finished.isEmpty())
+      {
+         log.info("updating index with {} nodes from indexing queue.", new Long(finished.size()));
+
+         // remove documents from the queue
+         for (Iterator<String> it = finished.keySet().iterator(); it.hasNext();)
+         {
+            indexingQueue.removeDocument(it.next());
+         }
+
+         try
+         {
+            if (transactionPresent)
+            {
+               for (Iterator<String> it = finished.keySet().iterator(); it.hasNext();)
+               {
+                  String uuidString = it.next();
+                  // check if indexing queue is still working on
+                  // this node from a previous update
+                  Document doc = indexingQueue.removeDocument(uuidString);
+                  if (doc != null)
+                  {
+                     Util.disposeDocument(doc);
+                  }
+                  Term idTerm = new Term(FieldNames.UUID, uuidString);
+                  // if the document cannot be deleted from the volatile index
+                  // delete it from one of the persistent indexes.
+                  getChunk(uuidString).removeDocument(idTerm);
+               }
+               for (Iterator<Document> it = finished.values().iterator(); it.hasNext();)
+               {
+                  Document doc = it.next();
+                  String uuid = doc.get(FieldNames.UUID);
+                  getChunk(uuid).addDocuments(new Document[]{doc});
+               }
+            }
+            else
+            {
+               update(finished.keySet(), finished.values());
+            }
+         }
+         catch (IOException e)
+         {
+            // update failed
+            log.warn("Failed to update index with deferred text extraction", e);
+         }
+      }
+   }
+
    /**
     * Recursively creates an index starting with the NodeState
     * <code>node</code>.
@@ -700,96 +706,43 @@
       return count;
    }
 
-   /**
-    * Checks the indexing queue for finished text extrator jobs and updates the
-    * index accordingly if there are any new ones. This method is synchronized
-    * and should only be called by the timer task that periodically checks if
-    * there are documents ready in the indexing queue. A new transaction is
-    * used when documents are transfered from the indexing queue to the index.
-    */
-   private synchronized void checkIndexingQueue()
+   private PersistentIndexChunk getChunk(String uuid)
    {
-      checkIndexingQueue(false);
+      return indexes.get(chunkService.getChunkId(uuid));
    }
 
    /**
-    * Checks the indexing queue for finished text extrator jobs and updates the
-    * index accordingly if there are any new ones.
+    * Releases the {@link #multiReader} and sets it <code>null</code>. If the
+    * reader is already <code>null</code> this method does nothing. When this
+    * method returns {@link #multiReader} is guaranteed to be <code>null</code>
+    * even if an exception is thrown.
+    * <p/>
+    * Please note that this method does not take care of any synchronization. A
+    * caller must ensure that it is the only thread operating on this multi
+    * index, or that it holds the {@link #updateMonitor}.
     * 
-    * @param transactionPresent
-    *            whether a transaction is in progress and the current
-    *            {@link #getTransactionId()} should be used. If
-    *            <code>false</code> a new transaction is created when documents
-    *            are transfered from the indexing queue to the index.
+    * @throws IOException
+    *             if an error occurs while releasing the reader.
     */
-   private void checkIndexingQueue(boolean transactionPresent)
+   private void releaseMultiReader() throws IOException
    {
-      Document[] docs = indexingQueue.getFinishedDocuments();
-      Map<String, Document> finished = new HashMap<String, Document>();
-      for (int i = 0; i < docs.length; i++)
+      SecurityHelper.doPriviledgedIOExceptionAction(new PrivilegedExceptionAction<Object>()
       {
-         String uuid = docs[i].get(FieldNames.UUID);
-         finished.put(uuid, docs[i]);
-      }
-
-      // now update index with the remaining ones if there are any
-      if (!finished.isEmpty())
-      {
-         log.info("updating index with {} nodes from indexing queue.", new Long(finished.size()));
-
-         // remove documents from the queue
-         for (Iterator<String> it = finished.keySet().iterator(); it.hasNext();)
+         public Object run() throws Exception
          {
-            indexingQueue.removeDocument(it.next());
-         }
-
-         try
-         {
-            if (transactionPresent)
+            if (multiReader != null)
             {
-               for (Iterator<String> it = finished.keySet().iterator(); it.hasNext();)
+               try
                {
-                  String uuidString = it.next();
-                  // check if indexing queue is still working on
-                  // this node from a previous update
-                  Document doc = indexingQueue.removeDocument(uuidString);
-                  if (doc != null)
-                  {
-                     Util.disposeDocument(doc);
-                  }
-                  Term idTerm = new Term(FieldNames.UUID, uuidString);
-                  // if the document cannot be deleted from the volatile index
-                  // delete it from one of the persistent indexes.
-                  getChunk(uuidString).removeDocument(idTerm);
+                  multiReader.release();
                }
-               for (Iterator<Document> it = finished.values().iterator(); it.hasNext();)
+               finally
                {
-                  Document doc = it.next();
-                  String uuid = doc.get(FieldNames.UUID);
-                  getChunk(uuid).addDocuments(new Document[]{doc});
+                  multiReader = null;
                }
             }
-            else
-            {
-               update(finished.keySet(), finished.values());
-            }
+            return null;
          }
-         catch (IOException e)
-         {
-            // update failed
-            log.warn("Failed to update index with deferred text extraction", e);
-         }
-      }
+      });
    }
-
-   @Deprecated
-   public boolean getRedoLogApplied()
-   {
-      return false;
-   }
-
-   private PersistentIndex getChunk(String uuid)
-   {
-      return indexes.get(0);
-   }
-}
+}
\ No newline at end of file
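
Note on the routing: getChunk(uuid) now resolves the owning index as
indexes.get(chunkService.getChunkId(uuid)), and the constructor opens one
PersistentIndexChunk per chunk, flagging those outside getAssignedChunks(). The
ChunkServiceImpl instantiated in the constructor is still a stub (see the "TODO remove
this stub" above); a plausible single-node placeholder, shown purely as an illustration
rather than the committed implementation, could look like this (the chunk count of four
mirrors the removed hard-coded index names "1" through "4"):

   import java.util.HashSet;
   import java.util.Set;

   public class ChunkServiceImpl implements ChunkService
   {
      private static final int CHUNK_COUNT = 4;

      public int getChunkCount()
      {
         return CHUNK_COUNT;
      }

      public Set<Integer> getAssignedChunks()
      {
         // single-node mode: this instance owns every chunk
         Set<Integer> chunks = new HashSet<Integer>();
         for (int i = 0; i < CHUNK_COUNT; i++)
         {
            chunks.add(Integer.valueOf(i));
         }
         return chunks;
      }

      public int getChunkId(String uuid)
      {
         // stable hash routing; mask the sign bit so the modulo is a valid list index
         return (uuid.hashCode() & Integer.MAX_VALUE) % CHUNK_COUNT;
      }
   }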

Modified: jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
--- jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2010-10-14 14:11:54 UTC (rev 3296)
+++ jcr/branches/1.14-CNK/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2010-10-15 14:52:04 UTC (rev 3297)
@@ -570,7 +570,7 @@
       indexingConfig = createIndexingConfiguration(nsMappings);
       analyzer.setIndexingConfig(indexingConfig);
 
-      index = new ChunkIndex(this, context.getIndexingTree(), modeHandler, getIndexInfos(), getIndexUpdateMonitor());
+      index = new ChunkIndex(this, context.getIndexingTree());
       // if RW mode, create initial index and start check
       if (modeHandler.getMode() == IndexerIoMode.READ_WRITE)
       {
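
With chunk assignment handled inside ChunkIndex, SearchIndex now passes only itself and
the indexing tree. All writes still funnel through the synchronized update(remove, add)
batch; addDocument and removeDocument are thin wrappers around it, and removals run before
additions, so listing a uuid in both collections replaces its document. A caller-side
sketch (variable names are hypothetical):

   // replace the document of a changed node and drop a deleted one atomically
   Document fresh = index.createDocument(changedNode);
   index.update(
      Arrays.asList(new String[]{changedNode.getIdentifier(), deletedUuid}),
      Arrays.asList(new Document[]{fresh}));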


