[exo-jcr-commits] exo-jcr SVN: r975 - jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene.
do-not-reply at jboss.org
do-not-reply at jboss.org
Wed Dec 9 11:25:02 EST 2009
Author: skabashnyuk
Date: 2009-12-09 11:25:01 -0500 (Wed, 09 Dec 2009)
New Revision: 975
Modified:
jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
Log:
EXOJCR-291: Io mode
Modified: jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
===================================================================
--- jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-12-09 16:24:25 UTC (rev 974)
+++ jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2009-12-09 16:25:01 UTC (rev 975)
@@ -24,6 +24,7 @@
import org.exoplatform.services.jcr.datamodel.ItemData;
import org.exoplatform.services.jcr.datamodel.NodeData;
import org.exoplatform.services.jcr.impl.Constants;
+import org.exoplatform.services.jcr.impl.core.query.IndexerIoMode;
import org.exoplatform.services.jcr.impl.core.query.IndexingTree;
import org.exoplatform.services.jcr.impl.core.query.lucene.directory.DirectoryManager;
import org.slf4j.Logger;
@@ -73,2024 +74,2285 @@
* thread and reader threads is done using {@link #updateMonitor} and
* {@link #updateInProgress}.
*/
-public class MultiIndex {
+public class MultiIndex
+{
- /**
- * The logger instance for this class
- */
- private static final Logger log = LoggerFactory.getLogger(MultiIndex.class);
+ /**
+ * The logger instance for this class
+ */
+ private static final Logger log = LoggerFactory.getLogger(MultiIndex.class);
- /**
- * Names of active persistent index directories.
- */
- private final IndexInfos indexNames = new IndexInfos("indexes");
+ /**
+ * Names of active persistent index directories.
+ */
+ private final IndexInfos indexNames = new IndexInfos("indexes");
- /**
- * Names of index directories that can be deleted.
- */
- private final Set deletable = new HashSet();
+ /**
+ * Names of index directories that can be deleted.
+ */
+ private final Set deletable = new HashSet();
- /**
- * List of open persistent indexes. This list may also contain an open
- * PersistentIndex owned by the IndexMerger daemon. Such an index is not
- * registered with indexNames and <b>must not</b> be used in regular index
- * operations (delete node, etc.)!
- */
- private final List indexes = new ArrayList();
+ /**
+ * List of open persistent indexes. This list may also contain an open
+ * PersistentIndex owned by the IndexMerger daemon. Such an index is not
+ * registered with indexNames and <b>must not</b> be used in regular index
+ * operations (delete node, etc.)!
+ */
+ private final List indexes = new ArrayList();
- /**
- * The internal namespace mappings of the query manager.
- */
- private final NamespaceMappings nsMappings;
+ /**
+ * The internal namespace mappings of the query manager.
+ */
+ private final NamespaceMappings nsMappings;
- /**
- * The directory manager.
- */
- private final DirectoryManager directoryManager;
+ /**
+ * The directory manager.
+ */
+ private final DirectoryManager directoryManager;
- /**
- * The base directory to store the index.
- */
- private final Directory indexDir;
+ /**
+ * The base directory to store the index.
+ */
+ private final Directory indexDir;
- /**
- * The query handler
- */
- private final SearchIndex handler;
+ /**
+ * The query handler
+ */
+ private final SearchIndex handler;
- /**
- * The volatile index.
- */
- private VolatileIndex volatileIndex;
+ /**
+ * The volatile index.
+ */
+ private VolatileIndex volatileIndex;
- /**
- * Flag indicating whether an update operation is in progress.
- */
- private boolean updateInProgress = false;
+ /**
+ * Flag indicating whether an update operation is in progress.
+ */
+ private boolean updateInProgress = false;
- /**
- * If not <code>null</code> points to a valid <code>IndexReader</code> that
- * reads from all indexes, including volatile and persistent indexes.
- */
- private CachingMultiIndexReader multiReader;
+ /**
+ * If not <code>null</code> points to a valid <code>IndexReader</code> that
+ * reads from all indexes, including volatile and persistent indexes.
+ */
+ private CachingMultiIndexReader multiReader;
- /**
- * Shared document number cache across all persistent indexes.
- */
- private final DocNumberCache cache;
+ /**
+ * Shared document number cache across all persistent indexes.
+ */
+ private final DocNumberCache cache;
- /**
- * Monitor to use to synchronize access to {@link #multiReader} and
- * {@link #updateInProgress}.
- */
- private final Object updateMonitor = new Object();
+ /**
+ * Monitor to use to synchronize access to {@link #multiReader} and
+ * {@link #updateInProgress}.
+ */
+ private final Object updateMonitor = new Object();
- /**
- * <code>true</code> if the redo log contained entries on startup.
- */
- private boolean redoLogApplied = false;
+ /**
+ * <code>true</code> if the redo log contained entries on startup.
+ */
+ private boolean redoLogApplied = false;
- /**
- * The time this index was last flushed or a transaction was committed.
- */
- private long lastFlushTime;
+ /**
+ * The time this index was last flushed or a transaction was committed.
+ */
+ private long lastFlushTime;
- /**
- * The <code>IndexMerger</code> for this <code>MultiIndex</code>.
- */
- private final IndexMerger merger;
+ /**
+ * The <code>IndexMerger</code> for this <code>MultiIndex</code>.
+ */
+ private final IndexMerger merger;
- /**
- * Timer to schedule flushes of this index after some idle time.
- */
- private static final Timer FLUSH_TIMER = new Timer(true);
+ /**
+ * Timer to schedule flushes of this index after some idle time.
+ */
+ private static final Timer FLUSH_TIMER = new Timer(true);
- /**
- * Task that is periodically called by {@link #FLUSH_TIMER} and checks if
- * index should be flushed.
- */
- private final TimerTask flushTask;
+ /**
+ * Task that is periodically called by {@link #FLUSH_TIMER} and checks if
+ * index should be flushed.
+ */
+ private final TimerTask flushTask;
- /**
- * The RedoLog of this <code>MultiIndex</code>.
- */
- private final RedoLog redoLog;
+ /**
+ * The RedoLog of this <code>MultiIndex</code>.
+ */
+ private final RedoLog redoLog;
- /**
- * The indexing queue with pending text extraction jobs.
- */
- private IndexingQueue indexingQueue;
+ /**
+ * The indexing queue with pending text extraction jobs.
+ */
+ private IndexingQueue indexingQueue;
- /**
- * Set<NodeId> of uuids that should not be indexed.
- */
- private final IndexingTree indexingTree;
+ /**
+ * Set<NodeId> of uuids that should not be indexed.
+ */
+ private final IndexingTree indexingTree;
- /**
- * The next transaction id.
- */
- private long nextTransactionId = 0;
+ /**
+ * The next transaction id.
+ */
+ private long nextTransactionId = 0;
- /**
- * The current transaction id.
- */
- private long currentTransactionId = -1;
+ /**
+ * The current transaction id.
+ */
+ private long currentTransactionId = -1;
- /**
- * Flag indicating whether re-indexing is running.
- */
- private boolean reindexing = false;
+ /**
+ * Flag indicating whether re-indexing is running.
+ */
+ private boolean reindexing = false;
- /**
- * The index format version of this multi index.
- */
- private final IndexFormatVersion version;
+ /**
+ * The index format version of this multi index.
+ */
+ private final IndexFormatVersion version;
- /**
- * Creates a new MultiIndex.
- *
- * @param handler
- * the search handler
- * @param excludedIDs
- * Set<NodeId> that contains uuids that should not be indexed
- * nor further traversed.
- * @throws IOException
- * if an error occurs
- */
- MultiIndex(SearchIndex handler, IndexingTree indexingTree)
- throws IOException {
- this.directoryManager = handler.getDirectoryManager();
- this.indexDir = directoryManager.getDirectory(".");
- this.handler = handler;
- this.cache = new DocNumberCache(handler.getCacheSize());
- this.redoLog = new RedoLog(indexDir);
- this.indexingTree = indexingTree;
- this.nsMappings = handler.getNamespaceMappings();
+ /**
+ * Indexer io mode
+ */
+ private IndexerIoMode ioMode = IndexerIoMode.READ_WRITE;
- if (indexNames.exists(indexDir)) {
- indexNames.read(indexDir);
- }
+ /**
+ * Creates a new MultiIndex.
+ *
+ * @param handler
+ * the search handler
+ * @param excludedIDs
+ * Set<NodeId> that contains uuids that should not be indexed
+ * nor further traversed.
+ * @throws IOException
+ * if an error occurs
+ */
+ MultiIndex(SearchIndex handler, IndexingTree indexingTree) throws IOException
+ {
+ this.directoryManager = handler.getDirectoryManager();
+ this.indexDir = directoryManager.getDirectory(".");
+ this.handler = handler;
+ this.cache = new DocNumberCache(handler.getCacheSize());
+ this.redoLog = new RedoLog(indexDir);
+ this.indexingTree = indexingTree;
+ this.nsMappings = handler.getNamespaceMappings();
- // as of 1.5 deletable file is not used anymore
- removeDeletable();
+ if (indexNames.exists(indexDir))
+ {
+ indexNames.read(indexDir);
+ }
- // initialize IndexMerger
- merger = new IndexMerger(this);
- merger.setMaxMergeDocs(handler.getMaxMergeDocs());
- merger.setMergeFactor(handler.getMergeFactor());
- merger.setMinMergeDocs(handler.getMinMergeDocs());
+ // as of 1.5 deletable file is not used anymore
+ removeDeletable();
- IndexingQueueStore store = new IndexingQueueStore(indexDir);
+ // initialize IndexMerger
+ merger = new IndexMerger(this);
+ merger.setMaxMergeDocs(handler.getMaxMergeDocs());
+ merger.setMergeFactor(handler.getMergeFactor());
+ merger.setMinMergeDocs(handler.getMinMergeDocs());
- // initialize indexing queue
- this.indexingQueue = new IndexingQueue(store);
+ IndexingQueueStore store = new IndexingQueueStore(indexDir);
- // open persistent indexes
- for (int i = 0; i < indexNames.size(); i++) {
- String name = indexNames.getName(i);
- // only open if it still exists
- // it is possible that indexNames still contains a name for
- // an index that has been deleted, but indexNames has not been
- // written to disk.
- if (!directoryManager.hasDirectory(name)) {
- log.debug("index does not exist anymore: " + name);
- // move on to next index
- continue;
- }
- PersistentIndex index = new PersistentIndex(name, handler
- .getTextAnalyzer(), handler.getSimilarity(), cache,
- indexingQueue, directoryManager);
- index.setMaxFieldLength(handler.getMaxFieldLength());
- index.setUseCompoundFile(handler.getUseCompoundFile());
- index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
- indexes.add(index);
- merger.indexAdded(index.getName(), index.getNumDocuments());
- }
+ // initialize indexing queue
+ this.indexingQueue = new IndexingQueue(store);
- // init volatile index
- resetVolatileIndex();
+ // open persistent indexes
+ for (int i = 0; i < indexNames.size(); i++)
+ {
+ String name = indexNames.getName(i);
+ // only open if it still exists
+ // it is possible that indexNames still contains a name for
+ // an index that has been deleted, but indexNames has not been
+ // written to disk.
+ if (!directoryManager.hasDirectory(name))
+ {
+ log.debug("index does not exist anymore: " + name);
+ // move on to next index
+ continue;
+ }
+ PersistentIndex index =
+ new PersistentIndex(name, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+ directoryManager);
+ index.setMaxFieldLength(handler.getMaxFieldLength());
+ index.setUseCompoundFile(handler.getUseCompoundFile());
+ index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
+ indexes.add(index);
+ merger.indexAdded(index.getName(), index.getNumDocuments());
+ }
- // set index format version and at the same time
- // initialize hierarchy cache if requested.
- CachingMultiIndexReader reader = getIndexReader(handler
- .isInitializeHierarchyCache());
- try {
- version = IndexFormatVersion.getVersion(reader);
- } finally {
- reader.release();
- }
+ // init volatile index
+ resetVolatileIndex();
- indexingQueue.initialize(this);
+ // set index format version and at the same time
+ // initialize hierarchy cache if requested.
+ CachingMultiIndexReader reader = getIndexReader(handler.isInitializeHierarchyCache());
+ try
+ {
+ version = IndexFormatVersion.getVersion(reader);
+ }
+ finally
+ {
+ reader.release();
+ }
- redoLogApplied = redoLog.hasEntries();
+ indexingQueue.initialize(this);
- // run recovery
- Recovery.run(this, redoLog);
+ redoLogApplied = redoLog.hasEntries();
- // enqueue unused segments for deletion
- enqueueUnusedSegments();
- attemptDelete();
+ // run recovery
+ Recovery.run(this, redoLog);
- // now that we are ready, start index merger
- merger.start();
+ // enqueue unused segments for deletion
+ enqueueUnusedSegments();
+ attemptDelete();
- if (redoLogApplied) {
- // wait for the index merge to finish pending jobs
- try {
- merger.waitUntilIdle();
- } catch (InterruptedException e) {
- // move on
- }
- flush();
- }
+ // now that we are ready, start index merger
+ merger.start();
- flushTask = new TimerTask() {
- public void run() {
- // check if there are any indexing jobs finished
- checkIndexingQueue();
- // check if volatile index should be flushed
- checkFlush();
- }
- };
+ if (redoLogApplied)
+ {
+ // wait for the index merge to finish pending jobs
+ try
+ {
+ merger.waitUntilIdle();
+ }
+ catch (InterruptedException e)
+ {
+ // move on
+ }
+ flush();
+ }
- if (indexNames.size() > 0) {
- scheduleFlushTask();
- }
- }
+ flushTask = new TimerTask()
+ {
+ public void run()
+ {
+ // check if there are any indexing jobs finished
+ checkIndexingQueue();
+ // check if volatile index should be flushed
+ checkFlush();
+ }
+ };
- /**
- * Returns the number of documents in this index.
- *
- * @return the number of documents in this index.
- * @throws IOException
- * if an error occurs while reading from the index.
- */
- int numDocs() throws IOException {
- if (indexNames.size() == 0) {
- return volatileIndex.getNumDocuments();
- } else {
- CachingMultiIndexReader reader = getIndexReader();
- try {
- return reader.numDocs();
- } finally {
- reader.release();
- }
- }
- }
+ if (indexNames.size() > 0)
+ {
+ scheduleFlushTask();
+ }
+ }
- /**
- * @return the index format version for this multi index.
- */
- IndexFormatVersion getIndexFormatVersion() {
- return version;
- }
+ /**
+ * Returns the number of documents in this index.
+ *
+ * @return the number of documents in this index.
+ * @throws IOException
+ * if an error occurs while reading from the index.
+ */
+ int numDocs() throws IOException
+ {
+ if (indexNames.size() == 0)
+ {
+ return volatileIndex.getNumDocuments();
+ }
+ else
+ {
+ CachingMultiIndexReader reader = getIndexReader();
+ try
+ {
+ return reader.numDocs();
+ }
+ finally
+ {
+ reader.release();
+ }
+ }
+ }
- /**
- * Creates an initial index by traversing the node hierarchy starting at the
- * node with <code>rootId</code>.
- *
- * @param stateMgr
- * the item state manager.
- * @param rootId
- * the id of the node from where to start.
- * @param rootPath
- * the path of the node from where to start.
- * @throws IOException
- * if an error occurs while indexing the workspace.
- * @throws IllegalStateException
- * if this index is not empty.
- */
- void createInitialIndex(ItemDataConsumer stateMgr) throws IOException {
- // only do an initial index if there are no indexes at all
- if (indexNames.size() == 0) {
- reindexing = true;
- try {
- long count = 0;
- // traverse and index workspace
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- // NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
- count = createIndex(indexingTree.getIndexingRoot(), stateMgr,
- count);
- executeAndLog(new Commit(getTransactionId()));
- log.info("Created initial index for {} nodes", new Long(count));
- releaseMultiReader();
- scheduleFlushTask();
- } catch (Exception e) {
- String msg = "Error indexing workspace";
- IOException ex = new IOException(msg);
- ex.initCause(e);
- throw ex;
- } finally {
- reindexing = false;
- }
- } else {
- throw new IllegalStateException("Index already present");
- }
- }
+ /**
+ * @return the index format version for this multi index.
+ */
+ IndexFormatVersion getIndexFormatVersion()
+ {
+ return version;
+ }
- /**
- * Atomically updates the index by removing some documents and adding
- * others.
- *
- * @param remove
- * collection of <code>UUID</code>s that identify documents to
- * remove
- * @param add
- * collection of <code>Document</code>s to add. Some of the
- * elements in this collection may be <code>null</code>, to
- * indicate that a node could not be indexed successfully.
- * @throws IOException
- * if an error occurs while updating the index.
- */
- synchronized void update(Collection remove, Collection add)
- throws IOException {
- // make sure a reader is available during long updates
- if (add.size() > handler.getBufferSize()) {
- try {
- getIndexReader().release();
- } catch (IOException e) {
- // do not fail if an exception is thrown here
- log.warn("unable to prepare index reader "
- + "for queries during update", e);
- }
- }
+ /**
+ * Creates an initial index by traversing the node hierarchy starting at the
+ * node with <code>rootId</code>.
+ *
+ * @param stateMgr
+ * the item state manager.
+ * @param rootId
+ * the id of the node from where to start.
+ * @param rootPath
+ * the path of the node from where to start.
+ * @throws IOException
+ * if an error occurs while indexing the workspace.
+ * @throws IllegalStateException
+ * if this index is not empty.
+ */
+ void createInitialIndex(ItemDataConsumer stateMgr) throws IOException
+ {
+ // only do an initial index if there are no indexes at all
+ if (indexNames.size() == 0)
+ {
+ reindexing = true;
+ try
+ {
+ long count = 0;
+ // traverse and index workspace
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ // NodeData rootState = (NodeData) stateMgr.getItemData(rootId);
+ count = createIndex(indexingTree.getIndexingRoot(), stateMgr, count);
+ executeAndLog(new Commit(getTransactionId()));
+ log.info("Created initial index for {} nodes", new Long(count));
+ releaseMultiReader();
+ scheduleFlushTask();
+ }
+ catch (Exception e)
+ {
+ String msg = "Error indexing workspace";
+ IOException ex = new IOException(msg);
+ ex.initCause(e);
+ throw ex;
+ }
+ finally
+ {
+ reindexing = false;
+ }
+ }
+ else
+ {
+ throw new IllegalStateException("Index already present");
+ }
+ }
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- long transactionId = nextTransactionId++;
- executeAndLog(new Start(transactionId));
+ /**
+ * Atomically updates the index by removing some documents and adding
+ * others.
+ *
+ * @param remove
+ * collection of <code>UUID</code>s that identify documents to
+ * remove
+ * @param add
+ * collection of <code>Document</code>s to add. Some of the
+ * elements in this collection may be <code>null</code>, to
+ * indicate that a node could not be indexed successfully.
+ * @throws IOException
+ * if an error occurs while updating the index.
+ */
+ synchronized void update(Collection remove, Collection add) throws IOException
+ {
+ // make sure a reader is available during long updates
+ if (add.size() > handler.getBufferSize())
+ {
+ try
+ {
+ getIndexReader().release();
+ }
+ catch (IOException e)
+ {
+ // do not fail if an exception is thrown here
+ log.warn("unable to prepare index reader " + "for queries during update", e);
+ }
+ }
- boolean flush = false;
- for (Iterator it = remove.iterator(); it.hasNext();) {
- executeAndLog(new DeleteNode(transactionId, (String) it.next()));
- }
- for (Iterator it = add.iterator(); it.hasNext();) {
- Document doc = (Document) it.next();
- if (doc != null) {
- executeAndLog(new AddNode(transactionId, doc));
- // commit volatile index if needed
- flush |= checkVolatileCommit();
- }
- }
- executeAndLog(new Commit(transactionId));
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ long transactionId = nextTransactionId++;
+ executeAndLog(new Start(transactionId));
- // flush whole index when volatile index has been commited.
- if (flush) {
- flush();
- }
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
+ boolean flush = false;
+ for (Iterator it = remove.iterator(); it.hasNext();)
+ {
+ executeAndLog(new DeleteNode(transactionId, (String)it.next()));
+ }
+ for (Iterator it = add.iterator(); it.hasNext();)
+ {
+ Document doc = (Document)it.next();
+ if (doc != null)
+ {
+ executeAndLog(new AddNode(transactionId, doc));
+ // commit volatile index if needed
+ flush |= checkVolatileCommit();
+ }
+ }
+ executeAndLog(new Commit(transactionId));
- /**
- * Adds a document to the index.
- *
- * @param doc
- * the document to add.
- * @throws IOException
- * if an error occurs while adding the document to the index.
- */
- void addDocument(Document doc) throws IOException {
- update(Collections.EMPTY_LIST, Arrays.asList(new Document[] { doc }));
- }
+ // flush whole index when volatile index has been commited.
+ if (flush)
+ {
+ flush();
+ }
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
- /**
- * Deletes the first document that matches the <code>uuid</code>.
- *
- * @param uuid
- * document that match this <code>uuid</code> will be deleted.
- * @throws IOException
- * if an error occurs while deleting the document.
- */
- void removeDocument(String uuid) throws IOException {
- update(Arrays.asList(new String[] { uuid }), Collections.EMPTY_LIST);
- }
+ /**
+ * Adds a document to the index.
+ *
+ * @param doc
+ * the document to add.
+ * @throws IOException
+ * if an error occurs while adding the document to the index.
+ */
+ void addDocument(Document doc) throws IOException
+ {
+ update(Collections.EMPTY_LIST, Arrays.asList(new Document[]{doc}));
+ }
- /**
- * Deletes all documents that match the <code>uuid</code>.
- *
- * @param uuid
- * documents that match this <code>uuid</code> will be deleted.
- * @return the number of deleted documents.
- * @throws IOException
- * if an error occurs while deleting documents.
- */
- synchronized int removeAllDocuments(String uuid) throws IOException {
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- int num;
- try {
- Term idTerm = new Term(FieldNames.UUID, uuid.toString());
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- num = volatileIndex.removeDocument(idTerm);
- if (num > 0) {
- redoLog.append(new DeleteNode(getTransactionId(), uuid));
- }
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
- // only remove documents from registered indexes
- if (indexNames.contains(index.getName())) {
- int removed = index.removeDocument(idTerm);
- if (removed > 0) {
- redoLog
- .append(new DeleteNode(getTransactionId(), uuid));
- }
- num += removed;
- }
- }
- executeAndLog(new Commit(getTransactionId()));
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- return num;
- }
+ /**
+ * Deletes the first document that matches the <code>uuid</code>.
+ *
+ * @param uuid
+ * document that match this <code>uuid</code> will be deleted.
+ * @throws IOException
+ * if an error occurs while deleting the document.
+ */
+ void removeDocument(String uuid) throws IOException
+ {
+ update(Arrays.asList(new String[]{uuid}), Collections.EMPTY_LIST);
+ }
- /**
- * Returns <code>IndexReader</code>s for the indexes named
- * <code>indexNames</code>. An <code>IndexListener</code> is registered and
- * notified when documents are deleted from one of the indexes in
- * <code>indexNames</code>.
- * <p/>
- * Note: the number of <code>IndexReaders</code> returned by this method is
- * not necessarily the same as the number of index names passed. An index
- * might have been deleted and is not reachable anymore.
- *
- * @param indexNames
- * the names of the indexes for which to obtain readers.
- * @param listener
- * the listener to notify when documents are deleted.
- * @return the <code>IndexReaders</code>.
- * @throws IOException
- * if an error occurs acquiring the index readers.
- */
- synchronized IndexReader[] getIndexReaders(String[] indexNames,
- IndexListener listener) throws IOException {
- Set names = new HashSet(Arrays.asList(indexNames));
- Map indexReaders = new HashMap();
+ /**
+ * Deletes all documents that match the <code>uuid</code>.
+ *
+ * @param uuid
+ * documents that match this <code>uuid</code> will be deleted.
+ * @return the number of deleted documents.
+ * @throws IOException
+ * if an error occurs while deleting documents.
+ */
+ synchronized int removeAllDocuments(String uuid) throws IOException
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ int num;
+ try
+ {
+ Term idTerm = new Term(FieldNames.UUID, uuid.toString());
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ num = volatileIndex.removeDocument(idTerm);
+ if (num > 0)
+ {
+ redoLog.append(new DeleteNode(getTransactionId(), uuid));
+ }
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ PersistentIndex index = (PersistentIndex)indexes.get(i);
+ // only remove documents from registered indexes
+ if (indexNames.contains(index.getName()))
+ {
+ int removed = index.removeDocument(idTerm);
+ if (removed > 0)
+ {
+ redoLog.append(new DeleteNode(getTransactionId(), uuid));
+ }
+ num += removed;
+ }
+ }
+ executeAndLog(new Commit(getTransactionId()));
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ return num;
+ }
- try {
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex index = (PersistentIndex) it.next();
- if (names.contains(index.getName())) {
- indexReaders.put(index.getReadOnlyIndexReader(listener),
- index);
- }
- }
- } catch (IOException e) {
- // release readers obtained so far
- for (Iterator it = indexReaders.entrySet().iterator(); it.hasNext();) {
- Map.Entry entry = (Map.Entry) it.next();
- ReadOnlyIndexReader reader = (ReadOnlyIndexReader) entry
- .getKey();
- try {
- reader.release();
- } catch (IOException ex) {
- log.warn("Exception releasing index reader: " + ex);
- }
- ((PersistentIndex) entry.getValue()).resetListener();
- }
- throw e;
- }
+ /**
+ * Returns <code>IndexReader</code>s for the indexes named
+ * <code>indexNames</code>. An <code>IndexListener</code> is registered and
+ * notified when documents are deleted from one of the indexes in
+ * <code>indexNames</code>.
+ * <p/>
+ * Note: the number of <code>IndexReaders</code> returned by this method is
+ * not necessarily the same as the number of index names passed. An index
+ * might have been deleted and is not reachable anymore.
+ *
+ * @param indexNames
+ * the names of the indexes for which to obtain readers.
+ * @param listener
+ * the listener to notify when documents are deleted.
+ * @return the <code>IndexReaders</code>.
+ * @throws IOException
+ * if an error occurs acquiring the index readers.
+ */
+ synchronized IndexReader[] getIndexReaders(String[] indexNames, IndexListener listener) throws IOException
+ {
+ Set names = new HashSet(Arrays.asList(indexNames));
+ Map indexReaders = new HashMap();
- return (IndexReader[]) indexReaders.keySet().toArray(
- new IndexReader[indexReaders.size()]);
- }
+ try
+ {
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex index = (PersistentIndex)it.next();
+ if (names.contains(index.getName()))
+ {
+ indexReaders.put(index.getReadOnlyIndexReader(listener), index);
+ }
+ }
+ }
+ catch (IOException e)
+ {
+ // release readers obtained so far
+ for (Iterator it = indexReaders.entrySet().iterator(); it.hasNext();)
+ {
+ Map.Entry entry = (Map.Entry)it.next();
+ ReadOnlyIndexReader reader = (ReadOnlyIndexReader)entry.getKey();
+ try
+ {
+ reader.release();
+ }
+ catch (IOException ex)
+ {
+ log.warn("Exception releasing index reader: " + ex);
+ }
+ ((PersistentIndex)entry.getValue()).resetListener();
+ }
+ throw e;
+ }
- /**
- * Creates a new Persistent index. The new index is not registered with this
- * <code>MultiIndex</code>.
- *
- * @param indexName
- * the name of the index to open, or <code>null</code> if an
- * index with a new name should be created.
- * @return a new <code>PersistentIndex</code>.
- * @throws IOException
- * if a new index cannot be created.
- */
- synchronized PersistentIndex getOrCreateIndex(String indexName)
- throws IOException {
- // check existing
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- return idx;
- }
- }
+ return (IndexReader[])indexReaders.keySet().toArray(new IndexReader[indexReaders.size()]);
+ }
- // otherwise open / create it
- if (indexName == null) {
- do {
- indexName = indexNames.newName();
- } while (directoryManager.hasDirectory(indexName));
- }
- PersistentIndex index;
- try {
- index = new PersistentIndex(indexName, handler.getTextAnalyzer(),
- handler.getSimilarity(), cache, indexingQueue,
- directoryManager);
- } catch (IOException e) {
- // do some clean up
- if (!directoryManager.delete(indexName)) {
- deletable.add(indexName);
- }
- throw e;
- }
- index.setMaxFieldLength(handler.getMaxFieldLength());
- index.setUseCompoundFile(handler.getUseCompoundFile());
- index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
+ /**
+ * Creates a new Persistent index. The new index is not registered with this
+ * <code>MultiIndex</code>.
+ *
+ * @param indexName
+ * the name of the index to open, or <code>null</code> if an
+ * index with a new name should be created.
+ * @return a new <code>PersistentIndex</code>.
+ * @throws IOException
+ * if a new index cannot be created.
+ */
+ synchronized PersistentIndex getOrCreateIndex(String indexName) throws IOException
+ {
+ // check existing
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ return idx;
+ }
+ }
- // add to list of open indexes and return it
- indexes.add(index);
- return index;
- }
+ // otherwise open / create it
+ if (indexName == null)
+ {
+ do
+ {
+ indexName = indexNames.newName();
+ }
+ while (directoryManager.hasDirectory(indexName));
+ }
+ PersistentIndex index;
+ try
+ {
+ index =
+ new PersistentIndex(indexName, handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
+ directoryManager);
+ }
+ catch (IOException e)
+ {
+ // do some clean up
+ if (!directoryManager.delete(indexName))
+ {
+ deletable.add(indexName);
+ }
+ throw e;
+ }
+ index.setMaxFieldLength(handler.getMaxFieldLength());
+ index.setUseCompoundFile(handler.getUseCompoundFile());
+ index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
- /**
- * Returns <code>true</code> if this multi index has an index segment with
- * the given name. This method even returns <code>true</code> if an index
- * segments has not yet been loaded / initialized but exists on disk.
- *
- * @param indexName
- * the name of the index segment.
- * @return <code>true</code> if it exists; otherwise <code>false</code>.
- * @throws IOException
- * if an error occurs while checking existence of directory.
- */
- synchronized boolean hasIndex(String indexName) throws IOException {
- // check existing
- for (Iterator it = indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- return true;
- }
- }
- // check if it exists on disk
- return directoryManager.hasDirectory(indexName);
- }
+ // add to list of open indexes and return it
+ indexes.add(index);
+ return index;
+ }
- /**
- * Replaces the indexes with names <code>obsoleteIndexes</code> with
- * <code>index</code>. Documents that must be deleted in <code>index</code>
- * can be identified with <code>Term</code>s in <code>deleted</code>.
- *
- * @param obsoleteIndexes
- * the names of the indexes to replace.
- * @param index
- * the new index that is the result of a merge of the indexes to
- * replace.
- * @param deleted
- * <code>Term</code>s that identify documents that must be
- * deleted in <code>index</code>.
- * @throws IOException
- * if an exception occurs while replacing the indexes.
- */
- void replaceIndexes(String[] obsoleteIndexes, PersistentIndex index,
- Collection deleted) throws IOException {
+ /**
+ * Returns <code>true</code> if this multi index has an index segment with
+ * the given name. This method even returns <code>true</code> if an index
+ * segment has not yet been loaded / initialized but exists on disk.
+ *
+ * @param indexName
+ * the name of the index segment.
+ * @return <code>true</code> if it exists; otherwise <code>false</code>.
+ * @throws IOException
+ * if an error occurs while checking existence of directory.
+ */
+ synchronized boolean hasIndex(String indexName) throws IOException
+ {
+ // check existing
+ for (Iterator it = indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ return true;
+ }
+ }
+ // check if it exists on disk
+ return directoryManager.hasDirectory(indexName);
+ }
- if (handler.isInitializeHierarchyCache()) {
- // force initializing of caches
- long time = System.currentTimeMillis();
- index.getReadOnlyIndexReader(true).release();
- time = System.currentTimeMillis() - time;
- log.debug("hierarchy cache initialized in {} ms", new Long(time));
- }
+ /**
+ * Replaces the indexes with names <code>obsoleteIndexes</code> with
+ * <code>index</code>. Documents that must be deleted in <code>index</code>
+ * can be identified with <code>Term</code>s in <code>deleted</code>.
+ *
+ * @param obsoleteIndexes
+ * the names of the indexes to replace.
+ * @param index
+ * the new index that is the result of a merge of the indexes to
+ * replace.
+ * @param deleted
+ * <code>Term</code>s that identify documents that must be
+ * deleted in <code>index</code>.
+ * @throws IOException
+ * if an exception occurs while replacing the indexes.
+ */
+ void replaceIndexes(String[] obsoleteIndexes, PersistentIndex index, Collection deleted) throws IOException
+ {
- synchronized (this) {
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- // if we are reindexing there is already an active transaction
- if (!reindexing) {
- executeAndLog(new Start(Action.INTERNAL_TRANS_REPL_INDEXES));
- }
- // delete obsolete indexes
- Set names = new HashSet(Arrays.asList(obsoleteIndexes));
- for (Iterator it = names.iterator(); it.hasNext();) {
- // do not try to delete indexes that are already gone
- String indexName = (String) it.next();
- if (indexNames.contains(indexName)) {
- executeAndLog(new DeleteIndex(getTransactionId(),
- indexName));
- }
- }
+ if (handler.isInitializeHierarchyCache())
+ {
+ // force initializing of caches
+ long time = System.currentTimeMillis();
+ index.getReadOnlyIndexReader(true).release();
+ time = System.currentTimeMillis() - time;
+ log.debug("hierarchy cache initialized in {} ms", new Long(time));
+ }
- // Index merger does not log an action when it creates the
- // target
- // index of the merge. We have to do this here.
- executeAndLog(new CreateIndex(getTransactionId(), index
- .getName()));
+ synchronized (this)
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ // if we are reindexing there is already an active transaction
+ if (!reindexing)
+ {
+ executeAndLog(new Start(Action.INTERNAL_TRANS_REPL_INDEXES));
+ }
+ // delete obsolete indexes
+ Set names = new HashSet(Arrays.asList(obsoleteIndexes));
+ for (Iterator it = names.iterator(); it.hasNext();)
+ {
+ // do not try to delete indexes that are already gone
+ String indexName = (String)it.next();
+ if (indexNames.contains(indexName))
+ {
+ executeAndLog(new DeleteIndex(getTransactionId(), indexName));
+ }
+ }
- executeAndLog(new AddIndex(getTransactionId(), index.getName()));
+ // Index merger does not log an action when it creates the
+ // target
+ // index of the merge. We have to do this here.
+ executeAndLog(new CreateIndex(getTransactionId(), index.getName()));
- // delete documents in index
- for (Iterator it = deleted.iterator(); it.hasNext();) {
- Term id = (Term) it.next();
- index.removeDocument(id);
- }
- index.commit();
+ executeAndLog(new AddIndex(getTransactionId(), index.getName()));
- if (!reindexing) {
- // only commit if we are not reindexing
- // when reindexing the final commit is done at the very end
- executeAndLog(new Commit(getTransactionId()));
- }
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
- if (reindexing) {
- // do some cleanup right away when reindexing
- attemptDelete();
- }
- }
+ // delete documents in index
+ for (Iterator it = deleted.iterator(); it.hasNext();)
+ {
+ Term id = (Term)it.next();
+ index.removeDocument(id);
+ }
+ index.commit();
- /**
- * Returns an read-only <code>IndexReader</code> that spans alls indexes of
- * this <code>MultiIndex</code>.
- *
- * @return an <code>IndexReader</code>.
- * @throws IOException
- * if an error occurs constructing the <code>IndexReader</code>.
- */
- public CachingMultiIndexReader getIndexReader() throws IOException {
- return getIndexReader(false);
- }
+ if (!reindexing)
+ {
+ // only commit if we are not reindexing
+ // when reindexing the final commit is done at the very end
+ executeAndLog(new Commit(getTransactionId()));
+ }
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
+ if (reindexing)
+ {
+ // do some cleanup right away when reindexing
+ attemptDelete();
+ }
+ }
- /**
- * Returns an read-only <code>IndexReader</code> that spans alls indexes of
- * this <code>MultiIndex</code>.
- *
- * @param initCache
- * when set <code>true</code> the hierarchy cache is completely
- * initialized before this call returns.
- * @return an <code>IndexReader</code>.
- * @throws IOException
- * if an error occurs constructing the <code>IndexReader</code>.
- */
- public synchronized CachingMultiIndexReader getIndexReader(boolean initCache)
- throws IOException {
- synchronized (updateMonitor) {
- if (multiReader != null) {
- multiReader.acquire();
- return multiReader;
- }
- // no reader available
- // wait until no update is in progress
- while (updateInProgress) {
- try {
- updateMonitor.wait();
- } catch (InterruptedException e) {
- throw new IOException(
- "Interrupted while waiting to aquire reader");
- }
- }
- // some other read thread might have created the reader in the
- // meantime -> check again
- if (multiReader == null) {
- List readerList = new ArrayList();
- for (int i = 0; i < indexes.size(); i++) {
- PersistentIndex pIdx = (PersistentIndex) indexes.get(i);
- if (indexNames.contains(pIdx.getName())) {
- readerList.add(pIdx.getReadOnlyIndexReader(initCache));
- }
- }
- readerList.add(volatileIndex.getReadOnlyIndexReader());
- ReadOnlyIndexReader[] readers = (ReadOnlyIndexReader[]) readerList
- .toArray(new ReadOnlyIndexReader[readerList.size()]);
- multiReader = new CachingMultiIndexReader(readers, cache);
- }
- multiReader.acquire();
- return multiReader;
- }
- }
+ /**
+ * Returns a read-only <code>IndexReader</code> that spans all indexes of
+ * this <code>MultiIndex</code>.
+ *
+ * @return an <code>IndexReader</code>.
+ * @throws IOException
+ * if an error occurs constructing the <code>IndexReader</code>.
+ */
+ public CachingMultiIndexReader getIndexReader() throws IOException
+ {
+ return getIndexReader(false);
+ }
- /**
- * Returns the volatile index.
- *
- * @return the volatile index.
- */
- VolatileIndex getVolatileIndex() {
- return volatileIndex;
- }
+ /**
+ * Returns a read-only <code>IndexReader</code> that spans all indexes of
+ * this <code>MultiIndex</code>.
+ *
+ * @param initCache
+ * when set <code>true</code> the hierarchy cache is completely
+ * initialized before this call returns.
+ * @return an <code>IndexReader</code>.
+ * @throws IOException
+ * if an error occurs constructing the <code>IndexReader</code>.
+ */
+ public synchronized CachingMultiIndexReader getIndexReader(boolean initCache) throws IOException
+ {
+ synchronized (updateMonitor)
+ {
+ if (multiReader != null)
+ {
+ multiReader.acquire();
+ return multiReader;
+ }
+ // no reader available
+ // wait until no update is in progress
+ while (updateInProgress)
+ {
+ try
+ {
+ updateMonitor.wait();
+ }
+ catch (InterruptedException e)
+ {
+ throw new IOException("Interrupted while waiting to aquire reader");
+ }
+ }
+ // some other read thread might have created the reader in the
+ // meantime -> check again
+ if (multiReader == null)
+ {
+ List readerList = new ArrayList();
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ PersistentIndex pIdx = (PersistentIndex)indexes.get(i);
+ if (indexNames.contains(pIdx.getName()))
+ {
+ readerList.add(pIdx.getReadOnlyIndexReader(initCache));
+ }
+ }
+ readerList.add(volatileIndex.getReadOnlyIndexReader());
+ ReadOnlyIndexReader[] readers =
+ (ReadOnlyIndexReader[])readerList.toArray(new ReadOnlyIndexReader[readerList.size()]);
+ multiReader = new CachingMultiIndexReader(readers, cache);
+ }
+ multiReader.acquire();
+ return multiReader;
+ }
+ }
- /**
- * Closes this <code>MultiIndex</code>.
- */
- void close() {
+ /**
+ * Returns the volatile index.
+ *
+ * @return the volatile index.
+ */
+ VolatileIndex getVolatileIndex()
+ {
+ return volatileIndex;
+ }
- // stop index merger
- // when calling this method we must not lock this MultiIndex, otherwise
- // a deadlock might occur
- merger.dispose();
+ /**
+ * Closes this <code>MultiIndex</code>.
+ */
+ void close()
+ {
- synchronized (this) {
- // stop timer
- flushTask.cancel();
+ // stop index merger
+ // when calling this method we must not lock this MultiIndex, otherwise
+ // a deadlock might occur
+ merger.dispose();
- // commit / close indexes
- try {
- releaseMultiReader();
- } catch (IOException e) {
- log.error("Exception while closing search index.", e);
- }
- try {
- flush();
- } catch (IOException e) {
- log.error("Exception while closing search index.", e);
- }
- volatileIndex.close();
- for (int i = 0; i < indexes.size(); i++) {
- ((PersistentIndex) indexes.get(i)).close();
- }
+ synchronized (this)
+ {
+ // stop timer
+ flushTask.cancel();
- // close indexing queue
- indexingQueue.close();
+ // commit / close indexes
+ try
+ {
+ releaseMultiReader();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing search index.", e);
+ }
+ try
+ {
+ flush();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing search index.", e);
+ }
+ volatileIndex.close();
+ for (int i = 0; i < indexes.size(); i++)
+ {
+ ((PersistentIndex)indexes.get(i)).close();
+ }
- // finally close directory
- try {
- indexDir.close();
- } catch (IOException e) {
- log.error("Exception while closing directory.", e);
- }
- }
- }
+ // close indexing queue
+ indexingQueue.close();
- /**
- * Returns the namespace mappings of this search index.
- *
- * @return the namespace mappings of this search index.
- */
- NamespaceMappings getNamespaceMappings() {
- return nsMappings;
- }
+ // finally close directory
+ try
+ {
+ indexDir.close();
+ }
+ catch (IOException e)
+ {
+ log.error("Exception while closing directory.", e);
+ }
+ }
+ }
- /**
- * Returns the indexing queue for this multi index.
- *
- * @return the indexing queue for this multi index.
- */
- public IndexingQueue getIndexingQueue() {
- return indexingQueue;
- }
+ /**
+ * Returns the namespace mappings of this search index.
+ *
+ * @return the namespace mappings of this search index.
+ */
+ NamespaceMappings getNamespaceMappings()
+ {
+ return nsMappings;
+ }
- /**
- * Returns a lucene Document for the <code>node</code>.
- *
- * @param node
- * the node to index.
- * @return the index document.
- * @throws RepositoryException
- * if an error occurs while reading from the workspace.
- */
- Document createDocument(NodeData node) throws RepositoryException {
- return handler.createDocument(node, nsMappings, version);
- }
+ /**
+ * Returns the indexing queue for this multi index.
+ *
+ * @return the indexing queue for this multi index.
+ */
+ public IndexingQueue getIndexingQueue()
+ {
+ return indexingQueue;
+ }
- /**
- * Returns a lucene Document for the Node with <code>id</code>.
- *
- * @param id
- * the id of the node to index.
- * @return the index document.
- * @throws RepositoryException
- * if an error occurs while reading from the workspace or if
- * there is no node with <code>id</code>.
- */
- Document createDocument(String id) throws RepositoryException {
- ItemData data = handler.getContext().getItemStateManager().getItemData(
- id);
- if (data == null)
- throw new ItemNotFoundException("Item id=" + id + " not found");
- if (!data.isNode())
- throw new RepositoryException("Item with id " + id
- + " is not a node");
- return createDocument((NodeData) data);
+ /**
+ * Returns a lucene Document for the <code>node</code>.
+ *
+ * @param node
+ * the node to index.
+ * @return the index document.
+ * @throws RepositoryException
+ * if an error occurs while reading from the workspace.
+ */
+ Document createDocument(NodeData node) throws RepositoryException
+ {
+ return handler.createDocument(node, nsMappings, version);
+ }
- }
+ /**
+ * Returns a lucene Document for the Node with <code>id</code>.
+ *
+ * @param id
+ * the id of the node to index.
+ * @return the index document.
+ * @throws RepositoryException
+ * if an error occurs while reading from the workspace or if
+ * there is no node with <code>id</code>.
+ */
+ Document createDocument(String id) throws RepositoryException
+ {
+ ItemData data = handler.getContext().getItemStateManager().getItemData(id);
+ if (data == null)
+ throw new ItemNotFoundException("Item id=" + id + " not found");
+ if (!data.isNode())
+ throw new RepositoryException("Item with id " + id + " is not a node");
+ return createDocument((NodeData)data);
- /**
- * Returns <code>true</code> if the redo log contained entries while this
- * index was instantiated; <code>false</code> otherwise.
- *
- * @return <code>true</code> if the redo log contained entries.
- */
- boolean getRedoLogApplied() {
- return redoLogApplied;
- }
+ }
- /**
- * Removes the <code>index</code> from the list of active sub indexes. The
- * Index is not acutally deleted right away, but postponed to the
- * transaction commit.
- * <p/>
- * This method does not close the index, but rather expects that the index
- * has already been closed.
- *
- * @param index
- * the index to delete.
- */
- synchronized void deleteIndex(PersistentIndex index) {
- // remove it from the lists if index is registered
- indexes.remove(index);
- indexNames.removeName(index.getName());
- synchronized (deletable) {
- log.debug("Moved " + index.getName() + " to deletable");
- deletable.add(index.getName());
- }
- }
+ /**
+ * Returns <code>true</code> if the redo log contained entries while this
+ * index was instantiated; <code>false</code> otherwise.
+ *
+ * @return <code>true</code> if the redo log contained entries.
+ */
+ boolean getRedoLogApplied()
+ {
+ return redoLogApplied;
+ }
- /**
- * Flushes this <code>MultiIndex</code>. Persists all pending changes and
- * resets the redo log.
- *
- * @throws IOException
- * if the flush fails.
- */
- public void flush() throws IOException {
- synchronized (this) {
- // commit volatile index
- executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
- commitVolatileIndex();
+ /**
+ * Removes the <code>index</code> from the list of active sub indexes. The
+ * Index is not actually deleted right away, but postponed to the
+ * transaction commit.
+ * <p/>
+ * This method does not close the index, but rather expects that the index
+ * has already been closed.
+ *
+ * @param index
+ * the index to delete.
+ */
+ synchronized void deleteIndex(PersistentIndex index)
+ {
+ // remove it from the lists if index is registered
+ indexes.remove(index);
+ indexNames.removeName(index.getName());
+ synchronized (deletable)
+ {
+ log.debug("Moved " + index.getName() + " to deletable");
+ deletable.add(index.getName());
+ }
+ }
- // commit persistent indexes
- for (int i = indexes.size() - 1; i >= 0; i--) {
- PersistentIndex index = (PersistentIndex) indexes.get(i);
- // only commit indexes we own
- // index merger also places PersistentIndex instances in
- // indexes,
- // but does not make them public by registering the name in
- // indexNames
- if (indexNames.contains(index.getName())) {
- index.commit();
- // check if index still contains documents
- if (index.getNumDocuments() == 0) {
- executeAndLog(new DeleteIndex(getTransactionId(), index
- .getName()));
- }
- }
- }
- executeAndLog(new Commit(getTransactionId()));
+ /**
+ * Flushes this <code>MultiIndex</code>. Persists all pending changes and
+ * resets the redo log.
+ *
+ * @throws IOException
+ * if the flush fails.
+ */
+ public void flush() throws IOException
+ {
+ synchronized (this)
+ {
+ // commit volatile index
+ executeAndLog(new Start(Action.INTERNAL_TRANSACTION));
+ commitVolatileIndex();
- indexNames.write(indexDir);
+ // commit persistent indexes
+ for (int i = indexes.size() - 1; i >= 0; i--)
+ {
+ PersistentIndex index = (PersistentIndex)indexes.get(i);
+ // only commit indexes we own
+ // index merger also places PersistentIndex instances in
+ // indexes,
+ // but does not make them public by registering the name in
+ // indexNames
+ if (indexNames.contains(index.getName()))
+ {
+ index.commit();
+ // check if index still contains documents
+ if (index.getNumDocuments() == 0)
+ {
+ executeAndLog(new DeleteIndex(getTransactionId(), index.getName()));
+ }
+ }
+ }
+ executeAndLog(new Commit(getTransactionId()));
- // reset redo log
- redoLog.clear();
+ indexNames.write(indexDir);
- lastFlushTime = System.currentTimeMillis();
- }
+ // reset redo log
+ redoLog.clear();
- // delete obsolete indexes
- attemptDelete();
- }
+ lastFlushTime = System.currentTimeMillis();
+ }
- /**
- * Releases the {@link #multiReader} and sets it <code>null</code>. If the
- * reader is already <code>null</code> this method does nothing. When this
- * method returns {@link #multiReader} is guaranteed to be <code>null</code>
- * even if an exception is thrown.
- * <p/>
- * Please note that this method does not take care of any synchronization. A
- * caller must ensure that it is the only thread operating on this multi
- * index, or that it holds the {@link #updateMonitor}.
- *
- * @throws IOException
- * if an error occurs while releasing the reader.
- */
- void releaseMultiReader() throws IOException {
- if (multiReader != null) {
- try {
- multiReader.release();
- } finally {
- multiReader = null;
- }
- }
- }
+ // delete obsolete indexes
+ attemptDelete();
+ }
- // -------------------------< internal
- // >-------------------------------------
+ /**
+ * Releases the {@link #multiReader} and sets it <code>null</code>. If the
+ * reader is already <code>null</code> this method does nothing. When this
+ * method returns {@link #multiReader} is guaranteed to be <code>null</code>
+ * even if an exception is thrown.
+ * <p/>
+ * Please note that this method does not take care of any synchronization. A
+ * caller must ensure that it is the only thread operating on this multi
+ * index, or that it holds the {@link #updateMonitor}.
+ *
+ * @throws IOException
+ * if an error occurs while releasing the reader.
+ */
+ void releaseMultiReader() throws IOException
+ {
+ if (multiReader != null)
+ {
+ try
+ {
+ multiReader.release();
+ }
+ finally
+ {
+ multiReader = null;
+ }
+ }
+ }
- /**
- * Enqueues unused segments for deletion in {@link #deletable}. This method
- * does not synchronize on {@link #deletable}! A caller must ensure that it
- * is the only one acting on the {@link #deletable} map.
- *
- * @throws IOException
- * if an error occurs while reading directories.
- */
- private void enqueueUnusedSegments() throws IOException {
- // walk through index segments
- String[] dirNames = directoryManager.getDirectoryNames();
- for (int i = 0; i < dirNames.length; i++) {
- if (dirNames[i].startsWith("_")
- && !indexNames.contains(dirNames[i])) {
- deletable.add(dirNames[i]);
- }
- }
- }
+ // -------------------------< internal
+ // >-------------------------------------
- private void scheduleFlushTask() {
- lastFlushTime = System.currentTimeMillis();
- FLUSH_TIMER.schedule(flushTask, 0, 1000);
- }
+ /**
+ * Enqueues unused segments for deletion in {@link #deletable}. This method
+ * does not synchronize on {@link #deletable}! A caller must ensure that it
+ * is the only one acting on the {@link #deletable} map.
+ *
+ * @throws IOException
+ * if an error occurs while reading directories.
+ */
+ private void enqueueUnusedSegments() throws IOException
+ {
+ // walk through index segments
+ String[] dirNames = directoryManager.getDirectoryNames();
+ for (int i = 0; i < dirNames.length; i++)
+ {
+ if (dirNames[i].startsWith("_") && !indexNames.contains(dirNames[i]))
+ {
+ deletable.add(dirNames[i]);
+ }
+ }
+ }
- /**
- * Resets the volatile index to a new instance.
- */
- private void resetVolatileIndex() throws IOException {
- volatileIndex = new VolatileIndex(handler.getTextAnalyzer(), handler
- .getSimilarity(), indexingQueue);
- volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
- volatileIndex.setMaxFieldLength(handler.getMaxFieldLength());
- volatileIndex.setBufferSize(handler.getBufferSize());
- }
+ private void scheduleFlushTask()
+ {
+ lastFlushTime = System.currentTimeMillis();
+ FLUSH_TIMER.schedule(flushTask, 0, 1000);
+ }
- /**
- * Returns the current transaction id.
- *
- * @return the current transaction id.
- */
- private long getTransactionId() {
- return currentTransactionId;
- }
+ /**
+ * Resets the volatile index to a new instance.
+ */
+ private void resetVolatileIndex() throws IOException
+ {
+ volatileIndex = new VolatileIndex(handler.getTextAnalyzer(), handler.getSimilarity(), indexingQueue);
+ volatileIndex.setUseCompoundFile(handler.getUseCompoundFile());
+ volatileIndex.setMaxFieldLength(handler.getMaxFieldLength());
+ volatileIndex.setBufferSize(handler.getBufferSize());
+ }
- /**
- * Executes action <code>a</code> and appends the action to the redo log if
- * successful.
- *
- * @param a
- * the <code>Action</code> to execute.
- * @return the executed action.
- * @throws IOException
- * if an error occurs while executing the action or appending
- * the action to the redo log.
- */
- private Action executeAndLog(Action a) throws IOException {
- a.execute(this);
- redoLog.append(a);
- // please note that flushing the redo log is only required on
- // commit, but we also want to keep track of new indexes for sure.
- // otherwise it might happen that unused index folders are orphaned
- // after a crash.
- if (a.getType() == Action.TYPE_COMMIT
- || a.getType() == Action.TYPE_ADD_INDEX) {
- redoLog.flush();
- }
- return a;
- }
+ /**
+ * Returns the current transaction id.
+ *
+ * @return the current transaction id.
+ */
+ private long getTransactionId()
+ {
+ return currentTransactionId;
+ }
- /**
- * Checks if it is needed to commit the volatile index according to
- * {@link SearchIndex#getMaxVolatileIndexSize()}.
- *
- * @return <code>true</code> if the volatile index has been committed,
- * <code>false</code> otherwise.
- * @throws IOException
- * if an error occurs while committing the volatile index.
- */
- private boolean checkVolatileCommit() throws IOException {
- if (volatileIndex.getRamSizeInBytes() >= handler
- .getMaxVolatileIndexSize()) {
- commitVolatileIndex();
- return true;
- }
- return false;
- }
+ /**
+ * Executes action <code>a</code> and appends the action to the redo log if
+ * successful.
+ *
+ * @param a
+ * the <code>Action</code> to execute.
+ * @return the executed action.
+ * @throws IOException
+ * if an error occurs while executing the action or appending
+ * the action to the redo log.
+ */
+ private Action executeAndLog(Action a) throws IOException
+ {
+ a.execute(this);
+ redoLog.append(a);
+ // please note that flushing the redo log is only required on
+ // commit, but we also want to keep track of new indexes for sure.
+ // otherwise it might happen that unused index folders are orphaned
+ // after a crash.
+ if (a.getType() == Action.TYPE_COMMIT || a.getType() == Action.TYPE_ADD_INDEX)
+ {
+ redoLog.flush();
+ }
+ return a;
+ }
- /**
- * Commits the volatile index to a persistent index. The new persistent
- * index is added to the list of indexes but not written to disk. When this
- * method returns a new volatile index has been created.
- *
- * @throws IOException
- * if an error occurs while writing the volatile index to disk.
- */
- private void commitVolatileIndex() throws IOException {
+ /**
+ * Checks if it is needed to commit the volatile index according to
+ * {@link SearchIndex#getMaxVolatileIndexSize()}.
+ *
+ * @return <code>true</code> if the volatile index has been committed,
+ * <code>false</code> otherwise.
+ * @throws IOException
+ * if an error occurs while committing the volatile index.
+ */
+ private boolean checkVolatileCommit() throws IOException
+ {
+ if (volatileIndex.getRamSizeInBytes() >= handler.getMaxVolatileIndexSize())
+ {
+ commitVolatileIndex();
+ return true;
+ }
+ return false;
+ }
- // check if volatile index contains documents at all
- if (volatileIndex.getNumDocuments() > 0) {
+ /**
+ * Commits the volatile index to a persistent index. The new persistent
+ * index is added to the list of indexes but not written to disk. When this
+ * method returns a new volatile index has been created.
+ *
+ * @throws IOException
+ * if an error occurs while writing the volatile index to disk.
+ */
+ private void commitVolatileIndex() throws IOException
+ {
- long time = System.currentTimeMillis();
- // create index
- CreateIndex create = new CreateIndex(getTransactionId(), null);
- executeAndLog(create);
+ // check if volatile index contains documents at all
+ if (volatileIndex.getNumDocuments() > 0)
+ {
- // commit volatile index
- executeAndLog(new VolatileCommit(getTransactionId(), create
- .getIndexName()));
+ long time = System.currentTimeMillis();
+ // create index
+ CreateIndex create = new CreateIndex(getTransactionId(), null);
+ executeAndLog(create);
- // add new index
- AddIndex add = new AddIndex(getTransactionId(), create
- .getIndexName());
- executeAndLog(add);
+ // commit volatile index
+ executeAndLog(new VolatileCommit(getTransactionId(), create.getIndexName()));
- // create new volatile index
- resetVolatileIndex();
+ // add new index
+ AddIndex add = new AddIndex(getTransactionId(), create.getIndexName());
+ executeAndLog(add);
- time = System.currentTimeMillis() - time;
- log.debug("Committed in-memory index in " + time + "ms.");
- }
- }
+ // create new volatile index
+ resetVolatileIndex();
- /**
- * Recursively creates an index starting with the NodeState
- * <code>node</code>.
- *
- * @param node
- * the current NodeState.
- * @param path
- * the path of the current node.
- * @param stateMgr
- * the shared item state manager.
- * @param count
- * the number of nodes already indexed.
- * @return the number of nodes indexed so far.
- * @throws IOException
- * if an error occurs while writing to the index.
- * @throws ItemStateException
- * if an node state cannot be found.
- * @throws RepositoryException
- * if any other error occurs
- */
- private long createIndex(NodeData node, ItemDataConsumer stateMgr,
- long count) throws IOException, RepositoryException {
- // NodeId id = node.getNodeId();
+ time = System.currentTimeMillis() - time;
+ log.debug("Committed in-memory index in " + time + "ms.");
+ }
+ }
- if (indexingTree.isExcluded(node)) {
- return count;
- }
- executeAndLog(new AddNode(getTransactionId(), node.getIdentifier()));
- if (++count % 100 == 0) {
+ /**
+ * Recursively creates an index starting with the NodeState
+ * <code>node</code>.
+ *
+ * @param node
+ * the current NodeState.
+ * @param path
+ * the path of the current node.
+ * @param stateMgr
+ * the shared item state manager.
+ * @param count
+ * the number of nodes already indexed.
+ * @return the number of nodes indexed so far.
+ * @throws IOException
+ * if an error occurs while writing to the index.
+ * @throws ItemStateException
+ * if a node state cannot be found.
+ * @throws RepositoryException
+ * if any other error occurs
+ */
+ private long createIndex(NodeData node, ItemDataConsumer stateMgr, long count) throws IOException,
+ RepositoryException
+ {
+ // NodeId id = node.getNodeId();
- log.info("indexing... {} ({})", node.getQPath().getAsString(),
- new Long(count));
- }
- if (count % 10 == 0) {
- checkIndexingQueue(true);
- }
- checkVolatileCommit();
- List<NodeData> children = stateMgr.getChildNodesData(node);
- for (NodeData nodeData : children) {
+ if (indexingTree.isExcluded(node))
+ {
+ return count;
+ }
+ executeAndLog(new AddNode(getTransactionId(), node.getIdentifier()));
+ if (++count % 100 == 0)
+ {
- NodeData childState = (NodeData) stateMgr.getItemData(nodeData
- .getIdentifier());
- if (childState == null) {
- handler.getOnWorkspaceInconsistencyHandler()
- .handleMissingChildNode(
- new ItemNotFoundException("Child not found "),
- handler, nodeData.getQPath(), node, nodeData);
- }
+ log.info("indexing... {} ({})", node.getQPath().getAsString(), new Long(count));
+ }
+ if (count % 10 == 0)
+ {
+ checkIndexingQueue(true);
+ }
+ checkVolatileCommit();
+ List<NodeData> children = stateMgr.getChildNodesData(node);
+ for (NodeData nodeData : children)
+ {
- if (nodeData != null) {
- count = createIndex(nodeData, stateMgr, count);
- }
- }
+ NodeData childState = (NodeData)stateMgr.getItemData(nodeData.getIdentifier());
+ if (childState == null)
+ {
+ handler.getOnWorkspaceInconsistencyHandler().handleMissingChildNode(
+ new ItemNotFoundException("Child not found "), handler, nodeData.getQPath(), node, nodeData);
+ }
- return count;
- }
+ if (nodeData != null)
+ {
+ count = createIndex(nodeData, stateMgr, count);
+ }
+ }
- /**
- * Attempts to delete all files recorded in {@link #deletable}.
- */
- private void attemptDelete() {
- synchronized (deletable) {
- for (Iterator it = deletable.iterator(); it.hasNext();) {
- String indexName = (String) it.next();
- if (directoryManager.delete(indexName)) {
- it.remove();
- } else {
- log.info("Unable to delete obsolete index: " + indexName);
- }
- }
- }
- }
+ return count;
+ }
- /**
- * Removes the deletable file if it exists. The file is not used anymore in
- * Jackrabbit versions >= 1.5.
- */
- private void removeDeletable() {
- String fileName = "deletable";
- try {
- if (indexDir.fileExists(fileName)) {
- indexDir.deleteFile(fileName);
- }
- } catch (IOException e) {
- log.warn("Unable to remove file 'deletable'.", e);
- }
- }
+ /**
+ * Attempts to delete all files recorded in {@link #deletable}.
+ */
+ private void attemptDelete()
+ {
+ synchronized (deletable)
+ {
+ for (Iterator it = deletable.iterator(); it.hasNext();)
+ {
+ String indexName = (String)it.next();
+ if (directoryManager.delete(indexName))
+ {
+ it.remove();
+ }
+ else
+ {
+ log.info("Unable to delete obsolete index: " + indexName);
+ }
+ }
+ }
+ }
- /**
- * Checks the duration between the last commit to this index and the current
- * time and flushes the index (if there are changes at all) if the duration
- * (idle time) is more than {@link SearchIndex#getVolatileIdleTime()}
- * seconds.
- */
- private synchronized void checkFlush() {
- long idleTime = System.currentTimeMillis() - lastFlushTime;
- // do not flush if volatileIdleTime is zero or negative
- if (handler.getVolatileIdleTime() > 0
- && idleTime > handler.getVolatileIdleTime() * 1000) {
- try {
- if (redoLog.hasEntries()) {
- log.debug("Flushing index after being idle for " + idleTime
- + " ms.");
- synchronized (updateMonitor) {
- updateInProgress = true;
- }
- try {
- flush();
- } finally {
- synchronized (updateMonitor) {
- updateInProgress = false;
- updateMonitor.notifyAll();
- releaseMultiReader();
- }
- }
- }
- } catch (IOException e) {
- log.error("Unable to commit volatile index", e);
- }
- }
- }
+ /**
+ * Removes the deletable file if it exists. The file is not used anymore in
+ * Jackrabbit versions >= 1.5.
+ */
+ private void removeDeletable()
+ {
+ String fileName = "deletable";
+ try
+ {
+ if (indexDir.fileExists(fileName))
+ {
+ indexDir.deleteFile(fileName);
+ }
+ }
+ catch (IOException e)
+ {
+ log.warn("Unable to remove file 'deletable'.", e);
+ }
+ }
- /**
- * Checks the indexing queue for finished text extrator jobs and updates the
- * index accordingly if there are any new ones. This method is synchronized
- * and should only be called by the timer task that periodically checks if
- * there are documents ready in the indexing queue. A new transaction is
- * used when documents are transfered from the indexing queue to the index.
- */
- private synchronized void checkIndexingQueue() {
- checkIndexingQueue(false);
- }
+ /**
+ * Checks the duration between the last commit to this index and the current
+ * time and flushes the index (if there are changes at all) if the duration
+ * (idle time) is more than {@link SearchIndex#getVolatileIdleTime()}
+ * seconds.
+ */
+ private synchronized void checkFlush()
+ {
+ long idleTime = System.currentTimeMillis() - lastFlushTime;
+ // do not flush if volatileIdleTime is zero or negative
+ if (handler.getVolatileIdleTime() > 0 && idleTime > handler.getVolatileIdleTime() * 1000)
+ {
+ try
+ {
+ if (redoLog.hasEntries())
+ {
+ log.debug("Flushing index after being idle for " + idleTime + " ms.");
+ synchronized (updateMonitor)
+ {
+ updateInProgress = true;
+ }
+ try
+ {
+ flush();
+ }
+ finally
+ {
+ synchronized (updateMonitor)
+ {
+ updateInProgress = false;
+ updateMonitor.notifyAll();
+ releaseMultiReader();
+ }
+ }
+ }
+ }
+ catch (IOException e)
+ {
+ log.error("Unable to commit volatile index", e);
+ }
+ }
+ }
- /**
- * Checks the indexing queue for finished text extrator jobs and updates the
- * index accordingly if there are any new ones.
- *
- * @param transactionPresent
- * whether a transaction is in progress and the current
- * {@link #getTransactionId()} should be used. If
- * <code>false</code> a new transaction is created when documents
- * are transfered from the indexing queue to the index.
- */
- private void checkIndexingQueue(boolean transactionPresent) {
- Document[] docs = indexingQueue.getFinishedDocuments();
- Map finished = new HashMap();
- for (int i = 0; i < docs.length; i++) {
- String uuid = docs[i].get(FieldNames.UUID);
- finished.put(uuid, docs[i]);
- }
+ /**
+ * Checks the indexing queue for finished text extractor jobs and updates the
+ * index accordingly if there are any new ones. This method is synchronized
+ * and should only be called by the timer task that periodically checks if
+ * there are documents ready in the indexing queue. A new transaction is
+ * used when documents are transferred from the indexing queue to the index.
+ */
+ private synchronized void checkIndexingQueue()
+ {
+ checkIndexingQueue(false);
+ }
- // now update index with the remaining ones if there are any
- if (!finished.isEmpty()) {
- log.info("updating index with {} nodes from indexing queue.",
- new Long(finished.size()));
+ /**
+ * Checks the indexing queue for finished text extractor jobs and updates the
+ * index accordingly if there are any new ones.
+ *
+ * @param transactionPresent
+ * whether a transaction is in progress and the current
+ * {@link #getTransactionId()} should be used. If
+ * <code>false</code> a new transaction is created when documents
+ * are transferred from the indexing queue to the index.
+ */
+ private void checkIndexingQueue(boolean transactionPresent)
+ {
+ Document[] docs = indexingQueue.getFinishedDocuments();
+ Map finished = new HashMap();
+ for (int i = 0; i < docs.length; i++)
+ {
+ String uuid = docs[i].get(FieldNames.UUID);
+ finished.put(uuid, docs[i]);
+ }
- // remove documents from the queue
- for (Iterator it = finished.keySet().iterator(); it.hasNext();) {
- indexingQueue.removeDocument(it.next().toString());
- }
+ // now update index with the remaining ones if there are any
+ if (!finished.isEmpty())
+ {
+ log.info("updating index with {} nodes from indexing queue.", new Long(finished.size()));
- try {
- if (transactionPresent) {
- for (Iterator it = finished.keySet().iterator(); it
- .hasNext();) {
- executeAndLog(new DeleteNode(getTransactionId(),
- (String) it.next()));
- }
- for (Iterator it = finished.values().iterator(); it
- .hasNext();) {
- executeAndLog(new AddNode(getTransactionId(),
- (Document) it.next()));
- }
- } else {
- update(finished.keySet(), finished.values());
- }
- } catch (IOException e) {
- // update failed
- log.warn(
- "Failed to update index with deferred text extraction",
- e);
- }
- }
- }
+ // remove documents from the queue
+ for (Iterator it = finished.keySet().iterator(); it.hasNext();)
+ {
+ indexingQueue.removeDocument(it.next().toString());
+ }
- // ------------------------< Actions
- // >---------------------------------------
+ try
+ {
+ if (transactionPresent)
+ {
+ for (Iterator it = finished.keySet().iterator(); it.hasNext();)
+ {
+ executeAndLog(new DeleteNode(getTransactionId(), (String)it.next()));
+ }
+ for (Iterator it = finished.values().iterator(); it.hasNext();)
+ {
+ executeAndLog(new AddNode(getTransactionId(), (Document)it.next()));
+ }
+ }
+ else
+ {
+ update(finished.keySet(), finished.values());
+ }
+ }
+ catch (IOException e)
+ {
+ // update failed
+ log.warn("Failed to update index with deferred text extraction", e);
+ }
+ }
+ }
- /**
- * Defines an action on an <code>MultiIndex</code>.
- */
- public abstract static class Action {
+ // ------------------------< Actions
+ // >---------------------------------------
- /**
- * Action identifier in redo log for transaction start action.
- */
- static final String START = "STR";
+ /**
+ * Defines an action on a <code>MultiIndex</code>.
+ */
+ public abstract static class Action
+ {
- /**
- * Action type for start action.
- */
- public static final int TYPE_START = 0;
+ /**
+ * Action identifier in redo log for transaction start action.
+ */
+ static final String START = "STR";
- /**
- * Action identifier in redo log for add node action.
- */
- static final String ADD_NODE = "ADD";
+ /**
+ * Action type for start action.
+ */
+ public static final int TYPE_START = 0;
- /**
- * Action type for add node action.
- */
- public static final int TYPE_ADD_NODE = 1;
+ /**
+ * Action identifier in redo log for add node action.
+ */
+ static final String ADD_NODE = "ADD";
- /**
- * Action identifier in redo log for node delete action.
- */
- static final String DELETE_NODE = "DEL";
+ /**
+ * Action type for add node action.
+ */
+ public static final int TYPE_ADD_NODE = 1;
- /**
- * Action type for delete node action.
- */
- public static final int TYPE_DELETE_NODE = 2;
+ /**
+ * Action identifier in redo log for node delete action.
+ */
+ static final String DELETE_NODE = "DEL";
- /**
- * Action identifier in redo log for transaction commit action.
- */
- static final String COMMIT = "COM";
+ /**
+ * Action type for delete node action.
+ */
+ public static final int TYPE_DELETE_NODE = 2;
- /**
- * Action type for commit action.
- */
- public static final int TYPE_COMMIT = 3;
+ /**
+ * Action identifier in redo log for transaction commit action.
+ */
+ static final String COMMIT = "COM";
- /**
- * Action identifier in redo log for volatile index commit action.
- */
- static final String VOLATILE_COMMIT = "VOL_COM";
+ /**
+ * Action type for commit action.
+ */
+ public static final int TYPE_COMMIT = 3;
- /**
- * Action type for volatile index commit action.
- */
- public static final int TYPE_VOLATILE_COMMIT = 4;
+ /**
+ * Action identifier in redo log for volatile index commit action.
+ */
+ static final String VOLATILE_COMMIT = "VOL_COM";
- /**
- * Action identifier in redo log for index create action.
- */
- static final String CREATE_INDEX = "CRE_IDX";
+ /**
+ * Action type for volatile index commit action.
+ */
+ public static final int TYPE_VOLATILE_COMMIT = 4;
- /**
- * Action type for create index action.
- */
- public static final int TYPE_CREATE_INDEX = 5;
+ /**
+ * Action identifier in redo log for index create action.
+ */
+ static final String CREATE_INDEX = "CRE_IDX";
- /**
- * Action identifier in redo log for index add action.
- */
- static final String ADD_INDEX = "ADD_IDX";
+ /**
+ * Action type for create index action.
+ */
+ public static final int TYPE_CREATE_INDEX = 5;
- /**
- * Action type for add index action.
- */
- public static final int TYPE_ADD_INDEX = 6;
+ /**
+ * Action identifier in redo log for index add action.
+ */
+ static final String ADD_INDEX = "ADD_IDX";
- /**
- * Action identifier in redo log for delete index action.
- */
- static final String DELETE_INDEX = "DEL_IDX";
+ /**
+ * Action type for add index action.
+ */
+ public static final int TYPE_ADD_INDEX = 6;
- /**
- * Action type for delete index action.
- */
- public static final int TYPE_DELETE_INDEX = 7;
+ /**
+ * Action identifier in redo log for delete index action.
+ */
+ static final String DELETE_INDEX = "DEL_IDX";
- /**
- * Transaction identifier for internal actions like volatile index
- * commit triggered by timer thread.
- */
- static final long INTERNAL_TRANSACTION = -1;
+ /**
+ * Action type for delete index action.
+ */
+ public static final int TYPE_DELETE_INDEX = 7;
- /**
- * Transaction identifier for internal action that replaces indexs.
- */
- static final long INTERNAL_TRANS_REPL_INDEXES = -2;
+ /**
+ * Transaction identifier for internal actions like volatile index
+ * commit triggered by timer thread.
+ */
+ static final long INTERNAL_TRANSACTION = -1;
- /**
- * The id of the transaction that executed this action.
- */
- private final long transactionId;
+ /**
+ * Transaction identifier for internal action that replaces indexes.
+ */
+ static final long INTERNAL_TRANS_REPL_INDEXES = -2;
- /**
- * The action type.
- */
- private final int type;
+ /**
+ * The id of the transaction that executed this action.
+ */
+ private final long transactionId;
- /**
- * Creates a new <code>Action</code>.
- *
- * @param transactionId
- * the id of the transaction that executed this action.
- * @param type
- * the action type.
- */
- Action(long transactionId, int type) {
- this.transactionId = transactionId;
- this.type = type;
- }
+ /**
+ * The action type.
+ */
+ private final int type;
- /**
- * Returns the transaction id for this <code>Action</code>.
- *
- * @return the transaction id for this <code>Action</code>.
- */
- long getTransactionId() {
- return transactionId;
- }
+ /**
+ * Creates a new <code>Action</code>.
+ *
+ * @param transactionId
+ * the id of the transaction that executed this action.
+ * @param type
+ * the action type.
+ */
+ Action(long transactionId, int type)
+ {
+ this.transactionId = transactionId;
+ this.type = type;
+ }
- /**
- * Returns the action type.
- *
- * @return the action type.
- */
- int getType() {
- return type;
- }
+ /**
+ * Returns the transaction id for this <code>Action</code>.
+ *
+ * @return the transaction id for this <code>Action</code>.
+ */
+ long getTransactionId()
+ {
+ return transactionId;
+ }
- /**
- * Executes this action on the <code>index</code>.
- *
- * @param index
- * the index where to execute the action.
- * @throws IOException
- * if the action fails due to some I/O error in the index or
- * some other error.
- */
- public abstract void execute(MultiIndex index) throws IOException;
+ /**
+ * Returns the action type.
+ *
+ * @return the action type.
+ */
+ int getType()
+ {
+ return type;
+ }
- /**
- * Executes the inverse operation of this action. That is, does an undo
- * of this action. This default implementation does nothing, but returns
- * silently.
- *
- * @param index
- * the index where to undo the action.
- * @throws IOException
- * if the action cannot be undone.
- */
- public void undo(MultiIndex index) throws IOException {
- }
+ /**
+ * Executes this action on the <code>index</code>.
+ *
+ * @param index
+ * the index where to execute the action.
+ * @throws IOException
+ * if the action fails due to some I/O error in the index or
+ * some other error.
+ */
+ public abstract void execute(MultiIndex index) throws IOException;
- /**
- * Returns a <code>String</code> representation of this action that can
- * be written to the {@link RedoLog}.
- *
- * @return a <code>String</code> representation of this action.
- */
- public abstract String toString();
+ /**
+ * Executes the inverse operation of this action. That is, does an undo
+ * of this action. This default implementation does nothing, but returns
+ * silently.
+ *
+ * @param index
+ * the index where to undo the action.
+ * @throws IOException
+ * if the action cannot be undone.
+ */
+ public void undo(MultiIndex index) throws IOException
+ {
+ }
- /**
- * Parses an line in the redo log and created an {@link Action}.
- *
- * @param line
- * the line from the redo log.
- * @return an <code>Action</code>.
- * @throws IllegalArgumentException
- * if the line is malformed.
- */
- static Action fromString(String line) throws IllegalArgumentException {
- int endTransIdx = line.indexOf(' ');
- if (endTransIdx == -1) {
- throw new IllegalArgumentException(line);
- }
- long transactionId;
- try {
- transactionId = Long.parseLong(line.substring(0, endTransIdx));
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(line);
- }
- int endActionIdx = line.indexOf(' ', endTransIdx + 1);
- if (endActionIdx == -1) {
- // action does not have arguments
- endActionIdx = line.length();
- }
- String actionLabel = line.substring(endTransIdx + 1, endActionIdx);
- String arguments = "";
- if (endActionIdx + 1 <= line.length()) {
- arguments = line.substring(endActionIdx + 1);
- }
- Action a;
- if (actionLabel.equals(Action.ADD_NODE)) {
- a = AddNode.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.ADD_INDEX)) {
- a = AddIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.COMMIT)) {
- a = Commit.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.CREATE_INDEX)) {
- a = CreateIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.DELETE_INDEX)) {
- a = DeleteIndex.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.DELETE_NODE)) {
- a = DeleteNode.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.START)) {
- a = Start.fromString(transactionId, arguments);
- } else if (actionLabel.equals(Action.VOLATILE_COMMIT)) {
- a = VolatileCommit.fromString(transactionId, arguments);
- } else {
- throw new IllegalArgumentException(line);
- }
- return a;
- }
- }
+ /**
+ * Returns a <code>String</code> representation of this action that can
+ * be written to the {@link RedoLog}.
+ *
+ * @return a <code>String</code> representation of this action.
+ */
+ public abstract String toString();
- /**
- * Adds an index to the MultiIndex's active persistent index list.
- */
- private static class AddIndex extends Action {
+ /**
+ * Parses a line in the redo log and creates an {@link Action}.
+ *
+ * @param line
+ * the line from the redo log.
+ * @return an <code>Action</code>.
+ * @throws IllegalArgumentException
+ * if the line is malformed.
+ */
+ static Action fromString(String line) throws IllegalArgumentException
+ {
+ int endTransIdx = line.indexOf(' ');
+ if (endTransIdx == -1)
+ {
+ throw new IllegalArgumentException(line);
+ }
+ long transactionId;
+ try
+ {
+ transactionId = Long.parseLong(line.substring(0, endTransIdx));
+ }
+ catch (NumberFormatException e)
+ {
+ throw new IllegalArgumentException(line);
+ }
+ int endActionIdx = line.indexOf(' ', endTransIdx + 1);
+ if (endActionIdx == -1)
+ {
+ // action does not have arguments
+ endActionIdx = line.length();
+ }
+ String actionLabel = line.substring(endTransIdx + 1, endActionIdx);
+ String arguments = "";
+ if (endActionIdx + 1 <= line.length())
+ {
+ arguments = line.substring(endActionIdx + 1);
+ }
+ Action a;
+ if (actionLabel.equals(Action.ADD_NODE))
+ {
+ a = AddNode.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.ADD_INDEX))
+ {
+ a = AddIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.COMMIT))
+ {
+ a = Commit.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.CREATE_INDEX))
+ {
+ a = CreateIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.DELETE_INDEX))
+ {
+ a = DeleteIndex.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.DELETE_NODE))
+ {
+ a = DeleteNode.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.START))
+ {
+ a = Start.fromString(transactionId, arguments);
+ }
+ else if (actionLabel.equals(Action.VOLATILE_COMMIT))
+ {
+ a = VolatileCommit.fromString(transactionId, arguments);
+ }
+ else
+ {
+ throw new IllegalArgumentException(line);
+ }
+ return a;
+ }
+ }
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * Adds an index to the MultiIndex's active persistent index list.
+ */
+ private static class AddIndex extends Action
+ {
- /**
- * Creates a new AddIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to add, or <code>null</code> if an
- * index with a new name should be created.
- */
- AddIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_ADD_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * The name of the index to add.
+ */
+ private String indexName;
- /**
- * Creates a new AddIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to add.
- * @return the AddIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static AddIndex fromString(long transactionId, String arguments) {
- return new AddIndex(transactionId, arguments);
- }
+ /**
+ * Creates a new AddIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to add, or <code>null</code> if an
+ * index with a new name should be created.
+ */
+ AddIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_ADD_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * Adds a sub index to <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- if (!index.indexNames.contains(indexName)) {
- index.indexNames.addName(indexName);
- // now that the index is in the active list let the merger know
- // about it
- index.merger.indexAdded(indexName, idx.getNumDocuments());
- }
- }
+ /**
+ * Creates a new AddIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to add.
+ * @return the AddIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static AddIndex fromString(long transactionId, String arguments)
+ {
+ return new AddIndex(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.ADD_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
- }
+ /**
+ * Adds a sub index to <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ if (!index.indexNames.contains(indexName))
+ {
+ index.indexNames.addName(indexName);
+ // now that the index is in the active list let the merger know
+ // about it
+ index.merger.indexAdded(indexName, idx.getNumDocuments());
+ }
+ }
- /**
- * Adds a node to the index.
- */
- private static class AddNode extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.ADD_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
+ }
- /**
- * The maximum length of a AddNode String.
- */
- private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE)
- .length()
- + Action.ADD_NODE.length()
- + Constants.UUID_FORMATTED_LENGTH
- + 2;
+ /**
+ * Adds a node to the index.
+ */
+ private static class AddNode extends Action
+ {
- /**
- * The uuid of the node to add.
- */
- private final String uuid;
+ /**
+ * The maximum length of an AddNode String.
+ */
+ private static final int ENTRY_LENGTH =
+ Long.toString(Long.MAX_VALUE).length() + Action.ADD_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
- /**
- * The document to add to the index, or <code>null</code> if not
- * available.
- */
- private Document doc;
+ /**
+ * The uuid of the node to add.
+ */
+ private final String uuid;
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param uuid
- * the uuid of the node to add.
- */
- AddNode(long transactionId, String uuid) {
- super(transactionId, Action.TYPE_ADD_NODE);
- this.uuid = uuid;
- }
+ /**
+ * The document to add to the index, or <code>null</code> if not
+ * available.
+ */
+ private Document doc;
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param doc
- * the document to add.
- */
- AddNode(long transactionId, Document doc) {
- this(transactionId, doc.get(FieldNames.UUID));
- this.doc = doc;
- }
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param uuid
+ * the uuid of the node to add.
+ */
+ AddNode(long transactionId, String uuid)
+ {
+ super(transactionId, Action.TYPE_ADD_NODE);
+ this.uuid = uuid;
+ }
- /**
- * Creates a new AddNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the arguments to this action. The uuid of the node to add
- * @return the AddNode action.
- * @throws IllegalArgumentException
- * if the arguments are malformed. Not a UUID.
- */
- static AddNode fromString(long transactionId, String arguments)
- throws IllegalArgumentException {
- // simple length check
- if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
- throw new IllegalArgumentException("arguments is not a uuid");
- }
- return new AddNode(transactionId, arguments);
- }
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param doc
+ * the document to add.
+ */
+ AddNode(long transactionId, Document doc)
+ {
+ this(transactionId, doc.get(FieldNames.UUID));
+ this.doc = doc;
+ }
- /**
- * Adds a node to the index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- if (doc == null) {
- try {
- doc = index.createDocument(uuid);
- } catch (RepositoryException e) {
- // node does not exist anymore
- log.debug(e.getMessage());
- }
- }
- if (doc != null) {
- index.volatileIndex.addDocuments(new Document[] { doc });
- }
- }
+ /**
+ * Creates a new AddNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the arguments to this action. The uuid of the node to add
+ * @return the AddNode action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed. Not a UUID.
+ */
+ static AddNode fromString(long transactionId, String arguments) throws IllegalArgumentException
+ {
+ // simple length check
+ if (arguments.length() != Constants.UUID_FORMATTED_LENGTH)
+ {
+ throw new IllegalArgumentException("arguments is not a uuid");
+ }
+ return new AddNode(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.ADD_NODE);
- logLine.append(' ');
- logLine.append(uuid);
- return logLine.toString();
- }
- }
+ /**
+ * Adds a node to the index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ if (doc == null)
+ {
+ try
+ {
+ doc = index.createDocument(uuid);
+ }
+ catch (RepositoryException e)
+ {
+ // node does not exist anymore
+ log.debug(e.getMessage());
+ }
+ }
+ if (doc != null)
+ {
+ index.volatileIndex.addDocuments(new Document[]{doc});
+ }
+ }
- /**
- * Commits a transaction.
- */
- private static class Commit extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.ADD_NODE);
+ logLine.append(' ');
+ logLine.append(uuid);
+ return logLine.toString();
+ }
+ }
- /**
- * Creates a new Commit action.
- *
- * @param transactionId
- * the id of the transaction that is committed.
- */
- Commit(long transactionId) {
- super(transactionId, Action.TYPE_COMMIT);
- }
+ /**
+ * Commits a transaction.
+ */
+ private static class Commit extends Action
+ {
- /**
- * Creates a new Commit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this method.
- * @return the Commit action.
- */
- static Commit fromString(long transactionId, String arguments) {
- return new Commit(transactionId);
- }
+ /**
+ * Creates a new Commit action.
+ *
+ * @param transactionId
+ * the id of the transaction that is committed.
+ */
+ Commit(long transactionId)
+ {
+ super(transactionId, Action.TYPE_COMMIT);
+ }
- /**
- * Touches the last flush time (sets it to the current time).
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- index.lastFlushTime = System.currentTimeMillis();
- }
+ /**
+ * Creates a new Commit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this method.
+ * @return the Commit action.
+ */
+ static Commit fromString(long transactionId, String arguments)
+ {
+ return new Commit(transactionId);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- return Long.toString(getTransactionId()) + ' ' + Action.COMMIT;
- }
- }
+ /**
+ * Touches the last flush time (sets it to the current time).
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ index.lastFlushTime = System.currentTimeMillis();
+ }
- /**
- * Creates an new sub index but does not add it to the active persistent
- * index list.
- */
- private static class CreateIndex extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ return Long.toString(getTransactionId()) + ' ' + Action.COMMIT;
+ }
+ }
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * Creates a new sub index but does not add it to the active persistent
+ * index list.
+ */
+ private static class CreateIndex extends Action
+ {
- /**
- * Creates a new CreateIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to add, or <code>null</code> if an
- * index with a new name should be created.
- */
- CreateIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_CREATE_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * The name of the index to add.
+ */
+ private String indexName;
- /**
- * Creates a new CreateIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to create.
- * @return the AddIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static CreateIndex fromString(long transactionId, String arguments) {
- // when created from String, this action is executed as redo action
- return new CreateIndex(transactionId, arguments);
- }
+ /**
+ * Creates a new CreateIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to add, or <code>null</code> if an
+ * index with a new name should be created.
+ */
+ CreateIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_CREATE_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * Creates a new index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- indexName = idx.getName();
- }
+ /**
+ * Creates a new CreateIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to create.
+ * @return the CreateIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static CreateIndex fromString(long transactionId, String arguments)
+ {
+ // when created from String, this action is executed as redo action
+ return new CreateIndex(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public void undo(MultiIndex index) throws IOException {
- if (index.hasIndex(indexName)) {
- PersistentIndex idx = index.getOrCreateIndex(indexName);
- idx.close();
- index.deleteIndex(idx);
- }
- }
+ /**
+ * Creates a new index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ indexName = idx.getName();
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.CREATE_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
+ /**
+ * @inheritDoc
+ */
+ public void undo(MultiIndex index) throws IOException
+ {
+ if (index.hasIndex(indexName))
+ {
+ PersistentIndex idx = index.getOrCreateIndex(indexName);
+ idx.close();
+ index.deleteIndex(idx);
+ }
+ }
- /**
- * Returns the index name that has been created. If this method is
- * called before {@link #execute(MultiIndex)} it will return
- * <code>null</code>.
- *
- * @return the name of the index that has been created.
- */
- String getIndexName() {
- return indexName;
- }
- }
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.CREATE_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
- /**
- * Closes and deletes an index that is no longer in use.
- */
- private static class DeleteIndex extends Action {
+ /**
+ * Returns the index name that has been created. If this method is
+ * called before {@link #execute(MultiIndex)} it will return
+ * <code>null</code>.
+ *
+ * @return the name of the index that has been created.
+ */
+ String getIndexName()
+ {
+ return indexName;
+ }
+ }
- /**
- * The name of the index to add.
- */
- private String indexName;
+ /**
+ * Closes and deletes an index that is no longer in use.
+ */
+ private static class DeleteIndex extends Action
+ {
- /**
- * Creates a new DeleteIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param indexName
- * the name of the index to delete.
- */
- DeleteIndex(long transactionId, String indexName) {
- super(transactionId, Action.TYPE_DELETE_INDEX);
- this.indexName = indexName;
- }
+ /**
+ * The name of the index to add.
+ */
+ private String indexName;
- /**
- * Creates a new DeleteIndex action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the name of the index to delete.
- * @return the DeleteIndex action.
- * @throws IllegalArgumentException
- * if the arguments are malformed.
- */
- static DeleteIndex fromString(long transactionId, String arguments) {
- return new DeleteIndex(transactionId, arguments);
- }
+ /**
+ * Creates a new DeleteIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param indexName
+ * the name of the index to delete.
+ */
+ DeleteIndex(long transactionId, String indexName)
+ {
+ super(transactionId, Action.TYPE_DELETE_INDEX);
+ this.indexName = indexName;
+ }
- /**
- * Removes a sub index from <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- // get index if it exists
- for (Iterator it = index.indexes.iterator(); it.hasNext();) {
- PersistentIndex idx = (PersistentIndex) it.next();
- if (idx.getName().equals(indexName)) {
- idx.close();
- index.deleteIndex(idx);
- break;
- }
- }
- }
+ /**
+ * Creates a new DeleteIndex action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the name of the index to delete.
+ * @return the DeleteIndex action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed.
+ */
+ static DeleteIndex fromString(long transactionId, String arguments)
+ {
+ return new DeleteIndex(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.DELETE_INDEX);
- logLine.append(' ');
- logLine.append(indexName);
- return logLine.toString();
- }
- }
+ /**
+ * Removes a sub index from <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ // get index if it exists
+ for (Iterator it = index.indexes.iterator(); it.hasNext();)
+ {
+ PersistentIndex idx = (PersistentIndex)it.next();
+ if (idx.getName().equals(indexName))
+ {
+ idx.close();
+ index.deleteIndex(idx);
+ break;
+ }
+ }
+ }
- /**
- * Deletes a node from the index.
- */
- private static class DeleteNode extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.DELETE_INDEX);
+ logLine.append(' ');
+ logLine.append(indexName);
+ return logLine.toString();
+ }
+ }
- /**
- * The maximum length of a DeleteNode String.
- */
- private static final int ENTRY_LENGTH = Long.toString(Long.MAX_VALUE)
- .length()
- + Action.DELETE_NODE.length()
- + Constants.UUID_FORMATTED_LENGTH
- + 2;
+ /**
+ * Deletes a node from the index.
+ */
+ private static class DeleteNode extends Action
+ {
- /**
- * The uuid of the node to remove.
- */
- private final String uuid;
+ /**
+ * The maximum length of a DeleteNode String.
+ */
+ private static final int ENTRY_LENGTH =
+ Long.toString(Long.MAX_VALUE).length() + Action.DELETE_NODE.length() + Constants.UUID_FORMATTED_LENGTH + 2;
- /**
- * Creates a new DeleteNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param uuid
- * the uuid of the node to delete.
- */
- DeleteNode(long transactionId, String uuid) {
- super(transactionId, Action.TYPE_DELETE_NODE);
- this.uuid = uuid;
- }
+ /**
+ * The uuid of the node to remove.
+ */
+ private final String uuid;
- /**
- * Creates a new DeleteNode action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * the uuid of the node to delete.
- * @return the DeleteNode action.
- * @throws IllegalArgumentException
- * if the arguments are malformed. Not a UUID.
- */
- static DeleteNode fromString(long transactionId, String arguments) {
- // simple length check
- if (arguments.length() != Constants.UUID_FORMATTED_LENGTH) {
- throw new IllegalArgumentException("arguments is not a uuid");
- }
- return new DeleteNode(transactionId, arguments);
- }
+ /**
+ * Creates a new DeleteNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param uuid
+ * the uuid of the node to delete.
+ */
+ DeleteNode(long transactionId, String uuid)
+ {
+ super(transactionId, Action.TYPE_DELETE_NODE);
+ this.uuid = uuid;
+ }
- /**
- * Deletes a node from the index.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- String uuidString = uuid.toString();
- // check if indexing queue is still working on
- // this node from a previous update
- Document doc = index.indexingQueue.removeDocument(uuidString);
- if (doc != null) {
- Util.disposeDocument(doc);
- }
- Term idTerm = new Term(FieldNames.UUID, uuidString);
- // if the document cannot be deleted from the volatile index
- // delete it from one of the persistent indexes.
- int num = index.volatileIndex.removeDocument(idTerm);
- if (num == 0) {
- for (int i = index.indexes.size() - 1; i >= 0; i--) {
- // only look in registered indexes
- PersistentIndex idx = (PersistentIndex) index.indexes
- .get(i);
- if (index.indexNames.contains(idx.getName())) {
- num = idx.removeDocument(idTerm);
- if (num > 0) {
- return;
- }
- }
- }
- }
- }
+ /**
+ * Creates a new DeleteNode action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * the uuid of the node to delete.
+ * @return the DeleteNode action.
+ * @throws IllegalArgumentException
+ * if the arguments are malformed. Not a UUID.
+ */
+ static DeleteNode fromString(long transactionId, String arguments)
+ {
+ // simple length check
+ if (arguments.length() != Constants.UUID_FORMATTED_LENGTH)
+ {
+ throw new IllegalArgumentException("arguments is not a uuid");
+ }
+ return new DeleteNode(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.DELETE_NODE);
- logLine.append(' ');
- logLine.append(uuid);
- return logLine.toString();
- }
- }
+ /**
+ * Deletes a node from the index.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ String uuidString = uuid.toString();
+ // check if indexing queue is still working on
+ // this node from a previous update
+ Document doc = index.indexingQueue.removeDocument(uuidString);
+ if (doc != null)
+ {
+ Util.disposeDocument(doc);
+ }
+ Term idTerm = new Term(FieldNames.UUID, uuidString);
+ // if the document cannot be deleted from the volatile index
+ // delete it from one of the persistent indexes.
+ int num = index.volatileIndex.removeDocument(idTerm);
+ if (num == 0)
+ {
+ for (int i = index.indexes.size() - 1; i >= 0; i--)
+ {
+ // only look in registered indexes
+ PersistentIndex idx = (PersistentIndex)index.indexes.get(i);
+ if (index.indexNames.contains(idx.getName()))
+ {
+ num = idx.removeDocument(idTerm);
+ if (num > 0)
+ {
+ return;
+ }
+ }
+ }
+ }
+ }
- /**
- * Starts a transaction.
- */
- private static class Start extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer(ENTRY_LENGTH);
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.DELETE_NODE);
+ logLine.append(' ');
+ logLine.append(uuid);
+ return logLine.toString();
+ }
+ }
- /**
- * Creates a new Start transaction action.
- *
- * @param transactionId
- * the id of the transaction that started.
- */
- Start(long transactionId) {
- super(transactionId, Action.TYPE_START);
- }
+ /**
+ * Starts a transaction.
+ */
+ private static class Start extends Action
+ {
- /**
- * Creates a new Start action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this method.
- * @return the Start action.
- */
- static Start fromString(long transactionId, String arguments) {
- return new Start(transactionId);
- }
+ /**
+ * Creates a new Start transaction action.
+ *
+ * @param transactionId
+ * the id of the transaction that started.
+ */
+ Start(long transactionId)
+ {
+ super(transactionId, Action.TYPE_START);
+ }
- /**
- * Sets the current transaction id on <code>index</code>.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- index.currentTransactionId = getTransactionId();
- }
+ /**
+ * Creates a new Start action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this method.
+ * @return the Start action.
+ */
+ static Start fromString(long transactionId, String arguments)
+ {
+ return new Start(transactionId);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- return Long.toString(getTransactionId()) + ' ' + Action.START;
- }
- }
+ /**
+ * Sets the current transaction id on <code>index</code>.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ index.currentTransactionId = getTransactionId();
+ }
- /**
- * Commits the volatile index to disk.
- */
- private static class VolatileCommit extends Action {
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ return Long.toString(getTransactionId()) + ' ' + Action.START;
+ }
+ }
- /**
- * The name of the target index to commit to.
- */
- private final String targetIndex;
+ /**
+ * Commits the volatile index to disk.
+ */
+ private static class VolatileCommit extends Action
+ {
- /**
- * Creates a new VolatileCommit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- */
- VolatileCommit(long transactionId, String targetIndex) {
- super(transactionId, Action.TYPE_VOLATILE_COMMIT);
- this.targetIndex = targetIndex;
- }
+ /**
+ * The name of the target index to commit to.
+ */
+ private final String targetIndex;
- /**
- * Creates a new VolatileCommit action.
- *
- * @param transactionId
- * the id of the transaction that executes this action.
- * @param arguments
- * ignored by this implementation.
- * @return the VolatileCommit action.
- */
- static VolatileCommit fromString(long transactionId, String arguments) {
- return new VolatileCommit(transactionId, arguments);
- }
+ /**
+ * Creates a new VolatileCommit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ */
+ VolatileCommit(long transactionId, String targetIndex)
+ {
+ super(transactionId, Action.TYPE_VOLATILE_COMMIT);
+ this.targetIndex = targetIndex;
+ }
- /**
- * Commits the volatile index to disk.
- *
- * @inheritDoc
- */
- public void execute(MultiIndex index) throws IOException {
- VolatileIndex volatileIndex = index.getVolatileIndex();
- PersistentIndex persistentIndex = index
- .getOrCreateIndex(targetIndex);
- persistentIndex.copyIndex(volatileIndex);
- index.resetVolatileIndex();
- }
+ /**
+ * Creates a new VolatileCommit action.
+ *
+ * @param transactionId
+ * the id of the transaction that executes this action.
+ * @param arguments
+ * ignored by this implementation.
+ * @return the VolatileCommit action.
+ */
+ static VolatileCommit fromString(long transactionId, String arguments)
+ {
+ return new VolatileCommit(transactionId, arguments);
+ }
- /**
- * @inheritDoc
- */
- public String toString() {
- StringBuffer logLine = new StringBuffer();
- logLine.append(Long.toString(getTransactionId()));
- logLine.append(' ');
- logLine.append(Action.VOLATILE_COMMIT);
- logLine.append(' ');
- logLine.append(targetIndex);
- return logLine.toString();
- }
- }
+ /**
+ * Commits the volatile index to disk.
+ *
+ * @inheritDoc
+ */
+ public void execute(MultiIndex index) throws IOException
+ {
+ VolatileIndex volatileIndex = index.getVolatileIndex();
+ PersistentIndex persistentIndex = index.getOrCreateIndex(targetIndex);
+ persistentIndex.copyIndex(volatileIndex);
+ index.resetVolatileIndex();
+ }
+
+ /**
+ * @inheritDoc
+ */
+ public String toString()
+ {
+ StringBuffer logLine = new StringBuffer();
+ logLine.append(Long.toString(getTransactionId()));
+ logLine.append(' ');
+ logLine.append(Action.VOLATILE_COMMIT);
+ logLine.append(' ');
+ logLine.append(targetIndex);
+ return logLine.toString();
+ }
+ }
+
+ /**
+ * Set indexer io mode.
+ * @param ioMode
+ */
+ public void setIndexerIoMode(IndexerIoMode ioMode)
+ {
+ log.info("Indexer io mode=" + ioMode);
+ //do some thing if changed
+ if (!this.ioMode.equals(ioMode))
+ {
+ this.ioMode = ioMode;
+ switch (ioMode)
+ {
+ case READ_ONLY :
+ // stop timer
+ flushTask.cancel();
+ break;
+ case READ_WRITE :
+ scheduleFlushTask();
+ break;
+ }
+ }
+
+ }
}
Modified: jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
--- jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-12-09 16:24:25 UTC (rev 974)
+++ jcr/branches/1.12.0-JBC/component/core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java 2009-12-09 16:25:01 UTC (rev 975)
@@ -418,7 +418,7 @@
/**
* Indexer io mode
*/
- private IndexerIoMode ioMode;
+ private IndexerIoMode ioMode = IndexerIoMode.READ_WRITE;
/**
* Working constructor.
@@ -2651,8 +2651,20 @@
public void setIndexerIoMode(IndexerIoMode ioMode) throws IOException, RepositoryException
{
log.info("Indexer io mode=" + ioMode);
- this.ioMode = ioMode;
+ //do some thing if changed
+ if (!this.ioMode.equals(ioMode))
+ {
+ this.ioMode = ioMode;
+ switch (ioMode)
+ {
+ case READ_ONLY :
+ index.setIndexerIoMode(ioMode);
+ break;
+ case READ_WRITE :
+ index.setIndexerIoMode(ioMode);
+ break;
+ }
+ }
}
-
}
More information about the exo-jcr-commits
mailing list