[hibernate-commits] Hibernate SVN: r15704 - in search/trunk/src/java/org/hibernate/search/backend: impl/lucene and 1 other directories.

hibernate-commits at lists.jboss.org hibernate-commits at lists.jboss.org
Thu Dec 18 08:24:48 EST 2008


Author: sannegrinovero
Date: 2008-12-18 08:24:48 -0500 (Thu, 18 Dec 2008)
New Revision: 15704

Modified:
   search/trunk/src/java/org/hibernate/search/backend/Workspace.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/PerDPQueueProcessor.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/AddWorkDelegate.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteExtWorkDelegate.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteWorkDelegate.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/LuceneWorkDelegate.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/OptimizeWorkDelegate.java
   search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/PurgeAllWorkDelegate.java
Log:
HSEARCH-326 Drop support for IndexReader usage to update indexes. This also removes more unneeded Locks.

Modified: search/trunk/src/java/org/hibernate/search/backend/Workspace.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/Workspace.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/Workspace.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -8,9 +8,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.Directory;
 import org.slf4j.Logger;
 
 import org.hibernate.annotations.common.AssertionFailure;
@@ -24,13 +22,8 @@
 
 /**
  * Lucene workspace for a DirectoryProvider.<p/>
- * <ul>
- * <li>Before using {@link #getIndexWriter} or {@link #getIndexReader} the lock must be acquired,
- * and resources must be closed before releasing the lock.</li>
- * <li>One cannot get an IndexWriter when an IndexReader has been acquired and not closed, and vice-versa.</li>
- * <li>The recommended approach is to execute all the modifications on the <code>IndexReader</code>, and after that on
- * the <code>IndexWriter</code></li>.
- * </ul>
+ * Before using {@link #getIndexWriter} the lock must be acquired,
+ * and resources must be closed before releasing the lock.
  *
  * @author Emmanuel Bernard
  * @author Hardy Ferentschik
@@ -41,11 +34,13 @@
 
 	private static final Logger log = LoggerFactory.make();
 	private static final Analyzer SIMPLE_ANALYZER = new SimpleAnalyzer();
+	private static final IndexWriter.MaxFieldLength maxFieldLength =
+		new IndexWriter.MaxFieldLength( IndexWriter.DEFAULT_MAX_FIELD_LENGTH );
 	
 	// invariant state:
 
 	private final SearchFactoryImplementor searchFactoryImplementor;
-	private final DirectoryProvider directoryProvider;
+	private final DirectoryProvider<?> directoryProvider;
 	private final OptimizerStrategy optimizerStrategy;
 	private final ReentrantLock lock;
 	private final Set<Class<?>> entitiesInDirectory;
@@ -54,11 +49,6 @@
 	// variable state:
 	
 	/**
-	 * Current open IndexReader, or null when closed. Guarded by synchronization.
-	 */
-	private IndexReader reader;
-	
-	/**
 	 * Current open IndexWriter, or null when closed. Guarded by synchronization.
 	 */
 	private IndexWriter writer;
@@ -88,13 +78,16 @@
 	/**
 	 * If optimization has not been forced give a chance to the configured OptimizerStrategy
 	 * to optimize the index.
-	 * @throws AssertionFailure if the lock is not owned or if an IndexReader is open.
+	 * To enter the optimization phase you need to acquire the lock first.
+	 * @throws AssertionFailure if the lock is not owned.
 	 */
 	public void optimizerPhase() {
 		assertOwnLock();
 		// used getAndSet(0) because Workspace is going to be reused by next transaction.
-		optimizerStrategy.addTransaction( operations.getAndSet( 0L ) );
-		optimizerStrategy.optimize( this );
+		synchronized (optimizerStrategy) {
+			optimizerStrategy.addTransaction( operations.getAndSet( 0L ) );
+			optimizerStrategy.optimize( this );
+		}
 	}
 	
 	/**
@@ -105,79 +98,25 @@
 	 * @see SearchFactory#optimize(Class)
 	 */
 	public void optimize() {
-		assertOwnLock(); // the DP is not affected, but needs to ensure the optimizerStrategy is accesses in threadsafe way
-		optimizerStrategy.optimizationForced();
-	}
-
-	/**
-	 * Gets an IndexReader to alter the index, opening one if needed.
-	 * The caller needs to own the lock relevant to this DirectoryProvider.
-	 * @throws AssertionFailure if an IndexWriter is open or if the lock is not owned.
-	 * @return a new IndexReader or one already open.
-	 * @see #lock()
-	 */
-	public synchronized IndexReader getIndexReader() {
-		assertOwnLock();
-		// one cannot access a reader for update while a writer is in use
-		if ( writer != null )
-			throw new AssertionFailure( "Tries to read for update an index while a writer is in use." );
-		if ( reader != null )
-			return reader;
-		Directory directory = directoryProvider.getDirectory();
-		try {
-			reader = IndexReader.open( directory, false );
-			log.trace( "IndexReader opened" );
+		// Needs to ensure the optimizerStrategy is accessed in a threadsafe way
+		synchronized (optimizerStrategy) {
+			optimizerStrategy.optimizationForced();
 		}
-		catch ( IOException e ) {
-			reader = null;
-			throw new SearchException( "Unable to open IndexReader on directory " + directory, e );
-		}
-		return reader;
 	}
 
 	/**
-	 * Closes a previously opened IndexReader.
-	 * @throws SearchException on IOException during Lucene close operation.
-	 * @throws AssertionFailure if the lock is not owned or if there is no IndexReader to close.
-	 * @see #getIndexReader()
-	 */
-	public synchronized void closeIndexReader() {
-		assertOwnLock();
-		IndexReader toClose = reader;
-		reader = null;
-		if ( toClose != null ) {
-			try {
-				toClose.close();
-				log.trace( "IndexReader closed" );
-			}
-			catch ( IOException e ) {
-				throw new SearchException( "Exception while closing IndexReader", e );
-			}
-		}
-		else {
-			throw new AssertionFailure( "No IndexReader open to close." );
-		}
-	}
-	
-	/**
 	 * Gets the IndexWriter, opening one if needed.
 	 * @param batchmode when true the indexWriter settings for batch mode will be applied.
 	 * Ignored if IndexWriter is open already.
-	 * @throws AssertionFailure if an IndexReader is open or the lock is not owned.
+	 * @throws AssertionFailure if the lock is not owned.
 	 * @throws SearchException on a IOException during index opening.
 	 * @return a new IndexWriter or one already open.
 	 */
 	public synchronized IndexWriter getIndexWriter(boolean batchmode) {
-		assertOwnLock();
-		// one has to close a reader for update before a writer is accessed
-		if ( reader != null )
-			throw new AssertionFailure( "Tries to open an IndexWriter while an IndexReader is open in update mode." );
 		if ( writer != null )
 			return writer;
 		try {
-			// don't care about the Analyzer as it will be selected during usage of IndexWriter.
-			IndexWriter.MaxFieldLength fieldLength = new IndexWriter.MaxFieldLength( IndexWriter.DEFAULT_MAX_FIELD_LENGTH );
-			writer = new IndexWriter( directoryProvider.getDirectory(), SIMPLE_ANALYZER, false, fieldLength ); // has been created at init time
+			writer = new IndexWriter( directoryProvider.getDirectory(), SIMPLE_ANALYZER, false, maxFieldLength ); // has been created at init time
 			indexingParams.applyToWriter( writer, batchmode );
 			log.trace( "IndexWriter opened" );
 		}
@@ -189,13 +128,12 @@
 	}
 
 	/**
-	 * Commits changes to a previously opened index writer.
+	 * Commits changes to a previously opened IndexWriter.
 	 *
 	 * @throws SearchException on IOException during Lucene close operation.
-	 * @throws AssertionFailure if there is no IndexWriter to close, or if the lock is not owned.
+	 * @throws AssertionFailure if there is no IndexWriter to close.
 	 */
 	public synchronized void commitIndexWriter() {
-		assertOwnLock();
 		if ( writer != null ) {
 			try {
 				writer.commit();
@@ -213,10 +151,9 @@
 	/**
 	 * Closes a previously opened IndexWriter.
 	 * @throws SearchException on IOException during Lucene close operation.
-	 * @throws AssertionFailure if there is no IndexWriter to close, or if the lock is not owned.
+	 * @throws AssertionFailure if there is no IndexWriter to close.
 	 */
 	public synchronized void closeIndexWriter() {
-		assertOwnLock();
 		IndexWriter toClose = writer;
 		writer = null;
 		if ( toClose != null ) {
@@ -252,34 +189,19 @@
 	
 	/**
 	 * Acquires a lock on the DirectoryProvider backing this Workspace;
-	 * this is required to use getIndexWriter(boolean), closeIndexWriter(),
-	 * getIndexReader(), closeIndexReader().
-	 * @see #getIndexWriter(boolean)
-	 * @see #closeIndexWriter()
-	 * @see #getIndexReader()
-	 * @see #closeIndexReader()
+	 * this is required to use optimizerPhase()
+	 * @see #optimizerPhase()
 	 */
 	public void lock() {
 		lock.lock();
 	}
 
 	/**
-	 * Releases the lock obtained by calling lock()
-	 * @throws AssertionFailure when unlocking without having closed IndexWriter or IndexReader.
+	 * Releases the lock obtained by calling lock(). The caller must own the lock.
 	 * @see #lock()
 	 */
-	public synchronized void unlock() {
-		try {
-			if ( this.reader != null ) {
-				throw new AssertionFailure( "Unlocking Workspace without having closed the IndexReader" );
-			}
-			if ( this.writer != null ) {
-				throw new AssertionFailure( "Unlocking Workspace without having closed the IndexWriter" );
-			}
-		}
-		finally {
-			lock.unlock();
-		}
+	public void unlock() {
+		lock.unlock();
 	}
 
 	private void assertOwnLock() {

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/PerDPQueueProcessor.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/PerDPQueueProcessor.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/PerDPQueueProcessor.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -4,17 +4,20 @@
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.slf4j.Logger;
 
-import org.hibernate.annotations.common.AssertionFailure;
 import org.hibernate.search.backend.LuceneWork;
 import org.hibernate.search.backend.Workspace;
 import org.hibernate.search.backend.impl.lucene.works.LuceneWorkVisitor;
 import org.hibernate.search.util.LoggerFactory;
 
 /**
+ * A Runnable containing a unit of changes to be applied to a specific index.
+ * After creation, use addWork(LuceneWork) to fill the changes queue and then
+ * run it to apply all changes. After run() this object should be discarded.
+ * @see Runnable
+ * @see #addWork(LuceneWork)
  * @author Sanne Grinovero
  */
 class PerDPQueueProcessor implements Runnable {
@@ -24,79 +27,36 @@
 	private final LuceneWorkVisitor worker;
 	private final ExecutorService executor;
 	private final List<LuceneWork> workOnWriter = new ArrayList<LuceneWork>();
-	private final List<LuceneWork> workOnReader= new ArrayList<LuceneWork>();
 	
-	// if any work passed to addWork needs one, set corresponding flag to true:
+	// if any work needs batchmode, set corresponding flag to true:
 	private boolean batchmode = false;
-	private boolean needsWriter = false;
-	private boolean preferReader = false;
 	
+	/**
+	 * @param resources All resources for the given DirectoryProvider are collected
+	 *  from this wrapping object.
+	 */
 	public PerDPQueueProcessor(PerDPResources resources) {
 		this.worker = resources.getVisitor();
 		this.workspace = resources.getWorkspace();
 		this.executor = resources.getExecutor();
 	}
 
+	/**
+	 * Adds a LuceneWork to the internal queue; queued work cannot be removed.
+	 * @param work
+	 */
 	public void addWork(LuceneWork work) {
 		if ( work.isBatch() ) {
 			batchmode = true;
 			log.debug( "Batch mode enabled" );
 		}
-		IndexInteractionType type = work.getWorkDelegate( worker ).getIndexInteractionType();
-		switch ( type ) {
-			case PREFER_INDEXREADER :
-				preferReader = true;
-				workOnReader.add( work );
-				break;
-			case NEEDS_INDEXWRITER :
-				needsWriter = true;
-				//fall through:
-			case PREFER_INDEXWRITER :
-				workOnWriter.add( work );
-				break;
-			default :
-				throw new AssertionFailure( "Uncovered switch case for type " + type );
-		}
+		workOnWriter.add( work );
 	}
 
-	public void run() {
-		// skip "resource optimization mode" when in batch to have all tasks use preferred (optimal) mode.
-		if ( ! batchmode ) {
-			// 	see if we can skip using some resource
-			if ( ! needsWriter ) { // no specific need:
-				if ( preferReader ) {
-					useReaderOnly();
-				}
-				else {
-					useWriterOnly();
-				}
-			}
-			else {
-				useWriterOnly();
-			}
-			if ( ! (workOnWriter.isEmpty() || workOnReader.isEmpty() ) ) {
-				throw new AssertionFailure(
-					"During non-batch mode performWorks tries to use both IndexWriter and IndexReader." );
-			}
-		}
-		// apply changes to index:
-		log.trace( "Locking Workspace (or waiting to...)" );
-		workspace.lock();
-		log.trace( "Workspace lock aquired." );
-		try {
-			performReaderWorks();
-			performWriterWorks();
-		}
-		finally {
-			workspace.unlock();
-			log.trace( "Unlocking Workspace" );
-		}
-	}
-
 	/**
 	 * Do all workOnWriter on an IndexWriter.
 	 */
-	private void performWriterWorks() {
+	public void run() {
 		if ( workOnWriter.isEmpty() ) {
 			return;
 		}
@@ -107,56 +67,37 @@
 				lw.getWorkDelegate( worker ).performWork( lw, indexWriter );
 			}
 			workspace.commitIndexWriter();
-			//TODO next line is assuming the OptimizerStrategy will need an IndexWriter;
-			// would be nicer to have the strategy put an OptimizeWork on the queue,
-			// or just return "yes please" (true) to some method?
-			//FIXME will not have a chance to trigger when no writer activity is done.
-			// this is currently ok, until we enable mod.counts for deletions too.
-			workspace.optimizerPhase();
+			//TODO skip this when indexing in batches:
+			performOptimizations();
 		}
 		finally {
 			workspace.closeIndexWriter();
 		}
 	}
-
-	/**
-	 * Do all workOnReader on an IndexReader.
-	 */
-	private void performReaderWorks() {
-		if ( workOnReader.isEmpty() ) {
-			return;
-		}
-		log.debug( "Opening an IndexReader for update" );
-		IndexReader indexReader = workspace.getIndexReader();
+	
+	private void performOptimizations() {
+		log.trace( "Locking Workspace (or waiting to...)" );
+		workspace.lock();
 		try {
-			for (LuceneWork lw : workOnReader) {
-				lw.getWorkDelegate( worker ).performWork( lw, indexReader );
-			}
+			log.trace( "Workspace lock aquired." );
+			//TODO next line is assuming the OptimizerStrategy will need an IndexWriter;
+			// would be nicer to have the strategy put an OptimizeWork on the queue,
+			// or just return "yes please" (true) to some method?
+			//FIXME will not have a chance to trigger when no "add" activity is done.
+			// this is correct until we enable modification counts for deletions too.
+			workspace.optimizerPhase();
 		}
 		finally {
-			workspace.closeIndexReader();
+			workspace.unlock();
+			log.trace( "Unlocked Workspace" );
 		}
 	}
 
 	/**
-	 * forces all work to be done using only an IndexReader
+	 * Each PerDPQueueProcessor is owned by an Executor,
+	 * which contains the threads allowed to execute this.
+	 * @return the Executor which should run this Runnable.
 	 */
-	private void useReaderOnly() {
-		log.debug( "Skipping usage of an IndexWriter for updates" );
-		workOnReader.addAll( workOnWriter );
-		workOnWriter.clear();
-	}
-
-	/**
-	 * forces all work to be done using only an IndexWriter
-	 */
-	private void useWriterOnly() {
-		log.debug( "Skipping usage of an IndexReader for updates" );
-		//position 0 needed to maintain correct ordering of Work: delete operations first.
-		workOnWriter.addAll( 0, workOnReader );
-		workOnReader.clear();
-	}
-
 	public ExecutorService getOwningExecutor() {
 		return executor;
 	}

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/AddWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/AddWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/AddWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -4,7 +4,6 @@
 import java.util.Map;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.search.Similarity;
 import org.slf4j.Logger;
@@ -13,7 +12,6 @@
 import org.hibernate.search.backend.AddLuceneWork;
 import org.hibernate.search.backend.LuceneWork;
 import org.hibernate.search.backend.Workspace;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 import org.hibernate.search.engine.DocumentBuilderIndexedEntity;
 import org.hibernate.search.util.LoggerFactory;
 import org.hibernate.search.util.ScopedAnalyzer;
@@ -38,13 +36,10 @@
 		this.workspace = workspace;
 	}
 
-	public IndexInteractionType getIndexInteractionType() {
-		return IndexInteractionType.NEEDS_INDEXWRITER;
-	}
-
 	public void performWork(LuceneWork work, IndexWriter writer) {
+		final Class<?> entityType = work.getEntityClass();
 		@SuppressWarnings("unchecked")
-		DocumentBuilderIndexedEntity documentBuilder = workspace.getDocumentBuilder( work.getEntityClass() );
+		DocumentBuilderIndexedEntity documentBuilder = workspace.getDocumentBuilder( entityType );
 		Map<String, String> fieldToAnalyzerMap = ( ( AddLuceneWork ) work ).getFieldToAnalyzerMap();
 		ScopedAnalyzer analyzer = ( ScopedAnalyzer ) documentBuilder.getAnalyzer();
 		analyzer = updateAnalyzerMappings( analyzer, fieldToAnalyzerMap, workspace );
@@ -52,12 +47,12 @@
 		if ( log.isTraceEnabled() ) {
 			log.trace(
 					"add to Lucene index: {}#{}:{}",
-					new Object[] { work.getEntityClass(), work.getId(), work.getDocument() }
+					new Object[] { entityType, work.getId(), work.getDocument() }
 			);
 		}
 		try {
 			//TODO the next two operations should be atomic to enable concurrent usage of IndexWriter
-			// make a wrapping Similarity based on ThreadLocals? or having it autoselect implementation basing on entity?
+			// make a wrapping Similarity based on ThreadLocals? or have it autoselect implementation basing on entity?
 			writer.setSimilarity( similarity );
 			writer.addDocument( work.getDocument(), analyzer );
 			workspace.incrementModificationCounter( 1 );
@@ -65,7 +60,7 @@
 		catch ( IOException e ) {
 			throw new SearchException(
 					"Unable to add to Lucene index: "
-							+ work.getEntityClass() + "#" + work.getId(), e
+							+ entityType + "#" + work.getId(), e
 			);
 		}
 	}
@@ -100,7 +95,4 @@
 		return analyzerClone;
 	}
 
-	public void performWork(LuceneWork work, IndexReader reader) {
-		throw new UnsupportedOperationException();
-	}
 }

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteExtWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteExtWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteExtWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -2,14 +2,12 @@
 
 import java.io.Serializable;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.hibernate.annotations.common.AssertionFailure;
 import org.hibernate.search.SearchException;
 import org.hibernate.search.backend.LuceneWork;
 import org.hibernate.search.backend.Workspace;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 import org.hibernate.search.engine.DocumentBuilderIndexedEntity;
 import org.hibernate.search.util.LoggerFactory;
 import org.slf4j.Logger;
@@ -25,8 +23,8 @@
  */
 public class DeleteExtWorkDelegate extends DeleteWorkDelegate {
 
-	private final Class managedType;
-	private final DocumentBuilderIndexedEntity builder;
+	private final Class<?> managedType;
+	private final DocumentBuilderIndexedEntity<?> builder;
 	private final Logger log = LoggerFactory.make();
 
 	DeleteExtWorkDelegate(Workspace workspace) {
@@ -39,13 +37,6 @@
 	}
 
 	@Override
-	public IndexInteractionType getIndexInteractionType() {
-		// no particular reason to prefer Reader, just it's possibly more tested
-		// as using the writer is an option of latest Lucene version only (2.4).
-		return IndexInteractionType.PREFER_INDEXREADER;
-	}
-
-	@Override
 	public void performWork(LuceneWork work, IndexWriter writer) {
 		checkType( work );
 		Serializable id = work.getId();
@@ -60,21 +51,6 @@
 		}
 	}
 
-	@Override
-	public void performWork(LuceneWork work, IndexReader reader) {
-		checkType( work );
-		Serializable id = work.getId();
-		log.trace( "Removing {}#{} by id using an IndexReader.", managedType, id );
-		Term idTerm = builder.getTerm( id );
-		try {
-			reader.deleteDocuments( idTerm );
-		}
-		catch ( Exception e ) {
-			String message = "Unable to remove " + managedType + "#" + id + " from index.";
-			throw new SearchException( message, e );
-		}
-	}
-	
 	private void checkType(final LuceneWork work) {
 		if ( work.getEntityClass() != managedType ) {
 			throw new AssertionFailure( "Unexpected type" );

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/DeleteWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -1,11 +1,9 @@
 package org.hibernate.search.backend.impl.lucene.works;
 
-import java.io.IOException;
+import java.io.Serializable;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.TermQuery;
@@ -14,7 +12,6 @@
 import org.hibernate.search.SearchException;
 import org.hibernate.search.backend.LuceneWork;
 import org.hibernate.search.backend.Workspace;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 import org.hibernate.search.engine.DocumentBuilderIndexedEntity;
 import org.hibernate.search.engine.DocumentBuilder;
 import org.hibernate.search.util.LoggerFactory;
@@ -38,18 +35,15 @@
 		this.workspace = workspace;
 	}
 
-	public IndexInteractionType getIndexInteractionType() {
-		return IndexInteractionType.PREFER_INDEXWRITER;
-	}
-
 	public void performWork(LuceneWork work, IndexWriter writer) {
 		final Class<?> entityType = work.getEntityClass();
-		log.trace( "Removing {}#{} by query.", entityType, work.getId() );
+		final Serializable id = work.getId();
+		log.trace( "Removing {}#{} by query.", entityType, id );
 		DocumentBuilderIndexedEntity<?> builder = workspace.getDocumentBuilder( entityType );
 
 		BooleanQuery entityDeletionQuery = new BooleanQuery();
 
-		TermQuery idQueryTerm = new TermQuery( builder.getTerm( work.getId() ) );
+		TermQuery idQueryTerm = new TermQuery( builder.getTerm( id ) );
 		entityDeletionQuery.add( idQueryTerm, BooleanClause.Occur.MUST );
 
 		Term classNameQueryTerm =  new Term( DocumentBuilder.CLASS_FIELDNAME, entityType.getName() );
@@ -60,59 +54,9 @@
 			writer.deleteDocuments( entityDeletionQuery );
 		}
 		catch ( Exception e ) {
-			String message = "Unable to remove " + entityType + "#" + work.getId() + " from index.";
+			String message = "Unable to remove " + entityType + "#" + id + " from index.";
 			throw new SearchException( message, e );
 		}
 	}
 
-	/*
-	 * This method is obsolete and was used pre Lucene 2.4. Now we are using IndexWriter.deleteDocuments(Query) to
-	 * delete index documents.
-	 *
-	 * This method might be deleted at some stage. (hardy)
-	 */
-	public void performWork(LuceneWork work, IndexReader reader) {
-		/**
-		 * even with Lucene 2.1, use of indexWriter to delete is not an option
-		 * We can only delete by term, and the index doesn't have a term that
-		 * uniquely identify the entry. See logic below
-		 */
-		final Class<?> entityType = work.getEntityClass();
-		log.trace( "Removing {}#{} from Lucene index.", entityType, work.getId() );
-		DocumentBuilderIndexedEntity<?> builder = workspace.getDocumentBuilder( entityType );
-		Term term = builder.getTerm( work.getId() );
-		TermDocs termDocs = null;
-		try {
-			//TODO is there a faster way?
-			//TODO include TermDocs into the workspace?
-			termDocs = reader.termDocs( term );
-			String entityName = entityType.getName();
-			while ( termDocs.next() ) {
-				int docIndex = termDocs.doc();
-				if ( entityName.equals( reader.document( docIndex ).get( DocumentBuilder.CLASS_FIELDNAME ) ) ) {
-					//remove only the one of the right class
-					//loop all to remove all the matches (defensive code)
-					reader.deleteDocument( docIndex );
-				}
-			}
-			//TODO shouldn't this use workspace.incrementModificationCounter( 1 ) ? 
-		}
-		catch ( Exception e ) {
-			throw new SearchException(
-					"Unable to remove from Lucene index: "
-							+ entityType + "#" + work.getId(), e
-			);
-		}
-		finally {
-			if ( termDocs != null ) {
-				try {
-					termDocs.close();
-				}
-				catch ( IOException e ) {
-					log.warn( "Unable to close termDocs properly", e );
-				}
-			}
-		}
-	}
-
 }

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/LuceneWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/LuceneWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/LuceneWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -1,9 +1,7 @@
 package org.hibernate.search.backend.impl.lucene.works;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.hibernate.search.backend.LuceneWork;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 
 /**
  * @author Sanne Grinovero
@@ -11,12 +9,6 @@
 public interface LuceneWorkDelegate {
 	
 	/**
-	 * @return the IndexInteractionType needed to accomplish this work (reader or writer)
-	 * 	or have a chance to express any preference for performance optimizations.
-	 */
-	IndexInteractionType getIndexInteractionType();
-
-	/**
 	 * Will perform work on an IndexWriter.
 	 * @param work the LuceneWork to apply to the IndexWriter.
 	 * @param writer the IndexWriter to use.
@@ -24,12 +16,4 @@
 	 */
 	void performWork(LuceneWork work, IndexWriter writer);
 	
-	/**
-	 * Will perform this work on an IndexReader.
-	 * @param work the LuceneWork to apply to the IndexReader.
-	 * @param reader the IndexReader to use.
-	 * @throws UnsupportedOperationException when the work is not compatible with an IndexReader.
-	 */
-	void performWork(LuceneWork work, IndexReader reader);
-	
 }

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/OptimizeWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/OptimizeWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/OptimizeWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -2,14 +2,12 @@
 
 import java.io.IOException;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.slf4j.Logger;
 
 import org.hibernate.search.SearchException;
 import org.hibernate.search.backend.LuceneWork;
 import org.hibernate.search.backend.Workspace;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 import org.hibernate.search.util.LoggerFactory;
 
 /**
@@ -32,23 +30,16 @@
 		this.workspace = workspace;
 	}
 
-	public IndexInteractionType getIndexInteractionType() {
-		return IndexInteractionType.NEEDS_INDEXWRITER;
-	}
-
 	public void performWork(LuceneWork work, IndexWriter writer) {
-		log.trace( "optimize Lucene index: {}", work.getEntityClass() );
+		final Class<?> entityType = work.getEntityClass();
+		log.trace( "optimize Lucene index: {}", entityType );
 		try {
 			writer.optimize();
 			workspace.optimize();
 		}
 		catch ( IOException e ) {
-			throw new SearchException( "Unable to optimize Lucene index: " + work.getEntityClass(), e );
+			throw new SearchException( "Unable to optimize Lucene index: " + entityType, e );
 		}
 	}
 
-	public void performWork(LuceneWork work, IndexReader reader) {
-		throw new UnsupportedOperationException();
-	}
-
 }

Modified: search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/PurgeAllWorkDelegate.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/PurgeAllWorkDelegate.java	2008-12-17 14:41:24 UTC (rev 15703)
+++ search/trunk/src/java/org/hibernate/search/backend/impl/lucene/works/PurgeAllWorkDelegate.java	2008-12-18 13:24:48 UTC (rev 15704)
@@ -1,13 +1,11 @@
 package org.hibernate.search.backend.impl.lucene.works;
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.slf4j.Logger;
 
 import org.hibernate.search.SearchException;
 import org.hibernate.search.backend.LuceneWork;
-import org.hibernate.search.backend.impl.lucene.IndexInteractionType;
 import org.hibernate.search.engine.DocumentBuilder;
 import org.hibernate.search.util.LoggerFactory;
 
@@ -27,30 +25,16 @@
 	PurgeAllWorkDelegate() {
 	}
 
-	public IndexInteractionType getIndexInteractionType() {
-		return IndexInteractionType.PREFER_INDEXREADER;
-	}
-
 	public void performWork(LuceneWork work, IndexWriter writer) {
-		log.trace( "purgeAll Lucene index using IndexWriter for type: {}", work.getEntityClass() );
+		final Class<?> entityType = work.getEntityClass();
+		log.trace( "purgeAll Lucene index using IndexWriter for type: {}", entityType );
 		try {
-			Term term = new Term( DocumentBuilder.CLASS_FIELDNAME, work.getEntityClass().getName() );
+			Term term = new Term( DocumentBuilder.CLASS_FIELDNAME, entityType.getName() );
 			writer.deleteDocuments( term );
 		}
 		catch (Exception e) {
-			throw new SearchException( "Unable to purge all from Lucene index: " + work.getEntityClass(), e );
+			throw new SearchException( "Unable to purge all from Lucene index: " + entityType, e );
 		}
 	}
 
-	public void performWork(LuceneWork work, IndexReader reader) {
-		log.trace( "purgeAll Lucene index using IndexReader for type: {}", work.getEntityClass() );
-		try {
-			Term term = new Term( DocumentBuilder.CLASS_FIELDNAME, work.getEntityClass().getName() );
-			reader.deleteDocuments( term );
-		}
-		catch (Exception e) {
-			throw new SearchException( "Unable to purge all from Lucene index: " + work.getEntityClass(), e );
-		}
-	}
-	
 }




More information about the hibernate-commits mailing list