[infinispan-commits] Infinispan SVN: r2485 - in trunk/lucene-directory/src: main/java/org/infinispan/lucene/readlocks and 2 other directories.

infinispan-commits at lists.jboss.org infinispan-commits at lists.jboss.org
Mon Oct 4 13:05:18 EDT 2010


Author: sannegrinovero
Date: 2010-10-04 13:05:17 -0400 (Mon, 04 Oct 2010)
New Revision: 2485

Added:
   trunk/lucene-directory/src/test/java/org/infinispan/lucene/DirectoryOnMultipleCachesTest.java
   trunk/lucene-directory/src/test/java/org/infinispan/lucene/readlocks/ConfigurationCheckTest.java
Modified:
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexInput.java
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexOutput.java
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/SingleChunkIndexInput.java
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/DistributedSegmentReadLocker.java
   trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/LocalLockMergingSegmentReadLocker.java
   trunk/lucene-directory/src/test/java/org/infinispan/lucene/CacheTestSupport.java
   trunk/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java
   trunk/lucene-directory/src/test/java/org/infinispan/lucene/SimpleLuceneTest.java
Log:
[ISPN-616] (Have the Lucene directory use specialized caches for different purposes) - merged from 4.2

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -85,7 +85,8 @@
    // access type will be changed in the next Lucene version
    volatile boolean isOpen = true;
 
-   private final AdvancedCache cache;
+   private final AdvancedCache metadataCache;
+   private final AdvancedCache chunksCache;
    // indexName is required when one common cache is used
    private final String indexName;
    // chunk size used in this directory, static filed not used as we want to have different chunk
@@ -95,44 +96,81 @@
    private final FileListOperations fileOps;
    private final SegmentReadLocker readLocks;
 
-   public InfinispanDirectory(Cache cache, String indexName, LockFactory lf, int chunkSize, SegmentReadLocker readLocker) {
-      checkNotNull(cache, "cache");
+   /**
+    * @param metadataCache the cache to be used for all smaller metadata: prefer replication over distribution, avoid eviction
+    * @param chunksCache the cache to use for the space consuming segments: prefer distribution, enable eviction if needed
+    * @param indexName the unique index name, useful to store multiple indexes in the same caches
+    * @param lf the LockFactory to be used by IndexWriters. @see org.infinispan.lucene.locking
+    * @param chunkSize segments are fragmented in chunkSize bytes; larger values are more efficient for searching but less for distribution and network replication
+    * @param readLocker @see org.infinispan.lucene.readlocks for some implementations; you might be able to provide more efficient implementations by controlling the IndexReader's lifecycle.
+    */
+   public InfinispanDirectory(Cache metadataCache, Cache chunksCache, String indexName, LockFactory lf, int chunkSize, SegmentReadLocker readLocker) {
+      checkNotNull(metadataCache, "metadataCache");
+      checkNotNull(chunksCache, "chunksCache");
       checkNotNull(indexName, "indexName");
       checkNotNull(lf, "LockFactory");
       checkNotNull(readLocker, "SegmentReadLocker");
       if (chunkSize <= 0)
          throw new IllegalArgumentException("chunkSize must be a positive integer");
-      this.cache = cache.getAdvancedCache();
+      this.metadataCache = metadataCache.getAdvancedCache();
+      this.chunksCache = chunksCache.getAdvancedCache();
       this.indexName = indexName;
       this.setLockFactory(lf);
       this.chunkSize = chunkSize;
-      this.fileOps = new FileListOperations(this.cache, indexName);
+      this.fileOps = new FileListOperations(this.metadataCache, indexName);
       this.readLocks = readLocker;
    }
    
+   @Deprecated//too many constructors, this will be removed
+   public InfinispanDirectory(Cache cache, String indexName, LockFactory lf, int chunkSize, SegmentReadLocker readLocker) {
+      this(cache, cache, indexName, lf, chunkSize, readLocker);
+   }
+   
+   @Deprecated//too many constructors, this will be removed
    public InfinispanDirectory(Cache cache, String indexName, LockFactory lf, int chunkSize) {
-      this(cache, indexName, lf, chunkSize,
-               new DistributedSegmentReadLocker(cache, indexName));
+      this(cache, indexName, lf, chunkSize, makeDefaultSegmentReadLocker(cache, cache, cache, indexName));
    }
-
+   
+   @Deprecated//too many constructors, this will be removed
    public InfinispanDirectory(Cache cache, String indexName, int chunkSize, SegmentReadLocker readLocker) {
       this(cache, indexName, makeDefaultLockFactory(cache, indexName), chunkSize, readLocker);
    }
+
+   /**
+    * This constructor assumes that three different caches are being used with specialized configurations for each
+    * cache usage
+    * @param metadataCache contains the metadata of stored elements
+    * @param chunksCache cache containing the bulk of the index; this is the larger part of data
+    * @param distLocksCache cache to store locks; should be replicated and not using a persistent CacheStore
+    * @param indexName identifies the index; you can store different indexes in the same set of caches using different identifiers
+    * @param chunkSize the maximum size in bytes for each chunk of data: larger sizes offer better search performance
+    * but might be problematic to handle during network replication or storage
+    */
+   public InfinispanDirectory(Cache metadataCache, Cache chunksCache, Cache distLocksCache, String indexName, int chunkSize) {
+      this(metadataCache, chunksCache, indexName, makeDefaultLockFactory(distLocksCache, indexName),
+               chunkSize, makeDefaultSegmentReadLocker(metadataCache, chunksCache, distLocksCache, indexName));
+   }
    
+   @Deprecated//too many constructors, this will be removed
    public InfinispanDirectory(Cache cache, String indexName, LockFactory lf) {
       this(cache, indexName, lf, DEFAULT_BUFFER_SIZE);
    }
 
+   @Deprecated//too many constructors, this will be removed
    public InfinispanDirectory(Cache cache, String indexName, int chunkSize) {
       this(cache, indexName, makeDefaultLockFactory(cache, indexName), chunkSize);
    }
 
+   /**
+    * @param cache the cache to use to store the index
+    * @param indexName identifies the index; you can store different indexes in the same set of caches using different identifiers
+    */
    public InfinispanDirectory(Cache cache, String indexName) {
-      this(cache, indexName, makeDefaultLockFactory(cache, indexName), DEFAULT_BUFFER_SIZE);
+      this(cache, cache, cache, indexName, DEFAULT_BUFFER_SIZE);
    }
 
    public InfinispanDirectory(Cache cache) {
-      this(cache, "");
+      this(cache, cache, cache, "", DEFAULT_BUFFER_SIZE);
    }
 
    /**
@@ -179,7 +217,7 @@
       else {
          FileCacheKey key = new FileCacheKey(indexName, fileName);
          file.touch();
-         cache.put(key, file);
+         metadataCache.put(key, file);
       }
    }
 
@@ -206,21 +244,21 @@
       Object ob;
       do {
          ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i);
-         ob = cache.get(fromChunkKey);
+         ob = chunksCache.get(fromChunkKey);
          if (ob == null) {
             break;
          }
          ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i);
-         cache.withFlags(Flag.SKIP_REMOTE_LOOKUP).put(toChunkKey, ob);
+         chunksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).put(toChunkKey, ob);
       } while (true);
       
       // rename metadata first
-      boolean batching = cache.startBatch();
+      boolean batching = metadataCache.startBatch();
       FileCacheKey fromKey = new FileCacheKey(indexName, from);
-      FileMetadata metadata = (FileMetadata) cache.withFlags(Flag.SKIP_LOCKING).get(fromKey);
-      cache.put(new FileCacheKey(indexName, to), metadata);
+      FileMetadata metadata = (FileMetadata) metadataCache.withFlags(Flag.SKIP_LOCKING).get(fromKey);
+      metadataCache.put(new FileCacheKey(indexName, to), metadata);
       fileOps.removeAndAdd(from, to);
-      if (batching) cache.endBatch(true);
+      if (batching) metadataCache.endBatch(true);
       
       // now trigger deletion of old file chunks:
       readLocks.deleteOrReleaseReadLock(from);
@@ -249,7 +287,7 @@
    public IndexOutput createOutput(String name) throws IOException {
       final FileCacheKey key = new FileCacheKey(indexName, name);
       // creating new file, metadata is added on flush() or close() of IndexOutPut
-      return new InfinispanIndexOutput(cache, key, chunkSize, fileOps);
+      return new InfinispanIndexOutput(metadataCache, chunksCache, key, chunkSize, fileOps);
    }
 
    /**
@@ -257,13 +295,13 @@
     */
    public IndexInput openInput(String name) throws IOException {
       final FileCacheKey fileKey = new FileCacheKey(indexName, name);
-      FileMetadata fileMetadata = (FileMetadata) cache.withFlags(Flag.SKIP_LOCKING).get(fileKey);
+      FileMetadata fileMetadata = (FileMetadata) metadataCache.withFlags(Flag.SKIP_LOCKING).get(fileKey);
       if (fileMetadata == null) {
          throw new FileNotFoundException("Error loading medatada for index file: " + fileKey);
       }
       else if (fileMetadata.getSize() <= fileMetadata.getBufferSize()) {
          //files smaller than chunkSize don't need a readLock
-         return new SingleChunkIndexInput(cache, fileKey, fileMetadata);
+         return new SingleChunkIndexInput(chunksCache, fileKey, fileMetadata);
       }
       else {
          boolean locked = readLocks.aquireReadLock(name);
@@ -271,7 +309,7 @@
             // safest reaction is to tell this file doesn't exist anymore.
             throw new FileNotFoundException("Error loading medatada for index file: " + fileKey);
          }
-         return new InfinispanIndexInput(cache, fileKey, fileMetadata, readLocks);
+         return new InfinispanIndexInput(chunksCache, fileKey, fileMetadata, readLocks);
       }
    }
 
@@ -293,10 +331,6 @@
       return "InfinispanDirectory{" + "indexName='" + indexName + '\'' + '}';
    }
 
-   public Cache getCache() {
-      return cache;
-   }
-
    /** new name for list() in Lucene 3.0 **/
    public String[] listAll() throws IOException {
       return list();
@@ -308,6 +342,12 @@
       return new BaseLockFactory(cache, indexName);
    }
    
+   private static SegmentReadLocker makeDefaultSegmentReadLocker(Cache metadataCache, Cache chunksCache, Cache distLocksCache, String indexName) {
+      checkNotNull(distLocksCache, "distLocksCache");
+      checkNotNull(indexName, "indexName");
+      return new DistributedSegmentReadLocker(distLocksCache, chunksCache, metadataCache, indexName);
+   }
+   
    private static void checkNotNull(Object v, String objectname) {
       if (v == null)
          throw new IllegalArgumentException(objectname + " must not be null");

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexInput.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexInput.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexInput.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -44,7 +44,7 @@
 
    private static final Log log = LogFactory.getLog(InfinispanIndexInput.class);
 
-   private final AdvancedCache cache;
+   private final AdvancedCache chunksCache;
    private final FileCacheKey fileKey;
    private final int chunkSize;
    private final SegmentReadLocker readLocks;
@@ -59,8 +59,8 @@
 
    private boolean isClone;
 
-   public InfinispanIndexInput(AdvancedCache cache, FileCacheKey fileKey, FileMetadata fileMetadata, SegmentReadLocker readLocks) throws FileNotFoundException {
-      this.cache = cache;
+   public InfinispanIndexInput(AdvancedCache chunksCache, FileCacheKey fileKey, FileMetadata fileMetadata, SegmentReadLocker readLocks) throws FileNotFoundException {
+      this.chunksCache = chunksCache;
       this.fileKey = fileKey;
       this.chunkSize = fileMetadata.getBufferSize();
       this.fileLength = fileMetadata.getSize();
@@ -133,7 +133,7 @@
 
    private void setBufferToCurrentChunk() throws IOException {
       ChunkCacheKey key = new ChunkCacheKey(fileKey.getIndexName(), filename, currentLoadedChunk);
-      buffer = (byte[]) cache.withFlags(Flag.SKIP_LOCKING).get(key);
+      buffer = (byte[]) chunksCache.withFlags(Flag.SKIP_LOCKING).get(key);
       if (buffer == null) {
          throw new IOException("Read past EOF: Chunk value could not be found for key " + key);
       }
@@ -144,7 +144,7 @@
    // RAMDirectory teaches to position the cursor to the end of previous chunk in this case
    private void setBufferToCurrentChunkIfPossible() throws IOException {
       ChunkCacheKey key = new ChunkCacheKey(fileKey.getIndexName(), filename, currentLoadedChunk);
-      buffer = (byte[]) cache.withFlags(Flag.SKIP_LOCKING).get(key);
+      buffer = (byte[]) chunksCache.withFlags(Flag.SKIP_LOCKING).get(key);
       if (buffer == null) {
          currentLoadedChunk--;
          bufferPosition = chunkSize;

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexOutput.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexOutput.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanIndexOutput.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -44,7 +44,8 @@
    private static final Log log = LogFactory.getLog(InfinispanIndexOutput.class);
 
    private final int bufferSize;
-   private final AdvancedCache cache;
+   private final AdvancedCache chunksCache;
+   private final AdvancedCache metadataCache;
    private final FileMetadata file;
    private final FileCacheKey fileKey;
    private final FileListOperations fileOps;
@@ -56,8 +57,9 @@
    private int currentChunkNumber = 0;
    private boolean needsAddingToFileList = true;
 
-   public InfinispanIndexOutput(AdvancedCache cache, FileCacheKey fileKey, int bufferSize, FileListOperations fileList) throws IOException {
-      this.cache = cache;
+   public InfinispanIndexOutput(AdvancedCache metadataCache, AdvancedCache chunksCache, FileCacheKey fileKey, int bufferSize, FileListOperations fileList) throws IOException {
+      this.metadataCache = metadataCache;
+      this.chunksCache = chunksCache;
       this.fileKey = fileKey;
       this.bufferSize = bufferSize;
       this.fileOps = fileList;
@@ -98,7 +100,7 @@
       storeCurrentBuffer();// save data first
       currentChunkNumber++;
       // check if we have to create new chunk, or get already existing in cache for modification
-      buffer = getChunkById(cache, fileKey, currentChunkNumber, bufferSize);
+      buffer = getChunkById(chunksCache, fileKey, currentChunkNumber, bufferSize);
       positionInBuffer = 0;
    }
 
@@ -147,19 +149,23 @@
             System.arraycopy(buffer, 0, bufferToFlush, 0, newBufferSize);
          }
       }
-      boolean microbatch = cache.startBatch();
+      boolean microbatch = false;
       // add chunk to cache
       if ( ! writingOnLastChunk || this.positionInBuffer != 0) {
+         if (chunksCache == metadataCache) {
+            //as we do an operation on chunks and one on metadata, it's not useful to start a batch unless it's the same cache
+            microbatch = chunksCache.startBatch();
+         }
          // create key for the current chunk
          ChunkCacheKey key = new ChunkCacheKey(fileKey.getIndexName(), fileKey.getFileName(), currentChunkNumber);
          if (trace) log.trace("Storing segment chunk: " + key);
-         cache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).put(key, bufferToFlush);
+         chunksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).put(key, bufferToFlush);
       }
       // override existing file header with new size and updated accesstime
       file.touch();
-      cache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).put(fileKey, file);
+      metadataCache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).put(fileKey, file);
       registerToFileListIfNeeded();
-      if (microbatch) cache.endBatch(true);
+      if (microbatch) chunksCache.endBatch(true);
    }
 
    private void registerToFileListIfNeeded() {
@@ -201,7 +207,7 @@
       }
       if (requestedChunkNumber != currentChunkNumber) {
          storeCurrentBuffer();
-         buffer = getChunkById(cache, fileKey, requestedChunkNumber, bufferSize);
+         buffer = getChunkById(chunksCache, fileKey, requestedChunkNumber, bufferSize);
          currentChunkNumber = requestedChunkNumber;
       }
       positionInBuffer = getPositionInBuffer(pos, bufferSize);

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/SingleChunkIndexInput.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/SingleChunkIndexInput.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/SingleChunkIndexInput.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -43,9 +43,9 @@
    private final byte[] buffer;
    private int bufferPosition;
 
-   public SingleChunkIndexInput(AdvancedCache cache, FileCacheKey fileKey, FileMetadata fileMetadata) throws FileNotFoundException {
+   public SingleChunkIndexInput(AdvancedCache chunksCache, FileCacheKey fileKey, FileMetadata fileMetadata) throws FileNotFoundException {
       ChunkCacheKey key = new ChunkCacheKey(fileKey.getIndexName(), fileKey.getFileName(), 0);
-      byte[] b = (byte[]) cache.withFlags(Flag.SKIP_LOCKING).get(key);
+      byte[] b = (byte[]) chunksCache.withFlags(Flag.SKIP_LOCKING).get(key);
       if (b == null) {
          buffer = new byte[0];
       }

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/DistributedSegmentReadLocker.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/DistributedSegmentReadLocker.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/DistributedSegmentReadLocker.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -49,18 +49,31 @@
    
    private static final Log log = LogFactory.getLog(DistributedSegmentReadLocker.class);
    
-   private final AdvancedCache cache;
+   private final AdvancedCache locksCache;
+   private final AdvancedCache chunksCache;
+   private final AdvancedCache metadataCache;
    private final String indexName;
    
-   public DistributedSegmentReadLocker(Cache cache, String indexName) {
-      if (cache == null)
-         throw new IllegalArgumentException("Cache must not be null");
+   public DistributedSegmentReadLocker(Cache locksCache, Cache chunksCache, Cache metadataCache, String indexName) {
+      if (locksCache == null)
+         throw new IllegalArgumentException("locksCache must not be null");
+      if (chunksCache == null)
+         throw new IllegalArgumentException("chunksCache must not be null");
+      if (metadataCache == null)
+         throw new IllegalArgumentException("metadataCache must not be null");
       if (indexName == null)
          throw new IllegalArgumentException("index name must not be null");
       this.indexName = indexName;
-      this.cache = cache.getAdvancedCache();
+      this.locksCache = locksCache.getAdvancedCache();
+      this.chunksCache = chunksCache.getAdvancedCache();
+      this.metadataCache = metadataCache.getAdvancedCache();
+      verifyCacheHasnoEviction(this.locksCache);
    }
 
+   public DistributedSegmentReadLocker(Cache cache, String indexName) {
+      this(cache, cache, cache, indexName);
+   }
+
    /**
     * Deletes or releases a read-lock for the specified filename, so that if it was marked as deleted and
     * no other {@link InfinispanIndexInput} instances are reading from it, then it will
@@ -77,23 +90,23 @@
       FileReadLockKey readLockKey = new FileReadLockKey(indexName, filename);
       int newValue = 0;
       boolean done = false;
-      Object lockValue = cache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
+      Object lockValue = locksCache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
       while (done == false) {
          if (lockValue == null) {
-            lockValue = cache.withFlags(Flag.SKIP_CACHE_STORE).putIfAbsent(readLockKey, Integer.valueOf(0));
+            lockValue = locksCache.withFlags(Flag.SKIP_CACHE_STORE).putIfAbsent(readLockKey, Integer.valueOf(0));
             done = (null == lockValue);
          }
          else {
             int refCount = (Integer) lockValue;
             newValue = refCount - 1;
-            done = cache.withFlags(Flag.SKIP_CACHE_STORE).replace(readLockKey, refCount, newValue);
+            done = locksCache.withFlags(Flag.SKIP_CACHE_STORE).replace(readLockKey, refCount, newValue);
             if (!done) {
-               lockValue = cache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
+               lockValue = locksCache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
             }
          }
       }
       if (newValue == 0) {
-         realFileDelete(readLockKey, cache);
+         realFileDelete(readLockKey, locksCache, chunksCache, metadataCache);
       }
    }
    
@@ -114,7 +127,7 @@
     */
    public boolean aquireReadLock(String filename) {
       FileReadLockKey readLockKey = new FileReadLockKey(indexName, filename);
-      Object lockValue = cache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
+      Object lockValue = locksCache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
       boolean done = false;
       while (done == false) {
          if (lockValue != null) {
@@ -124,21 +137,21 @@
                return false;
             }
             Integer newValue = Integer.valueOf(refCount + 1);
-            done = cache.withFlags(Flag.SKIP_CACHE_STORE).replace(readLockKey, lockValue, newValue);
+            done = locksCache.withFlags(Flag.SKIP_CACHE_STORE).replace(readLockKey, lockValue, newValue);
             if ( ! done) {
-               lockValue = cache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
+               lockValue = locksCache.withFlags(Flag.SKIP_LOCKING, Flag.SKIP_CACHE_STORE).get(readLockKey);
             }
          } else {
             // readLocks are not stored, so if there's no value assume it's ==1, which means
             // existing file, not deleted, nobody else owning a read lock. but check for ambiguity
-            lockValue = cache.withFlags(Flag.SKIP_CACHE_STORE).putIfAbsent(readLockKey, Integer.valueOf(2));
+            lockValue = locksCache.withFlags(Flag.SKIP_CACHE_STORE).putIfAbsent(readLockKey, Integer.valueOf(2));
             done = (null == lockValue);
             if (done) {
                // have to check now that the fileKey still exists to prevent the race condition of 
                // T1 fileKey exists - T2 delete file and remove readlock - T1 putIfAbsent(readlock, 2)
                final FileCacheKey fileKey = new FileCacheKey(indexName, filename);
-               if (cache.get(fileKey) == null) {
-                  cache.withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(readLockKey);
+               if (metadataCache.get(fileKey) == null) {
+                  locksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(readLockKey);
                   return false;
                }
             }
@@ -157,24 +170,29 @@
     * @param readLockKey the key representing the values to be deleted
     * @param cache the cache containing the elements to be deleted
     */
-   static void realFileDelete(FileReadLockKey readLockKey, AdvancedCache cache) {
+   static void realFileDelete(FileReadLockKey readLockKey, AdvancedCache locksCache, AdvancedCache chunksCache, AdvancedCache metadataCache) {
       final boolean trace = log.isTraceEnabled();
       final String indexName = readLockKey.getIndexName();
       final String filename = readLockKey.getFileName();
       FileCacheKey key = new FileCacheKey(indexName, filename);
       if (trace) log.trace("deleting metadata: " + key);
-      FileMetadata file = (FileMetadata) cache.withFlags(Flag.SKIP_LOCKING).remove(key);
+      FileMetadata file = (FileMetadata) metadataCache.withFlags(Flag.SKIP_LOCKING).remove(key);
       if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
          for (int i = 0; i < file.getNumberOfChunks(); i++) {
             ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, filename, i);
             if (trace) log.trace("deleting chunk: " + chunkKey);
-            cache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).removeAsync(chunkKey);
+            chunksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_LOCKING).removeAsync(chunkKey);
          }
       }
       // last operation, as being set as value==0 it prevents others from using it during the
       // deletion process:
       if (trace) log.trace("deleting readlock: " + readLockKey);
-      cache.withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(readLockKey);
+      locksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(readLockKey);
    }
+   
+   private static void verifyCacheHasnoEviction(AdvancedCache cache) {
+      if (cache.getConfiguration().getEvictionStrategy().isEnabled())
+         throw new IllegalArgumentException("DistributedSegmentReadLocker is not reliable when using a cache with eviction enabled, disable eviction on this cache instance");
+   }
 
 }

Modified: trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/LocalLockMergingSegmentReadLocker.java
===================================================================
--- trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/LocalLockMergingSegmentReadLocker.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/main/java/org/infinispan/lucene/readlocks/LocalLockMergingSegmentReadLocker.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -47,8 +47,19 @@
     * @param indexName
     */
    public LocalLockMergingSegmentReadLocker(Cache cache, String indexName) {
-      this.delegate = new DistributedSegmentReadLocker(cache, indexName);
+      this.delegate = new DistributedSegmentReadLocker(cache, cache, cache, indexName);
    }
+   
+   /**
+    * Create a new LocalLockMergingSegmentReadLocker with special purpose caches
+    * @param locksCache the cache to be used to store ditributed locks
+    * @param chunksCache the cache containing the chunks, this is where the bulk of data is stored
+    * @param metadataCache smaller cache for the metadata of stored elements
+    * @param indexName
+    */
+   public LocalLockMergingSegmentReadLocker(Cache locksCache, Cache chunksCache, Cache metadataCache, String indexName) {
+      this.delegate = new DistributedSegmentReadLocker(locksCache, chunksCache, metadataCache, indexName);
+   }
 
    /**
     * {@inheritDoc}

Modified: trunk/lucene-directory/src/test/java/org/infinispan/lucene/CacheTestSupport.java
===================================================================
--- trunk/lucene-directory/src/test/java/org/infinispan/lucene/CacheTestSupport.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/test/java/org/infinispan/lucene/CacheTestSupport.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -215,6 +215,12 @@
       iw.close();
    }
    
+   public static void optimizeIndex(Directory dir) throws IOException {
+      IndexWriter iw = new IndexWriter(dir, LuceneSettings.analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
+      iw.optimize();
+      iw.close();
+   }
+   
    /**
     * Useful tool to debug the Lucene invocations into the directory;
     * it prints a thread dump to standard output of only seven lines

Copied: trunk/lucene-directory/src/test/java/org/infinispan/lucene/DirectoryOnMultipleCachesTest.java (from rev 2484, branches/4.2.x/lucene-directory/src/test/java/org/infinispan/lucene/DirectoryOnMultipleCachesTest.java)
===================================================================
--- trunk/lucene-directory/src/test/java/org/infinispan/lucene/DirectoryOnMultipleCachesTest.java	                        (rev 0)
+++ trunk/lucene-directory/src/test/java/org/infinispan/lucene/DirectoryOnMultipleCachesTest.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -0,0 +1,125 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.infinispan.lucene;
+
+import static org.infinispan.lucene.CacheTestSupport.assertTextIsFoundInIds;
+import static org.infinispan.lucene.CacheTestSupport.writeTextToIndex;
+import static org.infinispan.lucene.CacheTestSupport.optimizeIndex;
+
+import java.io.IOException;
+
+import org.infinispan.Cache;
+import org.infinispan.manager.CacheContainer;
+import org.infinispan.test.TestingUtil;
+import org.infinispan.util.concurrent.ConcurrentHashSet;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+/**
+ * Verifies the Index can be spread across three different caches;
+ * this is useful so that each cache can be configured independently
+ * to better match the intended usage (like avoiding a CacheStore for volatile locking data).
+ * 
+ * @author Sanne Grinovero
+ */
+@Test(groups = "functional", testName = "lucene.DirectoryOnMultipleCachesTest")
+public class DirectoryOnMultipleCachesTest {
+   
+   private CacheContainer cacheManager;
+   private Cache metadataCache;
+   private Cache chunkCache;
+   private Cache lockCache;
+
+   @BeforeClass(alwaysRun = true)
+   public void createBeforeClass() {
+      cacheManager = CacheTestSupport.createLocalCacheManager();
+      metadataCache = cacheManager.getCache("metadata");
+      chunkCache = cacheManager.getCache("chunks");
+      lockCache = cacheManager.getCache("locks");
+   }
+   
+   @Test
+   public void testRunningOnMultipleCaches() throws IOException {
+      assert metadataCache != chunkCache;
+      assert chunkCache != lockCache;
+      assert lockCache != metadataCache;
+      InfinispanDirectory dir = new InfinispanDirectory(metadataCache, chunkCache, lockCache, "testingIndex", 100);
+      writeTextToIndex(dir, 0, "hello world");
+      assertTextIsFoundInIds(dir, "hello", 0);
+      writeTextToIndex(dir, 1, "hello solar system");
+      assertTextIsFoundInIds(dir, "hello", 0, 1);
+      assertTextIsFoundInIds(dir, "system", 1);
+      optimizeIndex(dir);
+      assertTextIsFoundInIds(dir, "hello", 0, 1);
+      dir.close();
+   }
+   
+   @Test(dependsOnMethods="testRunningOnMultipleCaches")
+   public void verifyIntendedChunkCachesUsage() {
+      int chunks = 0;
+      for (Object key : chunkCache.keySet()) {
+         chunks++;
+         assert key.getClass().equals(ChunkCacheKey.class);
+         Object value = chunkCache.get(key);
+         assert byte[].class.equals(value.getClass());
+      }
+      assert chunks != 0;
+   }
+   
+   @Test(dependsOnMethods="testRunningOnMultipleCaches")
+   public void verifyIntendedLockCachesUsage() {
+      //all locks should be cleared now, so if any value is left it should be equal to one.
+      for (Object key : lockCache.keySet()) {
+         assert key.getClass().equals(FileReadLockKey.class);
+         assert lockCache.get(key).equals(1);
+      }
+   }
+   
+   @Test(dependsOnMethods="testRunningOnMultipleCaches")
+   public void verifyIntendedMetadataCachesUsage() {
+      int metadata = 0;
+      int filelists = 0;
+      for (Object key : metadataCache.keySet()) {
+         Object value = metadataCache.get(key);
+         if (key.getClass().equals(org.infinispan.lucene.FileListCacheKey.class)) {
+            filelists++;
+            assert value.getClass().equals(ConcurrentHashSet.class);
+         }
+         else if (key.getClass().equals(FileCacheKey.class)) {
+            metadata++;
+            assert value.getClass().equals(FileMetadata.class);
+         }
+         else {
+            assert false : "unexpected type of key in metadata cache: " + key.getClass();
+         }
+      }
+      assert filelists == 1;
+      assert metadata != 0;
+   }
+   
+   @AfterClass(alwaysRun = true)
+   public void afterClass() {
+      TestingUtil.killCacheManagers(cacheManager);
+   }
+
+}

Modified: trunk/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java
===================================================================
--- trunk/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -566,14 +566,6 @@
       testChunkBorders(dir, null);
    }
    
-   @Test
-   public void testChunkBordersOnFSDirectory() throws IOException {
-      boolean directoriesCreated = indexDir.mkdirs();
-      assert directoriesCreated : "couldn't create directory for FSDirectory test";
-      FSDirectory dir = FSDirectory.open(indexDir);
-      testChunkBorders(dir, null);
-   }
-   
    /**
     * Useful to verify the Infinispan Directory has similar behaviour
     * to standard Lucene implementations regarding reads out of ranges.

Modified: trunk/lucene-directory/src/test/java/org/infinispan/lucene/SimpleLuceneTest.java
===================================================================
--- trunk/lucene-directory/src/test/java/org/infinispan/lucene/SimpleLuceneTest.java	2010-10-04 16:40:34 UTC (rev 2484)
+++ trunk/lucene-directory/src/test/java/org/infinispan/lucene/SimpleLuceneTest.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -28,7 +28,9 @@
 import java.io.IOException;
 
 import org.apache.lucene.store.Directory;
+import org.infinispan.Cache;
 import org.infinispan.config.Configuration;
+import org.infinispan.lucene.readlocks.DistributedSegmentReadLocker;
 import org.infinispan.test.MultipleCacheManagersTest;
 import org.testng.annotations.Test;
 
@@ -51,8 +53,11 @@
    
    @Test
    public void testIndexWritingAndFinding() throws IOException {
-      Directory dirA = new InfinispanDirectory(cache(0, "lucene"), "indexName");
-      Directory dirB = new InfinispanDirectory(cache(1, "lucene"), "indexName");
+      final String indexName = "indexName";
+      final Cache cache0 = cache(0, "lucene");
+      final Cache cache1 = cache(1, "lucene");
+      Directory dirA = new InfinispanDirectory(cache0, indexName);
+      Directory dirB = new InfinispanDirectory(cache1, indexName);
       writeTextToIndex(dirA, 0, "hi from node A");
       assertTextIsFoundInIds(dirA, "hi", 0);
       assertTextIsFoundInIds(dirB, "hi", 0);
@@ -65,8 +70,8 @@
       assertTextIsFoundInIds(dirB, "node", 1);
       dirA.close();
       dirB.close();
-      DirectoryIntegrityCheck.verifyDirectoryStructure(cache(0, "lucene"), "indexName");
-      DirectoryIntegrityCheck.verifyDirectoryStructure(cache(1, "lucene"), "indexName");
+      DirectoryIntegrityCheck.verifyDirectoryStructure(cache0, "indexName");
+      DirectoryIntegrityCheck.verifyDirectoryStructure(cache1, "indexName");
    }
    
    @Test(description="Verifies the caches can be reused after a Directory close")

Copied: trunk/lucene-directory/src/test/java/org/infinispan/lucene/readlocks/ConfigurationCheckTest.java (from rev 2484, branches/4.2.x/lucene-directory/src/test/java/org/infinispan/lucene/readlocks/ConfigurationCheckTest.java)
===================================================================
--- trunk/lucene-directory/src/test/java/org/infinispan/lucene/readlocks/ConfigurationCheckTest.java	                        (rev 0)
+++ trunk/lucene-directory/src/test/java/org/infinispan/lucene/readlocks/ConfigurationCheckTest.java	2010-10-04 17:05:17 UTC (rev 2485)
@@ -0,0 +1,53 @@
+/*
+ * JBoss, Home of Professional Open Source.
+ * Copyright 2009, Red Hat Middleware LLC, and individual contributors
+ * as indicated by the @author tags. See the copyright.txt file in the
+ * distribution for a full listing of individual contributors.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * This software is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this software; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
+ */
+package org.infinispan.lucene.readlocks;
+
+import org.infinispan.config.Configuration;
+import org.infinispan.eviction.EvictionStrategy;
+import org.infinispan.manager.EmbeddedCacheManager;
+import org.infinispan.test.SingleCacheManagerTest;
+import org.infinispan.test.fwk.TestCacheManagerFactory;
+import org.testng.annotations.Test;
+
+/**
+ * Verifies a DistributedSegmentReadLocker can be built only on certain types of caches,
+ * for example it shouldn't be allowed to use eviction: see ISPN-680
+ * 
+ * @author Sanne Grinovero
+ */
+@Test(groups = "functional", testName = "lucene.readlocks.ConfigurationCheckTest")
+public class ConfigurationCheckTest extends SingleCacheManagerTest {
+   
+   @Override
+   protected EmbeddedCacheManager createCacheManager() throws Exception {
+      Configuration configuration = new Configuration();
+      configuration.setEvictionStrategy(EvictionStrategy.FIFO);
+      return TestCacheManagerFactory.createCacheManager(configuration, false);
+   }
+   
+   @Test(expectedExceptions = IllegalArgumentException.class)
+   public void testEvictionIsNotAllowed() {
+      cache = cacheManager.getCache();
+      new DistributedSegmentReadLocker(cache, cache, cache, "lucene.readlocks.ConfigurationCheckTest");
+   }
+
+}



More information about the infinispan-commits mailing list