[infinispan-commits] Infinispan SVN: r1983 - in branches/4.1.x/lucene-directory/src: test/java/org/infinispan/lucene and 1 other directory.
infinispan-commits at lists.jboss.org
infinispan-commits at lists.jboss.org
Tue Jul 6 05:50:32 EDT 2010
Author: sannegrinovero
Date: 2010-07-06 05:50:32 -0400 (Tue, 06 Jul 2010)
New Revision: 1983
Modified:
branches/4.1.x/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java
branches/4.1.x/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java
Log:
[ISPN-527] (Implement Read-Locks for Lucene rename) (part of ISPN-501) - branch 4.1
Modified: branches/4.1.x/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java
===================================================================
--- branches/4.1.x/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java 2010-07-06 09:26:34 UTC (rev 1982)
+++ branches/4.1.x/lucene-directory/src/main/java/org/infinispan/lucene/InfinispanDirectory.java 2010-07-06 09:50:32 UTC (rev 1983)
@@ -160,16 +160,8 @@
*/
public void renameFile(String from, String to) throws IOException {
checkIsOpen();
- cache.startBatch();
- // rename main file header
- CacheKey fromKey = new FileCacheKey(indexName, from);
- FileMetadata fileFrom = (FileMetadata) cache.remove(fromKey);
- cache.put(new FileCacheKey(indexName, to), fileFrom);
- Set<String> fileList = getFileList();
- fileList.remove(from);
- fileList.add(to);
- cache.put(fileListCacheKey, fileList);
- // rename also all chunks
+
+ // preparation: copy all chunks to new keys
int i = -1;
Object ob;
do {
@@ -178,14 +170,28 @@
if (ob == null) {
break;
}
- cache.remove(fromChunkKey);
ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i);
- cache.put(toChunkKey, ob);
+ cache.withFlags(Flag.SKIP_REMOTE_LOOKUP).put(toChunkKey, ob);
} while (true);
+
+ // rename metadata first
+ cache.startBatch();
+ CacheKey fromKey = new FileCacheKey(indexName, from);
+ FileMetadata metadata = (FileMetadata) cache.remove(fromKey);
+ cache.put(new FileCacheKey(indexName, to), metadata);
+ Set<String> fileList = getFileList();
+ fileList.remove(from);
+ fileList.add(to);
+ createRefCountForNewFile(to);
+ cache.put(fileListCacheKey, fileList);
+ cache.endBatch(true);
+
+ // now trigger deletion of old file chunks:
+ FileReadLockKey fileFromReadLockKey = new FileReadLockKey(indexName, from);
+ InfinispanIndexInput.releaseReadLock(fileFromReadLockKey, cache);
if (log.isTraceEnabled()) {
log.trace("Renamed file from: {0} to: {1} in index {2}", from, to, indexName);
}
- cache.endBatch(true);
}
/**
@@ -209,8 +215,7 @@
FileMetadata previous = (FileMetadata) cache.putIfAbsent(key, newFileMetadata);
if (previous == null) {
// creating new file
- FileReadLockKey readLockKey = new FileReadLockKey(indexName, name);
- cache.put(readLockKey, Integer.valueOf(1));
+ createRefCountForNewFile(name);
Set<String> fileList = getFileList();
fileList.add(name);
cache.put(fileListCacheKey, fileList);
@@ -220,6 +225,11 @@
}
}
+ private void createRefCountForNewFile(String fileName) {
+ FileReadLockKey readLockKey = new FileReadLockKey(indexName, fileName);
+ cache.withFlags(Flag.SKIP_REMOTE_LOOKUP).put(readLockKey, Integer.valueOf(1));
+ }
+
@SuppressWarnings("unchecked")
private Set<String> getFileList() {
Set<String> fileList = (Set<String>) cache.withFlags(Flag.SKIP_LOCKING).get(fileListCacheKey);
Modified: branches/4.1.x/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java
===================================================================
--- branches/4.1.x/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java 2010-07-06 09:26:34 UTC (rev 1982)
+++ branches/4.1.x/lucene-directory/src/test/java/org/infinispan/lucene/InfinispanDirectoryIOTest.java 2010-07-06 09:50:32 UTC (rev 1983)
@@ -48,6 +48,9 @@
@Test(groups = "functional", testName = "lucene.InfinispanDirectoryIOTest", sequential = true)
public class InfinispanDirectoryIOTest {
+ /** The Test index name */
+ private static final String INDEXNAME = "index";
+
private CacheContainer cacheManager;
@BeforeTest
@@ -70,44 +73,54 @@
final int BUFFER_SIZE = 64;
Cache<CacheKey, Object> cache = cacheManager.getCache();
- InfinispanDirectory dir = new InfinispanDirectory(cache, "index", BUFFER_SIZE);
+ InfinispanDirectory dir = new InfinispanDirectory(cache, INDEXNAME, BUFFER_SIZE);
- final int SHORT_FILE_SIZE = 61;
- assert BUFFER_SIZE > SHORT_FILE_SIZE;
- createFileWithRepeatableContent(dir, "SingleChunk.txt", SHORT_FILE_SIZE);
- assertReadByteWorkingCorrectly(dir, "SingleChunk.txt", SHORT_FILE_SIZE);
- assertReadBytesWorkingCorrectly(dir, "SingleChunk.txt", SHORT_FILE_SIZE, 15);
+ verifyOnBuffer("SingleChunk.txt", 61, BUFFER_SIZE, cache, dir, 15);
final int VERY_BIG_FILE_SIZE = 10000;
assert BUFFER_SIZE < VERY_BIG_FILE_SIZE;
- createFileWithRepeatableContent(dir, "MultipleChunks.txt", VERY_BIG_FILE_SIZE);
- assertReadByteWorkingCorrectly(dir, "MultipleChunks.txt", VERY_BIG_FILE_SIZE);
- assertReadBytesWorkingCorrectly(dir, "MultipleChunks.txt", VERY_BIG_FILE_SIZE, 33);
+ verifyOnBuffer("MultipleChunks.txt", VERY_BIG_FILE_SIZE, BUFFER_SIZE, cache, dir, 33);
final int LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE = 256;
assert (LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE % BUFFER_SIZE) == 0;
- createFileWithRepeatableContent(dir, "LastChunkFilled.txt", LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE);
- assertReadByteWorkingCorrectly(dir, "LastChunkFilled.txt", LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE);
- assertReadBytesWorkingCorrectly(dir, "LastChunkFilled.txt", LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE, 11);
- assert 4 == getChunksNumber(cache, "index", "LastChunkFilled.txt");
+ verifyOnBuffer("LastChunkFilled.txt", LAST_CHUNK_COMPLETELY_FILLED_FILE_SIZE, BUFFER_SIZE, cache, dir, 11);
+ assertHasNChunks(4, cache, INDEXNAME, "LastChunkFilled.txt.bak");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
final int LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE = 257;
assert (LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE % BUFFER_SIZE) == 1;
- createFileWithRepeatableContent(dir, "LonelyByteInLastChunk.txt", LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE);
- assertReadByteWorkingCorrectly(dir, "LonelyByteInLastChunk.txt", LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE);
- assertReadBytesWorkingCorrectly(dir, "LonelyByteInLastChunk.txt", LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE, 12);
- assert 5 == getChunksNumber(cache, "index", "LonelyByteInLastChunk.txt");
+ verifyOnBuffer("LonelyByteInLastChunk.txt", LAST_CHUNK_WITH_LONELY_BYTE_FILE_SIZE, BUFFER_SIZE, cache, dir, 12);
+ assertHasNChunks(5, cache, INDEXNAME, "LonelyByteInLastChunk.txt.bak");
dir.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
}
+
+ /**
+ * Helper for testReadWholeFile test:
+ * creates a file and then verifies its readability in specific corner cases.
+ * Then reuses the same parameters to verify the file rename capabilities.
+ */
+ private void verifyOnBuffer(final String fileName, final int fileSize, final int bufferSize, Cache<CacheKey, Object> cache, InfinispanDirectory dir, final int readBuffer) throws IOException {
+ createFileWithRepeatableContent(dir, fileName, fileSize);
+ assertReadByteWorkingCorrectly(dir, fileName, fileSize);
+ assertReadBytesWorkingCorrectly(dir, fileName, fileSize, readBuffer);
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
+ final String newFileName = fileName+".bak";
+ dir.renameFile(fileName, newFileName);
+ assertReadByteWorkingCorrectly(dir, newFileName, fileSize);
+ assertReadBytesWorkingCorrectly(dir, newFileName, fileSize, readBuffer);
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
+ assert dir.fileExists(newFileName);
+ assert dir.fileExists(fileName) == false;
+ }
@Test
public void testReadRandomSampleFile() throws IOException {
final int BUFFER_SIZE = 64;
Cache<CacheKey, Object> cache = cacheManager.getCache();
- InfinispanDirectory dir = new InfinispanDirectory(cache, "index", BUFFER_SIZE);
+ InfinispanDirectory dir = new InfinispanDirectory(cache, INDEXNAME, BUFFER_SIZE);
final int FILE_SIZE = 1000;
assert BUFFER_SIZE < FILE_SIZE;
@@ -134,7 +147,7 @@
}
indexInput.close();
dir.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
}
/**
@@ -206,19 +219,13 @@
}
/**
- * It returns the number of chunks of file which is divided
- *
- * @param cache
- * @param index
- * @param fileName
- * @return
+ * Verifies a file is divided into N chunks
*/
- private int getChunksNumber(Cache<CacheKey, Object> cache, String index, String fileName) {
- int chunksNumber = 0;
- while (cache.containsKey(new ChunkCacheKey(index, fileName, chunksNumber))) {
- chunksNumber++;
+ private void assertHasNChunks(int expectedChunks, Cache<CacheKey, Object> cache, String index, String fileName) {
+ for (int i=0; i<expectedChunks; i++) {
+ ChunkCacheKey key = new ChunkCacheKey(index, fileName, i);
+ Assert.assertTrue("should contain key " + key, cache.containsKey(key));
}
- return chunksNumber;
}
/**
@@ -249,32 +256,33 @@
final int BUFFER_SIZE = 64;
Cache<CacheKey, Object> cache = cacheManager.getCache();
- InfinispanDirectory dir = new InfinispanDirectory(cache, "index", BUFFER_SIZE);
+ InfinispanDirectory dir = new InfinispanDirectory(cache, INDEXNAME, BUFFER_SIZE);
// create file headers
FileMetadata file1 = new FileMetadata();
- CacheKey key1 = new FileCacheKey("index", "Hello.txt");
+ CacheKey key1 = new FileCacheKey(INDEXNAME, "Hello.txt");
cache.put(key1, file1);
FileMetadata file2 = new FileMetadata();
- CacheKey key2 = new FileCacheKey("index", "World.txt");
+ CacheKey key2 = new FileCacheKey(INDEXNAME, "World.txt");
cache.put(key2, file2);
// byte array for Hello.txt
String helloText = "Hello world. This is some text.";
- cache.put(new ChunkCacheKey("index", "Hello.txt", 0), helloText.getBytes());
+ cache.put(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0), helloText.getBytes());
// byte array for World.txt - should be in at least 2 chunks.
String worldText = "This String should contain more than sixty four characters but less than one hundred and twenty eight.";
+ assert worldText.getBytes().length > BUFFER_SIZE;
byte[] buf = new byte[BUFFER_SIZE];
System.arraycopy(worldText.getBytes(), 0, buf, 0, BUFFER_SIZE);
- cache.put(new ChunkCacheKey("index", "World.txt", 0), buf);
+ cache.put(new ChunkCacheKey(INDEXNAME, "World.txt", 0), buf);
String part1 = new String(buf);
buf = new byte[BUFFER_SIZE];
System.arraycopy(worldText.getBytes(), BUFFER_SIZE, buf, 0, worldText.length() - BUFFER_SIZE);
- cache.put(new ChunkCacheKey("index", "World.txt", 1), buf);
+ cache.put(new ChunkCacheKey(INDEXNAME, "World.txt", 1), buf);
String part2 = new String(buf);
// make sure the generated bytes do add up!
@@ -352,23 +360,23 @@
assert new String(baos.toByteArray()).equals(worldText);
dir.deleteFile("Hello.txt");
- assert null == cache.get(new FileCacheKey("index", "Hello.txt"));
- assert null == cache.get(new ChunkCacheKey("index", "Hello.txt", 0));
+ assert null == cache.get(new FileCacheKey(INDEXNAME, "Hello.txt"));
+ assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0));
- Object ob1 = cache.get(new FileCacheKey("index", "World.txt"));
- Object ob2 = cache.get(new ChunkCacheKey("index", "World.txt", 0));
- Object ob3 = cache.get(new ChunkCacheKey("index", "World.txt", 1));
+ Object ob1 = cache.get(new FileCacheKey(INDEXNAME, "World.txt"));
+ Object ob2 = cache.get(new ChunkCacheKey(INDEXNAME, "World.txt", 0));
+ Object ob3 = cache.get(new ChunkCacheKey(INDEXNAME, "World.txt", 1));
dir.renameFile("World.txt", "HelloWorld.txt");
- assert null == cache.get(new FileCacheKey("index", "Hello.txt"));
- assert null == cache.get(new ChunkCacheKey("index", "Hello.txt", 0));
- assert null == cache.get(new ChunkCacheKey("index", "Hello.txt", 1));
+ assert null == cache.get(new FileCacheKey(INDEXNAME, "Hello.txt"));
+ assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 0));
+ assert null == cache.get(new ChunkCacheKey(INDEXNAME, "Hello.txt", 1));
- assert cache.get(new FileCacheKey("index", "HelloWorld.txt")).equals(ob1);
- assert cache.get(new ChunkCacheKey("index", "HelloWorld.txt", 0)).equals(ob2);
- assert cache.get(new ChunkCacheKey("index", "HelloWorld.txt", 1)).equals(ob3);
+ assert cache.get(new FileCacheKey(INDEXNAME, "HelloWorld.txt")).equals(ob1);
+ assert cache.get(new ChunkCacheKey(INDEXNAME, "HelloWorld.txt", 0)).equals(ob2);
+ assert cache.get(new ChunkCacheKey(INDEXNAME, "HelloWorld.txt", 1)).equals(ob3);
- // test that contents survive a move
+ // test that the contents survive a move
ii = dir.openInput("HelloWorld.txt");
assert ii.length() == worldText.length();
@@ -387,14 +395,14 @@
assert new String(baos.toByteArray()).equals(worldText);
dir.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
}
public void testWriteChunks() throws Exception {
final int BUFFER_SIZE = 64;
Cache<CacheKey, Object> cache = cacheManager.getCache();
- InfinispanDirectory dir = new InfinispanDirectory(cache, "index", BUFFER_SIZE);
+ InfinispanDirectory dir = new InfinispanDirectory(cache, INDEXNAME, BUFFER_SIZE);
IndexOutput io = dir.createOutput("MyNewFile.txt");
@@ -404,7 +412,7 @@
io.flush();
assert dir.fileExists("MyNewFile.txt");
- assert null != cache.get(new ChunkCacheKey("index", "MyNewFile.txt", 0));
+ assert null != cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 0));
// test contents by reading:
byte[] buf = new byte[9];
@@ -419,20 +427,20 @@
io.writeBytes(testText.getBytes(), 0, testText.length());
io.close();
// now compare.
- byte[] chunk1 = (byte[]) cache.get(new ChunkCacheKey("index", "MyNewFile.txt", 0));
- byte[] chunk2 = (byte[]) cache.get(new ChunkCacheKey("index", "MyNewFile.txt", 1));
+ byte[] chunk1 = (byte[]) cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 0));
+ byte[] chunk2 = (byte[]) cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 1));
assert null != chunk1;
assert null != chunk2;
assert testText.equals(new String(chunk1) + new String(chunk2).trim());
dir.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
}
public void testWriteChunksDefaultChunks() throws Exception {
Cache<CacheKey, Object> cache = cacheManager.getCache();
- InfinispanDirectory dir = new InfinispanDirectory(cache, "index");
+ InfinispanDirectory dir = new InfinispanDirectory(cache, INDEXNAME);
final String testText = "This is some rubbish";
final byte[] testTextAsBytes = testText.getBytes();
@@ -444,13 +452,13 @@
io.writeByte((byte) 3);
io.writeBytes(testTextAsBytes, testTextAsBytes.length);
io.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
- FileCacheKey fileCacheKey = new FileCacheKey("index", "MyNewFile.txt");
+ FileCacheKey fileCacheKey = new FileCacheKey(INDEXNAME, "MyNewFile.txt");
assert null != cache.get(fileCacheKey);
FileMetadata metadata = (FileMetadata) cache.get(fileCacheKey);
Assert.assertEquals(testTextAsBytes.length + 3, metadata.getSize());
- assert null != cache.get(new ChunkCacheKey("index", "MyNewFile.txt", 0));
+ assert null != cache.get(new ChunkCacheKey(INDEXNAME, "MyNewFile.txt", 0));
// test contents by reading:
IndexInput ii = dir.openInput("MyNewFile.txt");
@@ -465,7 +473,7 @@
assert testText.equals(new String(buf).trim());
dir.close();
- DirectoryIntegrityCheck.verifyDirectoryStructure(cache, "index");
+ DirectoryIntegrityCheck.verifyDirectoryStructure(cache, INDEXNAME);
}
}
More information about the infinispan-commits
mailing list