[jbosscache-commits] JBoss Cache SVN: r5681 - in jbosscache-lucene: jbosscache and 13 other directories.

jbosscache-commits at lists.jboss.org
Fri Apr 25 05:49:39 EDT 2008


Author: manik.surtani at jboss.com
Date: 2008-04-25 05:49:39 -0400 (Fri, 25 Apr 2008)
New Revision: 5681

Added:
   jbosscache-lucene/jbosscache/
   jbosscache-lucene/jbosscache/README.txt
   jbosscache-lucene/jbosscache/build.xml
   jbosscache-lucene/jbosscache/jbosscache.iml
   jbosscache-lucene/jbosscache/lib/
   jbosscache-lucene/jbosscache/pom.xml.template
   jbosscache-lucene/jbosscache/src/
   jbosscache-lucene/jbosscache/src/java/
   jbosscache-lucene/jbosscache/src/java/org/
   jbosscache-lucene/jbosscache/src/java/org/apache/
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/ByteArrayIO.java
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/DistributedLock.java
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/JBCDirectory.java
   jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/Metadata.java
   jbosscache-lucene/jbosscache/src/test/
   jbosscache-lucene/jbosscache/src/test/log4j.xml
   jbosscache-lucene/jbosscache/src/test/org/
   jbosscache-lucene/jbosscache/src/test/org/apache/
   jbosscache-lucene/jbosscache/src/test/org/apache/lucene/
   jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/
   jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/
   jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestByteArrayIO.java
   jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestConcurrentUsage.java
Log:


Added: jbosscache-lucene/jbosscache/README.txt
===================================================================
--- jbosscache-lucene/jbosscache/README.txt	                        (rev 0)
+++ jbosscache-lucene/jbosscache/README.txt	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,16 @@
+JBoss Cache as a replicated in-memory store for indexes
+
+* This implementation is built using JBoss Cache 2.1.1.CR2 and requires Java 5.
+   * See the distribution on http://www.jbosscache.org for a complete list of dependencies.
+
+* For the locking implementation to work correctly, JBoss Cache must be configured:
+   * to use SYNC_REPL
+   * to use synchronous commit
+   * to use synchronous rollback
+   * to use a transaction manager
+   * preferably, to use optimistic locking
+   * Refer to the JBoss Cache User Guide on http://www.jbosscache.org for details on configuring the above; a programmatic sketch follows this file.
+
+* While not formally recommended for production use, the DummyTransactionManager that ships with JBoss Cache
+  could be used in this case since transaction recovery is not a concern.
+ 
\ No newline at end of file
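
For reference, the required configuration can also be applied programmatically. The sketch below mirrors
the createCache() helper in TestConcurrentUsage.java further down in this commit (the
DummyTransactionManagerLookup is the test-friendly transaction manager mentioned above):

   Cache cache = new DefaultCacheFactory().createCache(false);
   Configuration cfg = cache.getConfiguration();
   cfg.setCacheMode(Configuration.CacheMode.REPL_SYNC);                  // SYNC_REPL
   cfg.setSyncCommitPhase(true);                                         // synchronous commit
   cfg.setSyncRollbackPhase(true);                                       // synchronous rollback
   cfg.setTransactionManagerLookupClass(
         DummyTransactionManagerLookup.class.getName());                 // a transaction manager
   cfg.setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC); // optimistic locking
   cache.start();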

Added: jbosscache-lucene/jbosscache/build.xml
===================================================================
--- jbosscache-lucene/jbosscache/build.xml	                        (rev 0)
+++ jbosscache-lucene/jbosscache/build.xml	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<project name="lucene-jbosscache" default="default" xmlns:artifact="urn:maven-artifact-ant">
+
+   <description>
+      Lucene JBoss Cache 2.x integration
+   </description>
+
+   <property name="jbosscache.version" value="2.1.1.CR2" />
+   <property name="build.dir" location="../../build/contrib/jbosscache"/>
+   <property name="dist.dir" location="../../dist/contrib/jbosscache"/>
+   <property name="maven.dist.dir" location="../../dist/maven"/>
+   <property name="javac.target" value="1.5"/>
+   <property name="javac.source" value="1.5"/>
+
+   <!-- Initialise Maven Ant Tasks -->
+   <mkdir dir="lib" />
+   <get src="http://repo1.maven.org/maven2/org/apache/maven/maven-ant-tasks/2.0.9/maven-ant-tasks-2.0.9.jar" dest="lib/maven-ant-tasks.jar" usetimestamp="true"/>
+   <path id="maven-ant-tasks.classpath" path="lib/maven-ant-tasks.jar"/>
+   <typedef resource="org/apache/maven/artifact/ant/antlib.xml" uri="urn:maven-artifact-ant" classpathref="maven-ant-tasks.classpath"/>
+
+   <!-- Retrieve deps -->
+   <artifact:remoteRepository id="jboss.repository" url="http://repository.jboss.com/maven2" />
+   <artifact:dependencies pathId="jbosscache.deps">
+      <remoteRepository refId="jboss.repository" />
+      <dependency groupId="org.jboss.cache" artifactId="jbosscache-core" version="${jbosscache.version}"/>
+   </artifact:dependencies>
+
+   <pathconvert property="project.classpath" targetos="unix" refid="jbosscache.deps" />
+
+   <property name="src.dir" value="src"/>
+
+   <import file="../contrib-build.xml"/>
+
+
+   <target name="init" depends="contrib-build.init" />
+   
+</project>
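
To build this contrib module, a sketch (the checkout location under contrib/ is assumed from the
build.dir property above; -D is the standard Ant property override):

   cd contrib/jbosscache
   ant                                    # runs the "default" target inherited from contrib-build.xml
   ant -Djbosscache.version=2.1.1.CR2     # override the JBoss Cache version if needed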

Added: jbosscache-lucene/jbosscache/jbosscache.iml
===================================================================
--- jbosscache-lucene/jbosscache/jbosscache.iml	                        (rev 0)
+++ jbosscache-lucene/jbosscache/jbosscache.iml	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module relativePaths="true" type="JAVA_MODULE" version="4">
+  <component name="NewModuleRootManager" inherit-compiler-output="true">
+    <exclude-output />
+    <content url="file://$MODULE_DIR$">
+      <sourceFolder url="file://$MODULE_DIR$/src/java" isTestSource="false" />
+      <sourceFolder url="file://$MODULE_DIR$/src/test" isTestSource="true" />
+    </content>
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+    <orderEntry type="module" module-name="lucene" />
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../lib/junit-3.8.2.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../../.m2/repository/commons-logging/commons-logging/1.0.4/commons-logging-1.0.4.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../../.m2/repository/jgroups/jgroups/2.6.2/jgroups-2.6.2.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../../.m2/repository/org/jboss/javaee/jboss-javaee/5.0.0.Beta3/jboss-javaee-5.0.0.Beta3.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../../.m2/repository/org/jboss/jboss-common-core/2.2.3.GA/jboss-common-core-2.2.3.GA.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../jbosscache/benchmarks/benchmark-fwk/trunk/lib/log4j.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntry type="module-library">
+      <library>
+        <CLASSES>
+          <root url="jar://$MODULE_DIR$/../../../../../../.m2/repository/org/jboss/cache/jbosscache-core/2.2.0-SNAPSHOT/jbosscache-core-2.2.0-SNAPSHOT.jar!/" />
+        </CLASSES>
+        <JAVADOC />
+        <SOURCES />
+      </library>
+    </orderEntry>
+    <orderEntryProperties />
+  </component>
+</module>
+

Added: jbosscache-lucene/jbosscache/pom.xml.template
===================================================================
--- jbosscache-lucene/jbosscache/pom.xml.template	                        (rev 0)
+++ jbosscache-lucene/jbosscache/pom.xml.template	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,42 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+
+  <!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+    
+    http://www.apache.org/licenses/LICENSE-2.0
+    
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+  -->
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.lucene</groupId>
+    <artifactId>lucene-contrib</artifactId>
+    <version>@version@</version>
+  </parent>
+  <groupId>org.apache.lucene</groupId>
+  <artifactId>lucene-jbosscache</artifactId>
+  <packaging>jar</packaging>
+  <version>@version@</version>
+  <name>lucene-contrib-jbosscache</name>
+  <description>JBoss Cache (in-memory cluster) based Directory implementation</description>
+  <dependencies>
+    <dependency>
+      <groupId>org.jboss.cache</groupId>
+      <artifactId>jbosscache-core</artifactId>
+      <version>2.1.1.CR2</version>
+    </dependency>
+  </dependencies>
+</project>

Added: jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/ByteArrayIO.java
===================================================================
--- jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/ByteArrayIO.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/ByteArrayIO.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,218 @@
+package org.apache.lucene.store.jbosscache;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Utility class that breaks files up into blocks stored as Nodes in JBoss Cache, and reassembles those
+ * blocks behind the byte-oriented input and output interfaces that Lucene expects to deal with.
+ *
+ * @author Manik Surtani (<a href="mailto:manik at jboss.org">manik at jboss.org</a>)
+ */
+public class ByteArrayIO
+{
+   private static Log log = LogFactory.getLog(ByteArrayIO.class);
+
+   static int getChunkNumber(long positionToRead)
+   {
+      if (positionToRead < 0) return 0;
+      return (int) (positionToRead >>> JBCDirectory.CHUNK_SIZE_BITS);
+   }
+
+   static int getChunkPointer(long pointer)
+   {
+      return (int) (JBCDirectory.CHUNK_MASK & pointer);
+   }
+
+   static byte[] getChunkData(int chunkNumber, Map<Integer, Node> chunks, Node fileNode)
+   {
+      Node chunk = chunks.get(chunkNumber);
+      if (chunk == null)
+      {
+         chunk = fileNode.getChild(chunkNumber);
+         chunks.put(chunkNumber, chunk);
+      }
+
+      return (byte[]) chunk.get("buf");
+   }
+
+   public static class ByteArrayIndexInput extends IndexInput
+   {
+      private Node fileNode;
+      private long pointer;
+      private Metadata metadata;
+      private Map<Integer, Node> chunks = new HashMap<Integer, Node>();
+
+      /**
+       * @param fileNode node containing chunks as children
+       * @param metadata file metadata
+       */
+      public ByteArrayIndexInput(Node fileNode, Metadata metadata)
+      {
+         this.fileNode = fileNode;
+         this.metadata = metadata;
+      }
+
+      public byte readByte() throws IOException
+      {
+         if (metadata == null) throw new IOException("(null): File does not exist");
+
+         if (pointer == 0 && metadata.getSize() == 0) return -1;
+         
+         if (pointer + 1 > metadata.getSize())
+         {
+            String msg = metadata.getFileName() + ": Reading past end of file.  File size is " + metadata.getSize() + " and pointer is at " + pointer;
+            log.info(msg);
+            throw new IOException(msg);
+         }
+         return getChunkData(getChunkNumber(pointer), chunks, fileNode)[getChunkPointer(pointer++)];
+      }
+
+      public void readBytes(byte[] bytes, int offset, int length) throws IOException
+      {
+         if (metadata == null) throw new IOException("File does not exist");
+
+         if (pointer + length > metadata.getSize())
+            throw new IOException(metadata.getFileName() + ": Reading past end of file");
+
+         int toRead = length;
+         int bytesReadSoFar = 0;
+         boolean first = true;
+         while (toRead > 0)
+         {
+            byte[] chunkData = getChunkData(getChunkNumber(pointer), chunks, fileNode);
+            int startingPoint = first ? getChunkPointer(pointer) : 0;
+            if (first) first = false;
+            int bytesToRead = Math.min(toRead, chunkData.length - startingPoint);
+            System.arraycopy(chunkData, startingPoint, bytes, offset + bytesReadSoFar, bytesToRead);
+            toRead = toRead - bytesToRead;
+            bytesReadSoFar += bytesToRead;
+            pointer += bytesToRead;
+         }
+      }
+
+      public void close() throws IOException
+      {
+         // do nothing, except reset the pointer and release resources.
+         chunks.clear();
+         pointer = 0;
+      }
+
+      public long getFilePointer()
+      {
+         return pointer;
+      }
+
+      public void seek(long l) throws IOException
+      {
+         pointer = l;
+      }
+
+      public long length()
+      {
+         return metadata.getSize();
+      }
+   }
+
+   public static class ByteArrayIndexOutput extends IndexOutput
+   {
+      private Node fileNode, indexRootNode;
+      private Metadata metadata;
+      private long pointer;
+      private Map<Integer, Node> chunks = new HashMap<Integer, Node>();
+      private byte[] buffer = new byte[JBCDirectory.CHUNK_SIZE];
+      private int bufferpointer;
+
+      public ByteArrayIndexOutput(Node fileNode, Node indexRootNode, Metadata metadata)
+      {
+         this.fileNode = fileNode;
+         this.indexRootNode = indexRootNode;
+         this.metadata = metadata;
+      }
+
+      void newChunk() throws IOException
+      {
+         flush();
+         bufferpointer = 0;
+         buffer = new byte[JBCDirectory.CHUNK_SIZE];
+      }
+
+      public void writeByte(byte b) throws IOException
+      {
+         if (bufferpointer == buffer.length)
+         {
+            newChunk();
+         }
+         buffer[bufferpointer++] = b;
+         pointer++;
+         metadata.setSize(metadata.getSize() + 1);
+      }
+
+      public void writeBytes(byte[] bytes, int offset, int length) throws IOException
+      {
+         int bytesWritten = 0;
+
+         while (bytesWritten < length)
+         {
+            int spaceInBuffer = buffer.length - bufferpointer;
+            int bytesToWrite = Math.min(spaceInBuffer, length - bytesWritten);
+            System.arraycopy(bytes, offset + bytesWritten, buffer, bufferpointer, bytesToWrite);
+            // advance both pointers so that interleaved writeByte()/writeBytes() calls
+            // append to the current chunk rather than overwrite its start
+            bufferpointer += bytesToWrite;
+            pointer += bytesToWrite;
+            bytesWritten += bytesToWrite;
+            if (bytesWritten < length) newChunk();
+         }
+         metadata.setSize(metadata.getSize() + length);
+      }
+
+      public void flush() throws IOException
+      {
+         // the buffer being flushed starts at absolute position (pointer - bufferpointer),
+         // which identifies the correct chunk even when the buffer is exactly full
+         int chunkNumber = getChunkNumber(pointer - bufferpointer);
+         fileNode.addChild(new Fqn(chunkNumber)).put("buf", buffer);
+      }
+
+      public void close() throws IOException
+      {
+         flush();
+         bufferpointer = 0;
+         buffer = new byte[JBCDirectory.CHUNK_SIZE];
+         pointer = 0;
+         indexRootNode.put(metadata.getFileName(), metadata);
+      }
+
+      public long getFilePointer()
+      {
+         return pointer;
+      }
+
+      public void seek(long l) throws IOException
+      {
+         if (l > metadata.getSize()) throw new IOException(metadata.getFileName() + ": seeking past end of file!");
+         // first flush
+         flush();
+         pointer = l;
+         int cn = getChunkNumber(pointer);
+         buffer = getChunkData(cn, chunks, fileNode);
+         bufferpointer = getChunkPointer(pointer);
+//         metadata.setSize(pointer);
+      }
+
+      public long length() throws IOException
+      {
+         return metadata.getSize();
+      }
+   }
+
+}
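
A worked example of the chunk arithmetic above, using the values the tests configure
(CHUNK_SIZE = 64, so CHUNK_SIZE_BITS = 6 and CHUNK_MASK = 63):

   long position = 70;                   // absolute position within the file
   int chunk  = (int) (position >>> 6);  // 70 / 64 = 1 -> the second chunk node
   int offset = (int) (position & 63);   // 70 % 64 = 6 -> the seventh byte within that chunk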

Added: jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/DistributedLock.java
===================================================================
--- jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/DistributedLock.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/DistributedLock.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,162 @@
+package org.apache.lucene.store.jbosscache;
+
+import org.apache.lucene.store.Lock;
+import org.jboss.cache.Cache;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.CacheException;
+
+import javax.transaction.TransactionManager;
+import javax.transaction.RollbackException;
+import javax.transaction.Transaction;
+
+/**
+ * Distributed lock implementation that places a token in the cache to represent a lock.
+ *
+ * @author Manik Surtani (<a href="mailto:manik at jboss.org">manik at jboss.org</a>)
+ */
+public class DistributedLock extends Lock
+{
+   final static Fqn LOCKS = Fqn.fromString("/distributed_locks");
+   boolean isLocked = false;
+   String name;
+   TransactionManager tm;
+   Cache cache;
+
+   public DistributedLock(Cache cache, String name)
+   {
+      this.cache = cache;
+      this.name = name;
+      tm = cache.getConfiguration().getRuntimeConfig().getTransactionManager();
+   }
+
+   public boolean obtain()
+   {
+      // locks are obtained by placing a token in the cache, as a part of a 2-phase commit.
+      boolean acquired = false;
+      try
+      {
+         tm.begin();
+         Node locksNode = cache.getRoot().addChild(LOCKS);
+         if (!locksNode.getKeys().contains(name))
+         {
+            locksNode.put(name, name);
+            acquired = true;
+         }
+      }
+      catch (Exception e)
+      {
+         System.out.println("Failed! " + e.getMessage());
+         failureReason = e;
+         acquired = false;
+      }
+      finally
+      {
+         try
+         {
+            if (tm.getTransaction() != null)
+            {
+               if (acquired)
+               {
+                  tm.commit();
+               }
+               else
+               {
+                  tm.rollback();
+               }
+            }
+         }
+         catch (RollbackException rbe)
+         {
+            //unable to acquire lock
+            acquired = false;
+         }
+         catch (Exception e)
+         {
+            throw new RuntimeException(e);
+         }
+      }
+
+      if (acquired)
+      {
+         // Now start another transaction on the thread, so that all operations can be batched.  This transaction is committed
+         // in release().  This transaction is also guaranteed to succeed cluster-wide since it will only be started if we are
+         // the only writer.
+         try
+         {
+            tm.begin();
+         }
+         catch (Exception e)
+         {
+            throw new CacheException("Unable to start transaction!", e);
+         }
+      }
+      return acquired;
+   }
+
+   public void release()
+   {
+      boolean removed = false;
+
+      try
+      {
+         // commit any ongoing work first
+         tm.commit();
+
+         // Now start a new transaction to release the lock.
+         tm.begin();
+         Node locksNode = cache.getRoot().addChild(LOCKS);
+         removed = locksNode.remove(name) != null;
+      }
+      catch (Exception e)
+      {
+         // propagate: failing to commit or release here could leave a stale lock in the cluster
+         throw new CacheException("Unable to commit work done or release lock!", e);
+      }
+      finally
+      {
+         try
+         {
+            if (removed)
+            {
+               tm.commit();
+            }
+            else
+               tm.rollback();
+         }
+         catch (Exception e)
+         {
+            if (removed) throw new CacheException("Unable to release lock!", e);
+         }
+      }
+   }
+
+   public boolean isLocked()
+   {
+      boolean locked = false;
+      Transaction tx = null;
+      try
+      {
+         // if there is an ongoing transaction we need to suspend it
+         if ((tx = tm.getTransaction()) != null) tm.suspend();
+         Node locksNode = cache.getRoot().addChild(LOCKS);
+         locked = locksNode.getKeys().contains(name);
+      }
+      catch (Exception e)
+      {
+         // do nothing?  What if we end up with a stale lock here?
+      }
+      finally
+      {
+         if (tx != null) try
+         {
+            tm.resume(tx);
+         }
+         catch (Exception e)
+         {
+            throw new CacheException("Unable to resume suspended transaction " + tx, e);
+         }
+      }
+      return locked;
+   }
+}
+
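
A usage sketch for the lock (the "write.lock" name is illustrative; in practice locks are handed out
by JBCDirectory.makeLock(), shown in the next file):

   Lock lock = directory.makeLock("write.lock");
   if (lock.obtain())        // the 2-phase commit places the token cluster-wide
   {
      try
      {
         // index writes here are batched into the transaction begun by obtain()
      }
      finally
      {
         lock.release();     // commits the batched work, then removes the token
      }
   }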

Added: jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/JBCDirectory.java
===================================================================
--- jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/JBCDirectory.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/JBCDirectory.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,145 @@
+package org.apache.lucene.store.jbosscache;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.Lock;
+import org.jboss.cache.Cache;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Structure is that each Directory maps to a subtree root Node in JBoss Cache.  Each file then maps on to a child Node
+ * of the root.
+ * <p/>
+ * Some configuration parameters include <tt>-Dorg.jboss.cache.lucene.blocksize</tt>, which defaults to <tt>"16k"</tt>.
+ * Other supported values for this setting include "4k", "8k", "16k", "32k", "64k" and "128k".
+ * This denotes the size of each file chunk, and the same size-versus-performance tradeoff that applies
+ * to file system block sizes applies here.
+ * <p />
+ * A file is broken up into blocks and each block is stored as a child of the Node that represents the file.
+ *
+ * @author Manik Surtani (<a href="mailto:manik at jboss.org">manik at jboss.org</a>)
+ */
+public class JBCDirectory extends Directory
+{
+   static String CHUNK_SIZE_STR = System.getProperty("org.jboss.cache.lucene.blocksize", "16k");
+   static int CHUNK_SIZE = 1024 * Integer.parseInt(CHUNK_SIZE_STR.toUpperCase().replace("K", ""));
+   static int CHUNK_SIZE_BITS = (int) (Math.log(CHUNK_SIZE) / Math.log(2));
+   static int CHUNK_MASK = CHUNK_SIZE - 1;
+
+   private Cache<Object, Object> cache;
+   private Node<Object, Object> node;
+
+   /**
+    * Constructs a JBoss Cache based directory implementation.
+    * @param node Node in JBoss Cache to use as a "starting point", under which a sub tree structure will be created.
+    * @param cache Running cache instance to use.
+    */
+   public JBCDirectory(Node node, Cache cache)
+   {
+      this.node = node;
+      this.cache = cache;
+   }
+
+   @Override
+   public Lock makeLock(String fileName)
+   {
+      return new DistributedLock(cache, fileName);
+   }
+
+   public String[] list() throws IOException
+   {
+      Set childrenNames = node.getKeys();
+      return (String[]) childrenNames.toArray(new String[]{});
+   }
+
+   public boolean fileExists(String name) throws IOException
+   {
+      return node.get(name) != null;
+   }
+
+   public long fileModified(String name) throws IOException
+   {
+      return getFile(name).getLastModified();
+   }
+
+   public void touchFile(String name) throws IOException
+   {
+      getFile(name).touch();
+   }
+
+   public void deleteFile(String name) throws IOException
+   {
+      removeFile(name);
+   }
+
+   public void renameFile(String from, String to) throws IOException
+   {
+      Metadata file = getFile(from);
+      if (file == null) return; // nothing to rename
+
+      Node newFile = node.addChild(new Fqn(to));
+      Node oldFile = node.getChild(from);
+
+      for (Object o : oldFile.getChildrenNames()) // chunks
+      {
+         cache.move(new Fqn(oldFile.getFqn(), o), newFile.getFqn());
+      }
+      cache.removeNode(oldFile.getFqn());
+
+      file.setFileName(to);
+      file.touch();
+      node.remove(from); // remove old metadata
+      node.put(to, file); // new metadata
+   }
+
+   public long fileLength(String name) throws IOException
+   {
+      return getFile(name).getSize();
+   }
+
+   public IndexOutput createOutput(String name) throws IOException
+   {
+      if (!fileExists(name))
+      {
+         // create it
+         Metadata file = new Metadata(name);
+         node.addChild(new Fqn(name));
+         node.put(name, file);
+      }
+      return new ByteArrayIO.ByteArrayIndexOutput(node.getChild(name), node, getFile(name));
+   }
+
+   public IndexInput openInput(String name) throws IOException
+   {
+      return new ByteArrayIO.ByteArrayIndexInput(node.getChild(name), getFile(name));
+   }
+
+   public void close() throws IOException
+   {
+   }
+
+   /**
+    * Retrieves metadata for an index file from the cache
+    * @param name name of index file
+    * @return file metadata
+    */
+   protected Metadata getFile(String name)
+   {
+      return (Metadata) node.get(name);
+   }
+
+   /**
+    * Deletes an index file from the cache by removing its metadata as well as any child nodes containing file data.
+    * @param name name of index file to delete
+    */
+   protected void removeFile(String name)
+   {
+      Metadata file = (Metadata) node.remove(name);
+      if (file != null) node.removeChild(file.getFileName());
+   }
+}
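
A usage sketch, mirroring how the tests below wire things together (the /indexes Fqn is simply the
name the tests use; the blocksize override is optional):

   // optionally, on the JVM command line: -Dorg.jboss.cache.lucene.blocksize=32k
   Cache cache = new DefaultCacheFactory().createCache();  // configure as described in the README
   Node indexRoot = cache.getRoot().addChild(Fqn.fromString("/indexes"));
   Directory dir = new JBCDirectory(indexRoot, cache);
   IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer());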

Added: jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/Metadata.java
===================================================================
--- jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/Metadata.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/java/org/apache/lucene/store/jbosscache/Metadata.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,110 @@
+package org.apache.lucene.store.jbosscache;
+
+import java.io.Serializable;
+
+/**
+ * Metadata for an index file stored in JBoss Cache.
+ *
+ * @author Manik Surtani (<a href="mailto:manik at jboss.org">manik at jboss.org</a>)
+ */
+public class Metadata implements Serializable
+{
+   private long lastModified;
+   private String fileName;
+   private int numChunks;
+   private long size;
+
+   public Metadata(String fileName)
+   {
+      this.fileName = fileName;
+      touch();
+   }
+
+   public Metadata(long lastModified, String fileName, int numChunks)
+   {
+      this.lastModified = lastModified;
+      this.fileName = fileName;
+      this.numChunks = numChunks;
+   }
+
+   public long getLastModified()
+   {
+      return lastModified;
+   }
+
+   public void setLastModified(long lastModified)
+   {
+      this.lastModified = lastModified;
+   }
+
+   public String getFileName()
+   {
+      return fileName;
+   }
+
+   public void setFileName(String fileName)
+   {
+      this.fileName = fileName;
+   }
+
+   public int getNumChunks()
+   {
+      return numChunks;
+   }
+
+   public void setNumChunks(int numChunks)
+   {
+      this.numChunks = numChunks;
+   }
+
+   public long getSize()
+   {
+      return size;
+   }
+
+   public void setSize(long size)
+   {
+      this.size = size;
+   }
+
+   public void touch()
+   {
+      setLastModified(System.currentTimeMillis());
+   }
+
+
+   public String toString()
+   {
+      return "Metadata{" +
+            "lastModified=" + lastModified +
+            ", fileName='" + fileName + '\'' +
+            ", numChunks=" + numChunks +
+            ", size=" + size +
+            '}';
+   }
+
+   public boolean equals(Object o)
+   {
+      if (this == o) return true;
+      if (o == null || getClass() != o.getClass()) return false;
+
+      Metadata metadata = (Metadata) o;
+
+      if (lastModified != metadata.lastModified) return false;
+      if (numChunks != metadata.numChunks) return false;
+      if (size != metadata.size) return false;
+      if (fileName != null ? !fileName.equals(metadata.fileName) : metadata.fileName != null) return false;
+
+      return true;
+   }
+
+   public int hashCode()
+   {
+      int result;
+      result = (int) (lastModified ^ (lastModified >>> 32));
+      result = 31 * result + (fileName != null ? fileName.hashCode() : 0);
+      result = 31 * result + numChunks;
+      result = 31 * result + (int) (size ^ (size >>> 32));
+      return result;
+   }
+}
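
To make the resulting cache layout concrete: the Metadata object lives in the directory node's data map,
keyed by file name, while the file's bytes live in one child node per chunk under the key "buf".
A sketch (the file name is hypothetical):

   /indexes                                  <- directory node passed to JBCDirectory
      data: { "segments.gen" -> Metadata }
      /segments.gen                          <- file node
         /0    data: { "buf" -> byte[] }     <- chunk 0
         /1    data: { "buf" -> byte[] }     <- chunk 1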

Added: jbosscache-lucene/jbosscache/src/test/log4j.xml
===================================================================
--- jbosscache-lucene/jbosscache/src/test/log4j.xml	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/test/log4j.xml	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+
+<!-- ===================================================================== -->
+<!--                                                                       -->
+<!--  Log4j Configuration                                                  -->
+<!--                                                                       -->
+<!-- ===================================================================== -->
+
+<!-- $Id$ -->
+
+<!--
+   | For more configuration information and examples see the Jakarta Log4j
+   | website: http://jakarta.apache.org/log4j
+ -->
+
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
+
+   <!-- ================================= -->
+   <!-- Preserve messages in a local file -->
+   <!-- ================================= -->
+
+   <!-- A time/date based rolling appender -->
+   <appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
+      <param name="File" value="jbosscache.log"/>
+      <param name="Append" value="true"/>
+
+      <!-- Rollover at midnight each day -->
+      <param name="DatePattern" value="'.'yyyy-MM-dd"/>
+
+      <!-- Rollover at the top of each hour
+      <param name="DatePattern" value="'.'yyyy-MM-dd-HH"/>
+      -->
+      <param name="Threshold" value="TRACE"/>
+
+      <layout class="org.apache.log4j.PatternLayout">
+         <!-- The default pattern: Date Priority [Category] Message\n -->
+         <param name="ConversionPattern" value="%d %-5p [%c] (%t) %m%n"/>
+
+         <!-- The full pattern: Date MS Priority [Category] (Thread:NDC) Message\n
+        <param name="ConversionPattern" value="%d %-5r %-5p [%c] (%t:%x) %m%n"/>
+         -->
+      </layout>
+   </appender>
+
+   <!-- ============================== -->
+   <!-- Append messages to the console -->
+   <!-- ============================== -->
+
+   <appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
+      <param name="Threshold" value="TRACE"/>
+      <param name="Target" value="System.out"/>
+
+      <layout class="org.apache.log4j.PatternLayout">
+         <!-- The default pattern: Date Priority [Category] Message\n -->
+         <param name="ConversionPattern" value="%d %-5p [%c{1}] (%t) %m%n"/>
+      </layout>
+   </appender>
+
+
+   <!-- ================ -->
+   <!-- Limit categories -->
+   <!-- ================ -->
+
+   <!-- needs to be ERROR otherwise other seeminly problematic transaction rollbacks get logged as WARN but in
+        the case of the Lucene plugin, this is not exceptional and just a part of attempting to obtain a distributed lock. -->
+   <category name="org.jboss.cache">
+      <priority value="ERROR"/>
+   </category>
+
+   <!-- See reasoning above -->
+   <category name="org.jboss.cache.transaction">
+      <priority value="FATAL"/>
+   </category>
+
+   <category name="org.jboss.tm">
+      <priority value="WARN"/>
+   </category>
+
+   <category name="org.jgroups">
+      <priority value="WARN"/>
+   </category>
+
+   <!-- ======================= -->
+   <!-- Setup the Root category -->
+   <!-- ======================= -->
+
+   <root>
+      <appender-ref ref="CONSOLE"/>
+      <!--<appender-ref ref="FILE"/>-->
+   </root>
+
+</log4j:configuration>

Added: jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestByteArrayIO.java
===================================================================
--- jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestByteArrayIO.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestByteArrayIO.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,221 @@
+package org.apache.lucene.store.jbosscache;
+
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.jboss.cache.Cache;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+
+import java.io.ByteArrayOutputStream;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import junit.framework.TestCase;
+
+public class TestByteArrayIO extends TestCase
+{
+   public void testChunkNumbering()
+   {
+      assert ByteArrayIO.getChunkNumber(-1) == 0;
+      assert ByteArrayIO.getChunkNumber(0) == 0;
+      assert ByteArrayIO.getChunkNumber(1) == 0;
+      assert ByteArrayIO.getChunkNumber(JBCDirectory.CHUNK_SIZE - 1) == 0;
+      assert ByteArrayIO.getChunkNumber(JBCDirectory.CHUNK_SIZE) == 1;
+      assert ByteArrayIO.getChunkNumber(JBCDirectory.CHUNK_SIZE + 1) == 1;
+   }
+
+   public void testChunkPointer()
+   {
+      assert ByteArrayIO.getChunkPointer(0) == 0;
+      assert ByteArrayIO.getChunkPointer(1) == 1;
+      assert ByteArrayIO.getChunkPointer(JBCDirectory.CHUNK_SIZE - 1) == JBCDirectory.CHUNK_SIZE - 1;
+      assert ByteArrayIO.getChunkPointer(JBCDirectory.CHUNK_SIZE) == 0;
+      assert ByteArrayIO.getChunkPointer(JBCDirectory.CHUNK_SIZE + 1) == 1;
+      assert ByteArrayIO.getChunkPointer(2 * JBCDirectory.CHUNK_SIZE) == 0;
+      assert ByteArrayIO.getChunkPointer(2 * JBCDirectory.CHUNK_SIZE + 9) == 9;
+   }
+
+   // now some REAL tests
+
+   public void testReadChunks() throws Exception
+   {
+      JBCDirectory.CHUNK_SIZE = 64;
+      JBCDirectory.CHUNK_SIZE_BITS = 6;
+      JBCDirectory.CHUNK_MASK = 63;
+
+      Cache cache = new DefaultCacheFactory().createCache();
+      Metadata file1 = new Metadata(0L, "Hello.txt", 1);
+      cache.put("/Dir", file1.getFileName(), file1);
+
+      Metadata file2 = new Metadata(0L, "World.txt", 1);
+      cache.put("/Dir", file2.getFileName(), file2);
+
+      // byte array for Hello.txt
+      String helloText = "Hello world.  This is some text.";
+      cache.put(new Fqn("Dir", "Hello.txt", new Integer(0)), "buf", helloText.getBytes());
+
+      // byte array for World.txt - should be in at least 2 chunks.
+      String worldText = "This String should contain more than sixty four characters but less than one hundred and twenty eight.";
+
+      byte[] buf = new byte[JBCDirectory.CHUNK_SIZE];
+      System.arraycopy(worldText.getBytes(), 0, buf, 0, JBCDirectory.CHUNK_SIZE);
+      cache.put(new Fqn("Dir", "World.txt", new Integer(0)), "buf", buf);
+      String part1 = new String(buf);
+      buf = new byte[JBCDirectory.CHUNK_SIZE];
+      System.arraycopy(worldText.getBytes(), JBCDirectory.CHUNK_SIZE, buf, 0, worldText.length() - JBCDirectory.CHUNK_SIZE);
+      cache.put(new Fqn("Dir", "World.txt", new Integer(1)), "buf", buf);
+      String part2 = new String(buf);
+      // make sure the generated bytes do add up!
+      assert worldText.equals(part1 + part2.trim());
+
+      file1.setSize(helloText.length());
+      file2.setSize(worldText.length());
+
+      // now we're set up.  Nice.
+
+      JBCDirectory dir = new JBCDirectory(cache.getNode("/Dir"), cache);
+
+
+      Set s = new HashSet();
+      s.add("Hello.txt");
+      s.add("World.txt");
+      Set other = new HashSet(Arrays.asList(dir.list()));
+
+      // ok, file listing works.
+      assert other.equals(s);
+
+      IndexInput ii = dir.openInput("Hello.txt");
+
+      assert ii.length() == helloText.length();
+
+      ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+      for (int i = 0; i < ii.length(); i++) baos.write(ii.readByte());
+
+      assert new String(baos.toByteArray()).equals(helloText);
+
+      ii = dir.openInput("World.txt");
+
+      assert ii.length() == worldText.length();
+
+      baos = new ByteArrayOutputStream();
+
+      for (int i = 0; i < ii.length(); i++) baos.write(ii.readByte());
+
+      assert new String(baos.toByteArray()).equals(worldText);
+
+      // now with buffered reading
+
+      ii = dir.openInput("Hello.txt");
+
+      assert ii.length() == helloText.length();
+
+      baos = new ByteArrayOutputStream();
+
+      long toRead = ii.length();
+      while (toRead > 0)
+      {
+         buf = new byte[19]; // suitably arbitrary
+         int bytesRead = (int) Math.min(toRead, 19);
+         ii.readBytes(buf, 0, bytesRead);
+         toRead = toRead - bytesRead;
+         baos.write(buf, 0, bytesRead);
+      }
+
+      assert new String(baos.toByteArray()).equals(helloText);
+
+      ii = dir.openInput("World.txt");
+
+      assert ii.length() == worldText.length();
+
+      baos = new ByteArrayOutputStream();
+
+      toRead = ii.length();
+      while (toRead > 0)
+      {
+         buf = new byte[19]; // suitably arbitrary
+         int bytesRead = (int) Math.min(toRead, 19);
+         ii.readBytes(buf, 0, bytesRead);
+         toRead = toRead - bytesRead;
+         baos.write(buf, 0, bytesRead);
+      }
+
+      assert new String(baos.toByteArray()).equals(worldText);
+
+      dir.removeFile("Hello.txt");
+      assert !cache.getRoot().getChild("Dir").hasChild("Hello.txt");
+
+      dir.renameFile("World.txt", "HelloWorld.txt");
+      assert !cache.getRoot().getChild("Dir").hasChild("World.txt");
+      assert cache.getRoot().getChild("Dir").hasChild("HelloWorld.txt");
+
+      // test that contents survive a move
+      ii = dir.openInput("HelloWorld.txt");
+
+      assert ii.length() == worldText.length();
+
+      baos = new ByteArrayOutputStream();
+
+      toRead = ii.length();
+      while (toRead > 0)
+      {
+         buf = new byte[19]; // suitably arbitrary
+         int bytesRead = (int) Math.min(toRead, 19);
+         ii.readBytes(buf, 0, bytesRead);
+         toRead = toRead - bytesRead;
+         baos.write(buf, 0, bytesRead);
+      }
+
+      assert new String(baos.toByteArray()).equals(worldText);
+
+   }
+
+   public void testWriteChunks() throws Exception
+   {
+      JBCDirectory.CHUNK_SIZE = 64;
+      JBCDirectory.CHUNK_SIZE_BITS = 6;
+      JBCDirectory.CHUNK_MASK = 63;
+
+      Cache cache = new DefaultCacheFactory().createCache();
+      Node d = cache.getRoot().addChild(Fqn.fromString("/Dir"));
+
+      JBCDirectory dir = new JBCDirectory(d, cache);
+
+      IndexOutput io = dir.createOutput("MyNewFile.txt");
+
+      io.writeByte((byte) 66);
+      io.writeByte((byte) 69);
+
+      io.close();
+
+      assert d.hasChild("MyNewFile.txt");
+      Node f = d.getChild("MyNewFile.txt");
+      assert f.hasChild(new Integer(0));
+
+      // test contents by reading:
+      byte[] buf = new byte[9];
+      IndexInput ii = dir.openInput("MyNewFile.txt");
+      ii.readBytes(buf, 0, (int) ii.length());
+
+      assert new String(new byte[]{66, 69}).equals(new String(buf).trim());
+
+      String testText = "This is some rubbish again that will span more than one chunk - one hopes.  Who knows, maybe even three or four chunks.";
+      io.seek(0);
+      io.writeBytes(testText.getBytes(), 0, testText.length());
+      io.close();
+      // now compare.
+      assert d.hasChild("MyNewFile.txt");
+      f = d.getChild("MyNewFile.txt");
+      assert f.hasChild(new Integer(0));
+      assert f.hasChild(new Integer(1));
+
+      Node c0 = f.getChild(new Integer(0));
+      Node c1 = f.getChild(new Integer(1));
+
+      assert testText.equals(new String((byte[]) c0.get("buf")) + new String((byte[]) c1.get("buf")).trim());
+
+   }
+
+}

Added: jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestConcurrentUsage.java
===================================================================
--- jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestConcurrentUsage.java	                        (rev 0)
+++ jbosscache-lucene/jbosscache/src/test/org/apache/lucene/store/jbosscache/TestConcurrentUsage.java	2008-04-25 09:49:39 UTC (rev 5681)
@@ -0,0 +1,349 @@
+package org.apache.lucene.store.jbosscache;
+
+import junit.framework.TestCase;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.demo.FileDocument;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.jboss.cache.Cache;
+import org.jboss.cache.CacheSPI;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.Fqn;
+import org.jboss.cache.Node;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.transaction.DummyTransactionManagerLookup;
+import org.jgroups.Address;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
+public class TestConcurrentUsage extends TestCase
+{
+   // Number of threads to concurrently access the replicated index
+   static final int NUM_THREADS = 5;
+
+   // Number of cache instances to replicate the indexes across
+   static final int NUM_INSTANCES = 5;
+
+   // Number of loops to run the test
+   static final int NUM_LOOPS = 25;
+
+   // Ratio of number of index reads per write.
+   // E.g., READ_RATIO of 10 means that there would be 10 index reads for each index write.
+   // This is an approximation as a random "coin toss" is used to determine behaviour per operation loop.
+   static final int READ_RATIO = 5;
+
+   File dummyDocToIndex = new File("/tmp/dummyToIndex.lucene");
+
+   Cache[] caches;
+   Directory[] directories;
+
+   // ----------------- Test setup and teardown ----------------
+
+   @Override
+   protected void setUp() throws Exception
+   {
+      createDummyDocToIndex();
+      caches = new Cache[NUM_INSTANCES];
+      directories = new Directory[NUM_INSTANCES];
+
+      for (int i = 0; i < NUM_INSTANCES; i++)
+      {
+         caches[i] = createCache();
+         directories[i] = new JBCDirectory(caches[i].getRoot().addChild(Fqn.fromString("/indexes")), caches[i]);
+      }
+
+      blockUntilClusterIsFormed(60000);
+   }
+
+   @Override
+   protected void tearDown() throws Exception
+   {
+      if (dummyDocToIndex.exists()) dummyDocToIndex.delete();
+
+      if (directories != null)
+      {
+         for (Directory d : directories)
+         {
+            if (d != null) d.close();
+         }
+      }
+      if (caches != null)
+      {
+         for (Cache c : caches)
+         {
+            if (c != null) c.stop();
+         }
+      }
+
+      directories = null;
+      caches = null;
+   }
+
+   // ----------------- Tests ----------------
+   public void testConcurrency() throws Exception
+   {
+      // start with a write to ensure index files exist otherwise Lucene will barf
+      doWriteOperation(directories[0]);
+
+      final Random r = new Random();
+      final CountDownLatch latch = new CountDownLatch(1);
+
+      Worker[] workers = new Worker[NUM_THREADS];
+
+      for (int i = 0; i < NUM_THREADS; i++)
+      {
+         workers[i] = new Worker("Worker-" + i, r, latch);
+         workers[i].start();
+      }
+
+      latch.countDown();
+
+      for (Worker w : workers) w.join();
+
+      // ensure there are no exceptions on any of the threads
+      for (Worker w : workers) if (w.e != null) throw w.e;
+
+      // check cache consistency
+      assertCacheConsistency();
+   }
+
+   public class Worker extends Thread
+   {
+      Random r;
+      CountDownLatch latch;
+      Exception e;
+
+      public Worker(String name, Random r, CountDownLatch latch)
+      {
+         super(name);
+         this.r = r;
+         this.latch = latch;
+      }
+
+      public void run()
+      {
+         try
+         {
+            latch.await();
+            for (int i = 0; i < NUM_LOOPS; i++)
+            {
+               // randomly select whether to simulate an index read or write
+               boolean write = r.nextInt(READ_RATIO + 1) == 1;
+               System.out.println("Thread " + getName() + ", loop " + i + ", operation: " + (write ? "WRITE" : "READ"));
+               // randomly select a directory instance
+               Directory d = directories[r.nextInt(NUM_INSTANCES)];
+
+               if (write)
+                  doWriteOperation(d);
+               else
+                  doReadOperation(d);
+            }
+         }
+         catch (Exception e)
+         {
+            this.e = e;
+         }
+
+         System.out.println("Thread " + getName() + ": FINISHED!");
+      }
+   }
+
+   private void doWriteOperation(Directory d) throws Exception
+   {
+      // this is a write
+      IndexWriter writer = null;
+      try
+      {
+         writer = new IndexWriter(d, new StandardAnalyzer());
+         // a dummy document
+         Document doc = FileDocument.Document(dummyDocToIndex);
+         writer.addDocument(doc);
+      }
+      catch (LockObtainFailedException lofe)
+      {
+         // this is normal; could happen.  Make sure you set a sufficient acquisition timeout in IndexWriter to minimise this.
+      }
+      finally
+      {
+         if (writer != null) writer.close();
+      }
+   }
+
+   private void doReadOperation(Directory d) throws Exception
+   {
+      IndexSearcher search = null;
+      try
+      {
+         // this is a read
+         search = new IndexSearcher(d);
+
+         // dummy query that probably won't return anything
+         QueryParser qp = new QueryParser("field", new StandardAnalyzer());
+         search.search(qp.parse("a"));
+      }
+      finally
+      {
+         if (search != null) search.close();
+      }
+   }
+
+   // ----------------- Utility methods to configure caches and help ensure a cluster is formed ----------------
+
+   private void assertCacheConsistency()
+   {
+      for (int i = 0; i < NUM_INSTANCES - 1; i++) assertCachesEqual(caches[i], caches[i + 1]);
+   }
+
+   private void assertCachesEqual(Cache c1, Cache c2)
+   {
+      assertNodesEqual(c1.getRoot(), c2.getRoot(), c1.getLocalAddress(), c2.getLocalAddress());
+   }
+
+   private void assertNodesEqual(Node n1, Node n2, Address a1, Address a2)
+   {
+      Map data1 = n1.getData();
+      Map data2 = n2.getData();
+
+      assertTrue("Data on node (n1: " + n1.getFqn() + ", " + data1.size() + ") (n2: " + n2.getFqn() + ", " + data2.size() + ") should be the same on instances " + a1 + " and " + a2, dataMapsAreEqual(data1, data2));
+      assertEquals("Children on node " + n1.getFqn() + " should be the same on instances " + a1 + " and " + a2, n1.getChildrenNames(), n2.getChildrenNames());
+
+      Iterator n1Children = n1.getChildren().iterator();
+
+      while (n1Children.hasNext())
+      {
+         Node n1Child = (Node) n1Children.next();
+         assertNodesEqual(n1Child, n2.getChild(n1Child.getFqn().getLastElement()), a1, a2);
+      }
+   }
+
+   private boolean dataMapsAreEqual(Map m1, Map m2)
+   {
+      if (m1 == m2) return true;
+
+      if (m2.size() != m1.size()) return false;
+
+      try
+      {
+         Iterator<Map.Entry> i = m1.entrySet().iterator();
+         while (i.hasNext())
+         {
+            Map.Entry e = i.next();
+            Object key = e.getKey();
+            Object value = e.getValue();
+            if (value == null)
+            {
+               if (!(m2.get(key) == null && m2.containsKey(key)))
+                  return false;
+            }
+            else
+            {
+               if (value instanceof byte[])
+               {
+                  if (!Arrays.equals((byte[]) value, (byte[]) m2.get(key)))
+                     return false;
+               }
+               else if (!value.equals(m2.get(key)))
+                  return false;
+            }
+         }
+      }
+      catch (ClassCastException unused)
+      {
+         return false;
+      }
+      catch (NullPointerException unused)
+      {
+         return false;
+      }
+
+      return true;
+   }
+
+   private void blockUntilClusterIsFormed(long timeout)
+   {
+      long failTime = System.currentTimeMillis() + timeout;
+
+      while (System.currentTimeMillis() < failTime)
+      {
+         try
+         {
+            Thread.sleep(100);
+         }
+         catch (InterruptedException ie)
+         {
+         }
+         if (areCacheViewsComplete(caches))
+         {
+            return;
+         }
+      }
+
+      throw new RuntimeException("timed out before caches had complete views");
+   }
+
+   private boolean areCacheViewsComplete(Cache[] caches)
+   {
+      int memberCount = caches.length;
+
+      for (int i = 0; i < memberCount; i++)
+      {
+         if (!isCacheViewComplete(caches[i], memberCount)) return false;
+      }
+
+      return true;
+   }
+
+   private boolean isCacheViewComplete(Cache c, int memberCount)
+   {
+      CacheSPI cache = (CacheSPI) c;
+      List members = cache.getMembers();
+      if (members == null || memberCount > members.size())
+      {
+         return false;
+      }
+      else if (memberCount < members.size())
+      {
+         throw new IllegalStateException(
+               "Cache at address " + cache.getLocalAddress() + " had " +
+                     members.size() + " members; expecting " + memberCount +
+                     ". Members were (" + members);
+      }
+      return true;
+   }
+
+   private Cache createCache()
+   {
+      Cache cache = new DefaultCacheFactory().createCache(false);
+      cache.getConfiguration().setCacheMode(Configuration.CacheMode.REPL_SYNC);
+      cache.getConfiguration().setSyncCommitPhase(true);
+      cache.getConfiguration().setSyncRollbackPhase(true);
+      cache.getConfiguration().setTransactionManagerLookupClass(DummyTransactionManagerLookup.class.getName());
+      cache.getConfiguration().setNodeLockingScheme(Configuration.NodeLockingScheme.OPTIMISTIC);
+      cache.getConfiguration().setLockParentForChildInsertRemove(true);
+//      cache.getConfiguration().setLockAcquisitionTimeout(1000); // should be short with O/L
+      cache.start();
+      return cache;
+   }
+
+   private void createDummyDocToIndex() throws Exception
+   {
+      if (dummyDocToIndex.exists()) dummyDocToIndex.delete();
+      dummyDocToIndex.createNewFile();
+      FileWriter fw = new FileWriter(dummyDocToIndex);
+      fw.write("a b c d e f g");
+      fw.close();
+      dummyDocToIndex.deleteOnExit();
+   }
+
+}



