[hibernate-commits] Hibernate SVN: r15400 - in search/tags: v3_1_0_Beta2 and 4 other directories.

hibernate-commits at lists.jboss.org
Mon Oct 27 07:50:45 EDT 2008


Author: hardy.ferentschik
Date: 2008-10-27 07:50:45 -0400 (Mon, 27 Oct 2008)
New Revision: 15400

Added:
   search/tags/v3_1_0_Beta2/
   search/tags/v3_1_0_Beta2/build.xml
   search/tags/v3_1_0_Beta2/changelog.txt
   search/tags/v3_1_0_Beta2/common-build.xml
   search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml
   search/tags/v3_1_0_Beta2/ivy.xml
   search/tags/v3_1_0_Beta2/lib/README.txt
   search/tags/v3_1_0_Beta2/pom.xml
   search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java
Removed:
   search/tags/v3_1_0_Beta2/build.xml
   search/tags/v3_1_0_Beta2/changelog.txt
   search/tags/v3_1_0_Beta2/common-build.xml
   search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml
   search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml
   search/tags/v3_1_0_Beta2/ivy.xml
   search/tags/v3_1_0_Beta2/lib/README.txt
   search/tags/v3_1_0_Beta2/pom.xml
   search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java
Log:
Created tag v3_1_0_Beta2.

Copied: search/tags/v3_1_0_Beta2 (from rev 15392, search/trunk)

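The copy record above corresponds to a standard Subversion tag operation: the tag directory was created as a cheap copy of trunk at revision 15392. As a rough sketch of the equivalent client command (the repository root URL below is hypothetical and not part of this commit), it would look like:

    svn copy -r 15392 https://svn.example.org/repos/hibernate/search/trunk \
             https://svn.example.org/repos/hibernate/search/tags/v3_1_0_Beta2 \
             -m "Created tag v3_1_0_Beta2."

The per-file "Deleted"/"Copied" pairs that follow record individual files in the new tag being refreshed from later trunk revisions (for example build.xml from rev 15393 and changelog.txt from rev 15399).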
Deleted: search/tags/v3_1_0_Beta2/build.xml
===================================================================
--- search/trunk/build.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/build.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,329 +0,0 @@
-<!-- $Id$ -->
-<!--
-
-  Hibernate Search ANT build script.
-
-  You need JDK 5.0 installed to build Hibernate Search.
-
--->
-
-<project name="Hibernate Search" default="dist" basedir="."
-    xmlns:ivy="antlib:fr.jayasoft.ivy.ant">
-
-    <!-- Give user a chance to override without editing this file
-		   (and without typing -D each time it compiles it) -->
-	<property file="build.properties"/>
-	<property file="${user.home}/.ant.properties"/>
-
-	<!-- Name of project and version, used to create filenames -->
-	<property name="Name" value="Hibernate Search"/>
-	<property name="name" value="hibernate-search"/>
-	<property name="version" value="3.1.0.Beta2"/>
-	<property name="javadoc.packagenames" value="org.hibernate.search.*"/>
-	<property name="copy.test" value="true"/>
-	<property name="javac.source" value="1.5"/>
-	<property name="javac.target" value="1.5"/>
-    <property name="jdbc.dir" value="jdbc"/>
-    <property name="common.dir" value="${basedir}"/>
-
-    <property name="ivy.dep.dir" value="${basedir}/build/lib" />
-
-    <!-- ivy load -->
-	<property name="ivy.jar.dir" value="${basedir}/ivy" />
-    <property name="ivy.conf.dir" value="${basedir}" />
-	<path id="ivy.lib.path">
-		<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
-	</path>
-	<taskdef resource="fr/jayasoft/ivy/ant/antlib.xml"
-			  uri="antlib:fr.jayasoft.ivy.ant" classpathref="ivy.lib.path"/>
-
-    <import file="${common.dir}/common-build.xml"/>
-
-    
-    <property name="build.testresources.dir" value="${build.dir}/testresources"/>
-    <property name="testresources.dir" value="${basedir}/src/test-resources"/>
-
-    <!-- override order for JBossXB to bootstrap properly -->
-    <path id="junit.classpath">
-           <fileset dir="${lib.dir}">
-                 <include name="*.jar"/>
-           </fileset>
-           <pathelement path="${classes.dir}"/>
-           <pathelement path="${testclasses.dir}"/>
-           <path refid="junit.moduleclasspath"/>
-           <path refid="lib.class.path"/>
-           <path location="${clover.jar}"/>
-    </path>
-	
-    <!-- override order for JBossXB to bootstrap properly -->
-    <path id="lib.class.path">
-        <fileset dir="${ivy.dep.dir}/core">
-            <include name="*.jar"/>
-            <exclude name="xml-apis.jar"/>
-            <exclude name="xerces*.jar"/>
-        </fileset>
-        <fileset dir="${lib.dir}">
-            <include name="*.jar"/>
-        </fileset>
-		<pathelement path="${clover.jar}"/>
-    </path>
-	
-	<path id="junit.moduleclasspath">
-        <!-- order matters for JBoss XB proper bootstrap -->
-        <fileset dir="${lib.dir}/test">
-			<include name="*.jar"/>
-			<include name="*.zip"/>
-		</fileset>
-        <pathelement location="${src.dir}"/>
-		<pathelement location="${test.dir}"/>
-		<!-- pathelement location="${annotations.jar}"/>
-        <pathelement location="${entitymanager.jar}"/ -->
-        <fileset dir="${ivy.dep.dir}/test">
-			<include name="*.jar"/>
-		</fileset>
-        <fileset dir="${jdbc.dir}">
-			<include name="*.jar"/>
-			<include name="*.zip"/>
-		</fileset>
-	</path>
-
-    <target name="init">
-		<antcall target="common-build.init"/>
-        <tstamp>
-            <format property="now" pattern="yyyyMMddhhmmss"/>
-        </tstamp>
-        <mkdir dir="${ivy.dep.dir}/core"/>
-        <mkdir dir="${ivy.dep.dir}/test"/>
-        <ivy:configure file="${ivy.jar.dir}/ivyconf.xml" />
-        <mkdir dir="${lib.dir}/test"/>
-        <mkdir dir="${build.testresources.dir}"/>
-    </target>
-
-    <target name="get.deps.core" depends="init" description="retrieve the core dependencies">
-        <ivy:resolve conf="default" />
-        <ivy:retrieve pattern="${ivy.dep.dir}/core/[artifact].[ext]" conf="default" />
-    </target>
-
-    <target name="compile" depends="init,get.deps.core" description="Compile the Java source code">
-        <available
-				classname="org.eclipse.core.launcher.Main"
-				property="build.compiler"
-				value="org.eclipse.jdt.core.JDTCompilerAdapter"
-				classpath="${java.class.path}"/>
-		<javac
-				srcdir="${src.dir}"
-				destdir="${classes.dir}"
-				classpathref="lib.class.path"
-				debug="${javac.debug}"
-				optimize="${javac.optimize}"
-				nowarn="on"
-                source="${javac.source}"
-                target="${javac.target}">
-			<src path="${src.dir}"/>
-		</javac>
-		<copy todir="${classes.dir}">
-			<fileset dir="${src.dir}">
-				<include name="**/resources/*.properties"/>
-				<include name="**/*.xsd"/>
-			</fileset>
-		</copy>
-	</target>
-
-     <target name="get.deps.test" depends="init" description="retrieve the test dependencies">
-        <ivy:resolve conf="test" />
-        <ivy:retrieve pattern="${ivy.dep.dir}/test/[artifact].[ext]" conf="test" />
-    </target>
-
-    <target name="compiletest" depends="init,get.deps.test,compile" description="Compile the tests">
-        <available
-				classname="org.eclipse.core.launcher.Main"
-				property="build.compiler"
-				value="org.eclipse.jdt.core.JDTCompilerAdapter"
-				classpath="${java.class.path}"/>
-		<javac
-				destdir="${testclasses.dir}"
-				classpathref="junit.classpath"
-				debug="${javac.debug}"
-				optimize="${javac.optimize}"
-				nowarn="on"
-                source="${javac.source}"
-                target="${javac.target}">
-			<src refid="testsrc.path"/>
-		</javac>
-	</target>
-
-    <target name="prepare-test-resources" depends="compiletest">
-        <copy todir="${build.testresources.dir}">
-            <fileset dir="${testresources.dir}">
-                <include name="**/*.*"/>
-				<exclude name="hibernate.properties"/>
-            </fileset>
-        </copy>
-        <mkdir dir="${build.testresources.dir}/jars"/>
-        <jar filesetmanifest="merge" jarfile="${build.testresources.dir}/jars/jms-slave.jar" >
-            <fileset dir="${testclasses.dir}">
-                <include name="org/hibernate/search/test/jms/slave/**.*"/>
-            </fileset>
-        </jar>
-        <jar filesetmanifest="merge" jarfile="${build.testresources.dir}/jars/jms-master.jar" >
-            <fileset dir="${testclasses.dir}">
-                <include name="org/hibernate/search/test/jms/master/**.*"/>
-            </fileset>
-        </jar>
-    </target>
-
-    <target name="junit" depends="compiletest, prepare-test-resources">
-		<for list="${targetdb}" param="db">
-			<sequential>
-				<antcall target="test-resources">
-					<param name="db" value="@{db}"/>
-				</antcall>
-				<mkdir dir="${testreports.dir}/@{db}"/>
-				<echo>Running against db: @{db}</echo>
-				<junit forkmode="perBatch" printsummary="yes" haltonfailure="yes">
-					<classpath>
-						<path path="${build.testresources.dir}"/>
-						<path refid="junit.classpath"/>             
-						<fileset dir="${jdbc.dir}">
-							<include name="**/*.jar"/>
-							<include name="**/*.zip"/>
-						</fileset>
-					</classpath>
-					<sysproperty key="build.dir" value="${build.dir}"/>
-					<formatter type="plain"/>
-					<formatter type="xml"/>
-					<batchtest fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no">
-						<fileset dir="${testclasses.dir}">
-							<include name="**/*Test.class"/>
-							<exclude name="**/JMSSlaveTest.class"/>
-						</fileset>
-					</batchtest>
-					<test fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no" name="org.hibernate.search.test.jms.slave.JMSSlaveTest"/>	
-				</junit>
-			</sequential>
-		</for>		
-    </target>
-
-	<!-- Run a single unit test. -->
-	<target name="junitsingle" depends="compiletest"
-			description="Run a single test suite (requires testname and jdbc.driver properties)">
-		<for list="${targetdb}" param="db">
-			<sequential>
-				<antcall target="test-resources">
-					<param name="db" value="@{db}"/>
-				</antcall>
-				<mkdir dir="${testreports.dir}/@{db}"/>
-				<echo>Running against db: @{db}</echo>
-				<junit printsummary="yes" fork="yes" haltonfailure="yes">
-					<classpath>
-						<path path="${build.testresources.dir}"/>
-						<path refid="junit.classpath"/>             
-						<fileset dir="${jdbc.dir}">
-							<include name="**/*.jar"/>
-							<include name="**/*.zip"/>
-						</fileset>
-					</classpath>
-					<sysproperty key="build.dir" value="${build.dir}"/>
-					<formatter type="plain"/>
-					<formatter type="xml"/>
-					<test fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no" name="${testname}"/>
-				</junit>
-			</sequential>
-		</for>		
-	</target>
-
-	<target name="jar" depends="compile" description="Build the distribution .jar file">
-		<mkdir dir="${classes.dir}/META-INF"/>
-		<manifest file="${classes.dir}/META-INF/MANIFEST.MF">
-			<attribute name="Implementation-Title" value="${Name}"/>
-			<attribute name="Implementation-Version" value="${version}"/>
-            <attribute name="Implementation-Vendor" value="hibernate.org"/>
-            <attribute name="Implementation-Vendor-Id" value="hibernate.org"/>
-            <attribute name="Implementation-URL" value="http://search.hibernate.org"/>
-		</manifest>
-		<antcall target="common-build.jar"/>
-        <ivy:resolve conf="default"/>
-        <ivy:publish artifactspattern="${dist.dir}/[artifact].[ext]"
-            resolver="local"
-            pubrevision="latest"
-            pubdate="${now}"
-            status="integration"
-        />
-    </target>
-
-	<!-- Some of this can probably be moved to common-build... -->
-	<target name="dist" depends="get.deps.core,get.deps.test,jar,jar,javadoc,copysource,copytest,copylib,extras"
-			description="Build everything">
-
-		<ant inheritall="false" dir="${basedir}/doc/reference"/>
-		<copy todir="${dist.dir}/doc/reference" failonerror="false">
-			<fileset dir="${basedir}/doc/reference/build">
-				<include name="**/*.*"/>
-			</fileset>
-		</copy>
-
-		<copy todir="${dist.dir}" failonerror="false">
-			<fileset dir="${common.dir}">
-				<include name="common-build.xml"/>
-			</fileset>
-		</copy>
-
-        <copy todir="${dist.dir}/test-resources" failonerror="false">
-            <fileset dir="${testresources.dir}">
-                <include name="**/*.*"/>
-            </fileset>
-        </copy>
-        <copy todir="${dist.dir}/ivy" failonerror="false">
-            <fileset dir="${ivy.jar.dir}">
-                <include name="**/*.*"/>
-            </fileset>
-        </copy>
-
-        <!-- copy dependencies -->
-        <copy todir="${dist.lib.dir}" failonerror="false">
-			<!-- fileset file="${jpa-api.jar}"/>
-            <fileset file="${commons-annotations.jar}"/ -->
-            <fileset dir="${ivy.dep.dir}/core">
-                <include name="*.jar"/>
-            </fileset>
-        </copy>
-        <mkdir dir="${dist.lib.dir}/test"/>
-        <copy todir="${dist.lib.dir}/test" failonerror="false">
-			<fileset dir="${ivy.dep.dir}/test">
-                <include name="*.jar"/>
-            </fileset>
-        </copy>
-        <copy todir="${dist.lib.dir}/test" failonerror="false">
-			<fileset file="${lib.dir}/test/*.jar"/>
-        </copy>
-
-        <mkdir dir="${dist.lib.dir}/build"/>
-        <copy todir="${dist.lib.dir}/build" failonerror="false">
-			<fileset file="${lib.dir}/build/*.jar"/>
-        </copy>
-
-        <!-- ivy uses the module name without hibernate- (to mimic the directory names). Revert the situation -->
-        <move file="${dist.lib.dir}/commons-annotations.jar" tofile="${dist.lib.dir}/hibernate-commons-annotations.jar"
-              failonerror="false"/>
-        <move file="${dist.lib.dir}/test/commons-annotations.jar" tofile="${dist.lib.dir}/test/hibernate-commons-annotations.jar"
-              failonerror="false"/>
-        <move file="${dist.lib.dir}/test/annotations.jar" tofile="${dist.lib.dir}/test/hibernate-annotations.jar"
-              failonerror="false"/>
-        <move file="${dist.lib.dir}/test/entitymanager.jar" tofile="${dist.lib.dir}/test/hibernate-entitymanager.jar"
-              failonerror="false"/>
-
-
-        <copy file="${basedir}/build.properties.dist" tofile="${dist.dir}/build.properties" failonerror="false">
-		</copy>
-		<antcall target="common-build.dist"/>
-	</target>
-
-    <target name="zip-dist" description="zip the dist">
-		<zip zipfile="${dist.dir}-${version}.zip">
-			<zipfileset prefix="${name}-${version}" dir="${dist.dir}"/>
-		</zip>
-		<tar compression="gzip" tarfile="${dist.dir}-${version}.tar.gz">
-			<tarfileset prefix="${name}-${version}" dir="${dist.dir}"/>
-		</tar>
-	</target>
-</project>

Copied: search/tags/v3_1_0_Beta2/build.xml (from rev 15393, search/trunk/build.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/build.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/build.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,330 @@
+<!-- $Id$ -->
+<!--
+
+  Hibernate Search ANT build script.
+
+  You need JDK 5.0 installed to build Hibernate Search.
+
+-->
+
+<project name="Hibernate Search" default="dist" basedir="."
+    xmlns:ivy="antlib:fr.jayasoft.ivy.ant">
+
+    <!-- Give user a chance to override without editing this file
+		   (and without typing -D each time it compiles it) -->
+	<property file="build.properties"/>
+	<property file="${user.home}/.ant.properties"/>
+
+	<!-- Name of project and version, used to create filenames -->
+	<property name="Name" value="Hibernate Search"/>
+	<property name="name" value="hibernate-search"/>
+	<property name="version" value="3.1.0.Beta2"/>
+	<property name="javadoc.packagenames" value="org.hibernate.search.*"/>
+	<property name="copy.test" value="true"/>
+	<property name="javac.source" value="1.5"/>
+	<property name="javac.target" value="1.5"/>
+    <property name="jdbc.dir" value="jdbc"/>
+    <property name="common.dir" value="${basedir}"/>
+
+    <property name="ivy.dep.dir" value="${basedir}/build/lib" />
+
+    <!-- ivy load -->
+	<property name="ivy.jar.dir" value="${basedir}/ivy" />
+    <property name="ivy.conf.dir" value="${basedir}" />
+	<path id="ivy.lib.path">
+		<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
+	</path>
+	<taskdef resource="fr/jayasoft/ivy/ant/antlib.xml"
+			  uri="antlib:fr.jayasoft.ivy.ant" classpathref="ivy.lib.path"/>
+
+    <import file="${common.dir}/common-build.xml"/>
+
+    
+    <property name="build.testresources.dir" value="${build.dir}/testresources"/>
+    <property name="testresources.dir" value="${basedir}/src/test-resources"/>
+
+    <!-- override order for JBossXB to bootstrap properly -->
+    <path id="junit.classpath">
+           <fileset dir="${lib.dir}">
+                 <include name="*.jar"/>
+           </fileset>
+           <pathelement path="${classes.dir}"/>
+           <pathelement path="${testclasses.dir}"/>
+           <path refid="junit.moduleclasspath"/>
+           <path refid="lib.class.path"/>
+           <path location="${clover.jar}"/>
+    </path>
+	
+    <!-- override order for JBossXB to bootstrap properly -->
+    <path id="lib.class.path">
+        <fileset dir="${ivy.dep.dir}/core">
+            <include name="*.jar"/>
+            <exclude name="xml-apis.jar"/>
+            <exclude name="xerces*.jar"/>
+        </fileset>
+        <fileset dir="${lib.dir}">
+            <include name="*.jar"/>
+        </fileset>
+		<pathelement path="${clover.jar}"/>
+    </path>
+	
+	<path id="junit.moduleclasspath">
+        <!-- order matters for JBoss XB proper bootstrap -->
+        <fileset dir="${lib.dir}/test">
+			<include name="*.jar"/>
+			<include name="*.zip"/>
+		</fileset>
+        <pathelement location="${src.dir}"/>
+		<pathelement location="${test.dir}"/>
+		<!-- pathelement location="${annotations.jar}"/>
+        <pathelement location="${entitymanager.jar}"/ -->
+        <fileset dir="${ivy.dep.dir}/test">
+			<include name="*.jar"/>
+		</fileset>
+        <fileset dir="${jdbc.dir}">
+			<include name="*.jar"/>
+			<include name="*.zip"/>
+		</fileset>
+	</path>
+
+    <target name="init">
+		<antcall target="common-build.init"/>
+        <tstamp>
+            <format property="now" pattern="yyyyMMddhhmmss"/>
+        </tstamp>
+        <mkdir dir="${ivy.dep.dir}/core"/>
+        <mkdir dir="${ivy.dep.dir}/test"/>
+        <ivy:configure file="${ivy.jar.dir}/ivyconf.xml" />
+        <mkdir dir="${lib.dir}/test"/>
+        <mkdir dir="${build.testresources.dir}"/>
+    </target>
+
+    <target name="get.deps.core" depends="init" description="retrieve the core dependencies">
+        <ivy:resolve conf="default" />
+        <ivy:retrieve pattern="${ivy.dep.dir}/core/[artifact].[ext]" conf="default" />
+    </target>
+
+    <target name="compile" depends="init,get.deps.core" description="Compile the Java source code">
+        <available
+				classname="org.eclipse.core.launcher.Main"
+				property="build.compiler"
+				value="org.eclipse.jdt.core.JDTCompilerAdapter"
+				classpath="${java.class.path}"/>
+		<javac
+				srcdir="${src.dir}"
+				destdir="${classes.dir}"
+				classpathref="lib.class.path"
+				debug="${javac.debug}"
+				optimize="${javac.optimize}"
+				nowarn="on"
+                source="${javac.source}"
+                target="${javac.target}">
+			<src path="${src.dir}"/>
+		</javac>
+		<copy todir="${classes.dir}">
+			<fileset dir="${src.dir}">
+				<include name="**/resources/*.properties"/>
+				<include name="**/*.xsd"/>
+			</fileset>
+		</copy>
+	</target>
+
+     <target name="get.deps.test" depends="init" description="retrieve the test dependencies">
+        <ivy:resolve conf="test" />
+        <ivy:retrieve pattern="${ivy.dep.dir}/test/[artifact].[ext]" conf="test" />
+    </target>
+
+    <target name="compiletest" depends="init,get.deps.test,compile" description="Compile the tests">
+        <available
+				classname="org.eclipse.core.launcher.Main"
+				property="build.compiler"
+				value="org.eclipse.jdt.core.JDTCompilerAdapter"
+				classpath="${java.class.path}"/>
+		<javac
+				destdir="${testclasses.dir}"
+				classpathref="junit.classpath"
+				debug="${javac.debug}"
+				optimize="${javac.optimize}"
+				nowarn="on"
+                source="${javac.source}"
+                target="${javac.target}">
+			<src refid="testsrc.path"/>
+		</javac>
+	</target>
+
+    <target name="prepare-test-resources" depends="compiletest">
+        <copy todir="${build.testresources.dir}">
+            <fileset dir="${testresources.dir}">
+                <include name="**/*.*"/>
+				<exclude name="hibernate.properties"/>
+            </fileset>
+        </copy>
+        <mkdir dir="${build.testresources.dir}/jars"/>
+        <jar filesetmanifest="merge" jarfile="${build.testresources.dir}/jars/jms-slave.jar" >
+            <fileset dir="${testclasses.dir}">
+                <include name="org/hibernate/search/test/jms/slave/**.*"/>
+            </fileset>
+        </jar>
+        <jar filesetmanifest="merge" jarfile="${build.testresources.dir}/jars/jms-master.jar" >
+            <fileset dir="${testclasses.dir}">
+                <include name="org/hibernate/search/test/jms/master/**.*"/>
+            </fileset>
+        </jar>
+    </target>
+
+    <target name="junit" depends="compiletest, prepare-test-resources">
+		<for list="${targetdb}" param="db">
+			<sequential>
+				<antcall target="test-resources">
+					<param name="db" value="@{db}"/>
+				</antcall>
+				<mkdir dir="${testreports.dir}/@{db}"/>
+				<echo>Running against db: @{db}</echo>
+				<junit forkmode="perBatch" printsummary="yes" haltonfailure="yes">
+					<classpath>
+						<path path="${build.testresources.dir}"/>
+						<path refid="junit.classpath"/>             
+						<fileset dir="${jdbc.dir}">
+							<include name="**/*.jar"/>
+							<include name="**/*.zip"/>
+						</fileset>
+					</classpath>
+					<sysproperty key="build.dir" value="${build.dir}"/>
+					<formatter type="plain"/>
+					<formatter type="xml"/>
+					<batchtest fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no">
+						<fileset dir="${testclasses.dir}">
+							<include name="**/*Test.class"/>
+							<exclude name="**/JMSSlaveTest.class"/>
+						</fileset>
+					</batchtest>
+					<test fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no" name="org.hibernate.search.test.jms.slave.JMSSlaveTest"/>	
+				</junit>
+			</sequential>
+		</for>		
+    </target>
+
+	<!-- Run a single unit test. -->
+	<target name="junitsingle" depends="compiletest"
+			description="Run a single test suite (requires testname and jdbc.driver properties)">
+		<for list="${targetdb}" param="db">
+			<sequential>
+				<antcall target="test-resources">
+					<param name="db" value="@{db}"/>
+				</antcall>
+				<mkdir dir="${testreports.dir}/@{db}"/>
+				<echo>Running against db: @{db}</echo>
+				<junit printsummary="yes" fork="yes" haltonfailure="yes">
+					<classpath>
+						<path path="${build.testresources.dir}"/>
+						<path refid="junit.classpath"/>             
+						<fileset dir="${jdbc.dir}">
+							<include name="**/*.jar"/>
+							<include name="**/*.zip"/>
+						</fileset>
+					</classpath>
+					<sysproperty key="build.dir" value="${build.dir}"/>
+					<formatter type="plain"/>
+					<formatter type="xml"/>
+					<test fork="yes" todir="${testreports.dir}/@{db}" haltonfailure="no" name="${testname}"/>
+				</junit>
+			</sequential>
+		</for>		
+	</target>
+
+	<target name="jar" depends="compile" description="Build the distribution .jar file">
+		<mkdir dir="${classes.dir}/META-INF"/>
+		<manifest file="${classes.dir}/META-INF/MANIFEST.MF">
+			<attribute name="Implementation-Title" value="${Name}"/>
+			<attribute name="Implementation-Version" value="${version}"/>
+            <attribute name="Implementation-Vendor" value="hibernate.org"/>
+            <attribute name="Implementation-Vendor-Id" value="hibernate.org"/>
+            <attribute name="Implementation-URL" value="http://search.hibernate.org"/>
+		</manifest>
+		<antcall target="common-build.jar"/>
+        <ivy:resolve conf="default"/>
+        <ivy:publish artifactspattern="${dist.dir}/[artifact].[ext]"
+            resolver="local"
+            pubrevision="latest"
+            pubdate="${now}"
+            status="integration"
+        />
+    </target>
+
+	<!-- Some of this can probably be moved to common-build... -->
+	<target name="dist" depends="get.deps.core,get.deps.test,jar,jar,javadoc,copysource,copytest,copylib,extras"
+			description="Build everything">
+
+		<ant inheritall="false" dir="${basedir}/doc/reference"/>
+		<copy todir="${dist.dir}/doc/reference" failonerror="false">
+			<fileset dir="${basedir}/doc/reference/build">
+				<include name="**/*.*"/>
+                <exclude name="en/master.xml"/>
+			</fileset>
+		</copy>
+
+		<copy todir="${dist.dir}" failonerror="false">
+			<fileset dir="${common.dir}">
+				<include name="common-build.xml"/>
+			</fileset>
+		</copy>
+
+        <copy todir="${dist.dir}/test-resources" failonerror="false">
+            <fileset dir="${testresources.dir}">
+                <include name="**/*.*"/>
+            </fileset>
+        </copy>
+        <copy todir="${dist.dir}/ivy" failonerror="false">
+            <fileset dir="${ivy.jar.dir}">
+                <include name="**/*.*"/>
+            </fileset>
+        </copy>
+
+        <!-- copy dependencies -->
+        <copy todir="${dist.lib.dir}" failonerror="false">
+			<!-- fileset file="${jpa-api.jar}"/>
+            <fileset file="${commons-annotations.jar}"/ -->
+            <fileset dir="${ivy.dep.dir}/core">
+                <include name="*.jar"/>
+            </fileset>
+        </copy>
+        <mkdir dir="${dist.lib.dir}/test"/>
+        <copy todir="${dist.lib.dir}/test" failonerror="false">
+			<fileset dir="${ivy.dep.dir}/test">
+                <include name="*.jar"/>
+            </fileset>
+        </copy>
+        <copy todir="${dist.lib.dir}/test" failonerror="false">
+			<fileset file="${lib.dir}/test/*.jar"/>
+        </copy>
+
+        <mkdir dir="${dist.lib.dir}/build"/>
+        <copy todir="${dist.lib.dir}/build" failonerror="false">
+			<fileset file="${lib.dir}/build/*.jar"/>
+        </copy>
+
+        <!-- ivy uses the module name without hibernate- (to mimic the directory names). Revert the situation -->
+        <move file="${dist.lib.dir}/commons-annotations.jar" tofile="${dist.lib.dir}/hibernate-commons-annotations.jar"
+              failonerror="false"/>
+        <move file="${dist.lib.dir}/test/commons-annotations.jar" tofile="${dist.lib.dir}/test/hibernate-commons-annotations.jar"
+              failonerror="false"/>
+        <move file="${dist.lib.dir}/test/annotations.jar" tofile="${dist.lib.dir}/test/hibernate-annotations.jar"
+              failonerror="false"/>
+        <move file="${dist.lib.dir}/test/entitymanager.jar" tofile="${dist.lib.dir}/test/hibernate-entitymanager.jar"
+              failonerror="false"/>
+
+
+        <copy file="${basedir}/build.properties.dist" tofile="${dist.dir}/build.properties" failonerror="false">
+		</copy>
+		<antcall target="common-build.dist"/>
+	</target>
+
+    <target name="zip-dist" description="zip the dist">
+		<zip zipfile="${dist.dir}-${version}.zip">
+			<zipfileset prefix="${name}-${version}" dir="${dist.dir}"/>
+		</zip>
+		<tar compression="gzip" tarfile="${dist.dir}-${version}.tar.gz">
+			<tarfileset prefix="${name}-${version}" dir="${dist.dir}"/>
+		</tar>
+	</target>
+</project>

Deleted: search/tags/v3_1_0_Beta2/changelog.txt
===================================================================
--- search/trunk/changelog.txt	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/changelog.txt	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,261 +0,0 @@
-Hibernate Search Changelog
-==========================
-
-3.1.0.Beta2 (27-09-2008)
-------------------------
-
-
-3.1.0.Beta1 (17-07-2008)
-------------------------
-
-** Bug
-    * [HSEARCH-166] - documentation error : hibernate.search.worker.batch_size vs hibernate.worker.batch_size
-    * [HSEARCH-171] - Do not log missing objects when using QueryLoader
-    * [HSEARCH-173] - CachingWrapperFilter loses its WeakReference making filter caching inefficient
-    * [HSEARCH-194] - Inconsistent performance between hibernate search and pure lucene access
-    * [HSEARCH-196] - ObjectNotFoundException not caught in FullTextSession
-    * [HSEARCH-198] - Documentation out of sync with implemented/released features
-    * [HSEARCH-203] - Counter of index modification operations not always incremented
-    * [HSEARCH-204] - Improper calls to Session during a projection not involving THIS
-    * [HSEARCH-205] - Out of Memory on copy of large indexes
-    * [HSEARCH-217] - Proper errors on parsing of all numeric configuration parameters
-    * [HSEARCH-227] - Criteria based fetching is not used when objects are loaded one by one (iterate())
-
-
-** Improvement
-    * [HSEARCH-19] - Do not filter classes on queries when we know that all Directories only contains the targeted classes
-    * [HSEARCH-156] - Retrofit FieldBridge.set lucene parameters into a LuceneOptions class
-    * [HSEARCH-157] - Make explicit in FAQ and doc that query.list() followed by query.getResultSize() triggers only one query
-    * [HSEARCH-163] - Enhance error messages when @FieldBridge is wrongly used (no impl or impl not implementing the right interfaces)
-    * [HSEARCH-176] - Permits alignment properties to lucene default (Sanne Grinovero)
-    * [HSEARCH-179] - Documentation should be explicit that @FulltextFilter filters every object, regardless which object is annotated
-    * [HSEARCH-181] - Better management of file-based index directories (Sanne Grinovero)
-    * [HSEARCH-189] - Thread management improvements for Master/Slave DirectoryProviders
-    * [HSEARCH-197] - Move to slf4j
-    * [HSEARCH-199] - Property close Search resources on SessionFactory.close()
-    * [HSEARCH-202] - Avoid many maps lookup in Workspace
-    * [HSEARCH-207] - Make DateBridge TwoWay to facilitate projection
-    * [HSEARCH-208] - Raise exception on index and purge when the entity is not an indexed entity
-    * [HSEARCH-209] - merge FullTextIndexCollectionEventListener into FullTextIndexEventListener
-    * [HSEARCH-215] - Rename Search.createFTS to Search.getFTS deprecating the old method
-    * [HSEARCH-223] - Use multiple criteria queries rather than ObjectLoader in most cases
-    * [HSEARCH-230] - Ensure initialization safety in a multi-core machine
-
-** New Feature
-    * [HSEARCH-133] - Allow overriding DefaultSimilarity for indexing and searching (Nick Vincent)
-    * [HSEARCH-141] - Allow term position information to be stored in an index
-    * [HSEARCH-153] - Provide the possibility to configure writer.setRAMBufferSizeMB()  (Lucene 2.3)
-    * [HSEARCH-154] - Provide a facility to access Lucene query explanations
-    * [HSEARCH-164] - Built-in bridge to index java.lang.Class
-    * [HSEARCH-165] - URI and URL built-in bridges
-    * [HSEARCH-174] - Improve transparent filter caching by wrapping filters into our own CachingWrapperFilter
-    * [HSEARCH-186] - Enhance analyzer to support the Solr model
-    * [HSEARCH-190] - Add pom
-    * [HSEARCH-191] - Make build independent of Hibernate Core structure
-    * [HSEARCH-192] - Move to Hibernate Core 3.3
-    * [HSEARCH-193] - Use dependency on Solr-analyzer JAR rather than the full Solr JAR
-    * [HSEARCH-195] - Expose Analyzers instance by name: searchFactory.getAnalyzer(String)
-    * [HSEARCH-200] - Expose IndexWriter setting MAX_FIELD_LENGTH via IndexWriterSetting
-    * [HSEARCH-212] - Added ReaderProvider strategy reusing unchanged segments (using reader.reopen())
-    * [HSEARCH-220] - introduce session.flushToIndexes API and deprecate batch_size
-
-
-** Task
-    * [HSEARCH-169] - Migrate to Lucene 2.3.1 (index corruption possiblity in 2.3.0)
-    * [HSEARCH-187] - Clarify which directories need read-write access, verify readonly behaviour on others.
-    * [HSEARCH-214] - Upgrade Lucene to 2.3.2
-    * [HSEARCH-229] - Deprecate FullTextQuery.BOOST
-
-
-3.0.1.GA (20-02-2008)
----------------------
-
-** Bug
-    * [HSEARCH-56] - Updating a collection does not reindex
-    * [HSEARCH-123] - Use mkdirs instead of mkdir to create necessary parent directory in the DirectoryProviderHelper
-    * [HSEARCH-128] - Indexing embedded children's child
-    * [HSEARCH-136] - CachingWrapperFilter does not cache
-    * [HSEARCH-137] - Wrong class name in Exception when a FieldBridge does not implement TwoWayFieldBridge for a document id property
-    * [HSEARCH-138] - JNDI Property names have first character cut off
-    * [HSEARCH-140] - @IndexedEmbedded default depth is effectively 1 due to integer overflow
-    * [HSEARCH-146] - ObjectLoader doesn't catch javax.persistence.EntityNotFoundException
-    * [HSEARCH-149] - Default FieldBridge for enums passing wrong class to EnumBridge constructor
-
-
-** Improvement
-    * [HSEARCH-125] - Add support for fields declared by interface or unmapped superclass
-    * [HSEARCH-127] - Wrong prefix for worker configurations
-    * [HSEARCH-129] - IndexedEmbedded for Collections Documentation
-    * [HSEARCH-130] - Should provide better log infos (on the indexBase parameter for the FSDirectoryProvider)
-    * [HSEARCH-144] - Keep indexer running till finished on VM shutdown
-    * [HSEARCH-147] - Allow projection of Lucene DocId
-
-** New Feature
-    * [HSEARCH-114] - Introduce ResultTransformer to the query API
-    * [HSEARCH-150] - Migrate to Lucene 2.3
-
-** Patch
-    * [HSEARCH-126] - Better diagnostic when Search index directory cannot be opened (Ian)
-
-
-3.0.0.GA (23-09-2007)
----------------------
-
-** Bug
-    * [HSEARCH-116] - FullTextEntityManager acessing getDelegate() in the constructor leads to NPE in JBoss AS + Seam
-    * [HSEARCH-117] - FullTextEntityManagerImpl and others should implement Serializable
-
-** Deprecation
-    * [HSEARCH-122] - Remove query.setIndexProjection (replaced by query.setProjection)
-
-** Improvement
-    * [HSEARCH-118] - Add ClassBridges (plural) functionality
-
-** New Feature
-    * [HSEARCH-81] - Create a @ClassBridge Annotation (John Griffin)
-
-
-** Task
-    * [HSEARCH-98] - Add a Getting started section to the reference documentation
-
-
-3.0.0.CR1 (4-09-2007)
----------------------
-
-** Bug
-    * [HSEARCH-108] - id of embedded object is not indexed when using @IndexedEmbedded
-    * [HSEARCH-109] - Lazy loaded entity could not be indexed
-    * [HSEARCH-110] - ScrollableResults does not obey out of bounds rules (John Griffin)
-    * [HSEARCH-112] - Unkown @FullTextFilter  when attempting to associate a filter
-
-** Deprecation
-    * [HSEARCH-113] - Remove @Text, @Keyword and @Unstored (old mapping annotations)
-
-** Improvement
-    * [HSEARCH-107] - DirectoryProvider should have a start() method
-
-** New Feature
-    * [HSEARCH-14] - introduce fetch_size for Hibernate Search scrollable resultsets (John Griffin)
-    * [HSEARCH-69] - Ability to purge an index by class (John Griffin)
-    * [HSEARCH-111] - Ability to disable event based indexing (for read only or batch based indexing)
-
-
-3.0.0.Beta4 (1-08-2007)
------------------------
-
-** Bug
-    * [HSEARCH-88] - Unable to update 2 entity types in the same transaction if they share the same index
-    * [HSEARCH-90] - Use of setFirstResult / setMaxResults can lead to a list with negative capacity (John Griffin)
-    * [HSEARCH-92] - NPE for null fields on projection
-    * [HSEARCH-99] - Avoid returning non initialized proxies in scroll() and iterate() (loader.load(EntityInfo))
-
-
-** Improvement
-    * [HSEARCH-79] - Recommend to use FlushMode.APPLICATION on massive indexing
-    * [HSEARCH-84] - Migrate to Lucene 2.2
-    * [HSEARCH-91] - Avoid wrapping a Session object if the Session is already FullTextSession
-    * [HSEARCH-100] - Rename fullTextSession.setIndexProjection() to fullTextSession.setProjection()
-    * [HSEARCH-102] - Default index operation in @Field to TOKENIZED
-    * [HSEARCH-106] - Use the shared reader strategy as the default strategy
-
-** New Feature
-    * [HSEARCH-6] - Provide access to the Hit.getScore() and potentially the Document on a query
-    * [HSEARCH-15] - Notion of Filtered Lucene queries (Hardy Ferentschik)
-    * [HSEARCH-41] - Allow fine grained analyzers (Entity, attribute, @Field)
-    * [HSEARCH-45] - Support @Fields() for multiple indexing per property (useful for sorting)
-    * [HSEARCH-58] - Support named Filters (and caching)
-    * [HSEARCH-67] - Expose mergeFactor, maxMergeDocs and minMergeDocs (Hardy Ferentschik)
-    * [HSEARCH-73] - IncrementalOptimizerStrategy triggered on transactions or operations limits
-    * [HSEARCH-74] - Ability to project Lucene meta information (Score, Boost, Document, Id, This) (John Griffin)
-    * [HSEARCH-83] - Introduce OptimizerStrategy
-    * [HSEARCH-86] - Index sharding: multiple Lucene indexes per entity type
-    * [HSEARCH-89] - FullText wrapper for JPA APIs
-    * [HSEARCH-103] - Ability to override the indexName in the FSDirectoryProviders family
-
-
-** Task
-    * [HSEARCH-94] - Deprecate ContextHelper
-
-
-3.0.0.Beta3 (6-06-2007)
------------------------
-
-** Bug
-    * [HSEARCH-64] - Exception Thrown If Index Directory Does Not Exist
-    * [HSEARCH-66] - Some results not returned in some circumstances (Brandon Munroe)
-
-
-** Improvement
-    * [HSEARCH-60] - Introduce SearchFactory / SearchFactoryImpl
-    * [HSEARCH-68] - Set index copy threads as daemon
-    * [HSEARCH-70] - Create the index base directory if it does not exists
-
-** New Feature
-    * [HSEARCH-11] - Provide access to IndexWriter.optimize()
-    * [HSEARCH-33] - hibernate.search.worker.batch_size to prevent OutOfMemoryException while inserting many objects
-    * [HSEARCH-71] - Provide fullTextSession.getSearchFactory()
-    * [HSEARCH-72] - searchFactory.optimize() and searchFactory.optimize(Class) (Andrew Hahn)
-
-
-3.0.0.Beta2 (31-05-2007)
-------------------------
-
-** Bug
-    * [HSEARCH-37] - Verify that Serializable return type are not resolved by StringBridge built in type
-    * [HSEARCH-39] - event listener declaration example is wrong
-    * [HSEARCH-44] - Build the Lucene Document in the beforeComplete transaction phase
-    * [HSEARCH-50] - Null Booleans lead to NPE
-    * [HSEARCH-59] - Unable to index @indexEmbedded object through session.index when object is lazy and field access is used in object
-
-
-** Improvement
-    * [HSEARCH-36] - Meaningful exception message when Search Listeners are not initialized
-    * [HSEARCH-38] - Make the @IndexedEmbedded documentation example easier to understand
-    * [HSEARCH-51] - Optimization: Use a query rather than batch-size to load objects when a single entity (hierarchy) is expected
-    * [HSEARCH-63] - rename query.resultSize() to getResultSize()
-
-** New Feature
-    * [HSEARCH-4] - Be able to use a Lucene Sort on queries (Hardy Ferentschik)
-    * [HSEARCH-13] - Cache IndexReaders per SearchFactory
-    * [HSEARCH-40] - Be able to embed collections in lucene index (@IndexedEmbeddable in collections)
-    * [HSEARCH-43] - Expose resultSize and do not load object when only resultSize is retrieved
-    * [HSEARCH-52] - Ability to load more efficiently an object graph from a lucene query by customizing the fetch modes
-    * [HSEARCH-53] - Add support for projection (ie read the data from the index only)
-    * [HSEARCH-61] - Move from MultiSearcher to MultiReader
-    * [HSEARCH-62] - Support pluggable ReaderProvider strategies
-    
-
-** Task
-    * [HSEARCH-65] - Update to JBoss Embedded beta2
-
-
-3.0.0.Beta1 (19-03-2007)
-------------------------
-
-Initial release as a standalone product (see Hibernate Annotations changelog for previous informations)
-
-
-Release Notes - Hibernate Search - Version 3.0.0.beta1
-
-** Bug
-    * [HSEARCH-7] - Ignore object found in the index but no longer present in the database (for out of date indexes)
-    * [HSEARCH-21] - NPE in SearchFactory while using different threads
-    * [HSEARCH-22] - Enum value Index.UN_TOKENISED is misspelled
-    * [HSEARCH-24] - Potential deadlock when using multiple DirectoryProviders in a highly concurrent index update
-    * [HSEARCH-25] - Class cast exception in org.hibernate.search.impl.FullTextSessionImpl<init>(FullTextSessionImpl.java:54)
-    * [HSEARCH-28] - Wrong indexDir property in Apache Lucene Integration
-
-
-** Improvement
-    * [HSEARCH-29] - Share the initialization state across all Search event listeners instance
-    * [HSEARCH-30] - @FieldBridge now use o.h.s.a.Parameter rather than o.h.a.Parameter
-    * [HSEARCH-31] - Move to Lucene 2.1.0
-
-** New Feature
-    * [HSEARCH-1] - Give access to Directory providers
-    * [HSEARCH-2] - Default FieldBridge for enums (Sylvain Vieujot)
-    * [HSEARCH-3] - Default FieldBridge for booleans (Sylvain Vieujot)
-    * [HSEARCH-9] - Introduce a worker factory and its configuration
-    * [HSEARCH-16] - Cluster capability through JMS
-    * [HSEARCH-23] - Support asynchronous batch worker queue
-    * [HSEARCH-27] - Ability to index associated / embedded objects

Copied: search/tags/v3_1_0_Beta2/changelog.txt (from rev 15399, search/trunk/changelog.txt)
===================================================================
--- search/tags/v3_1_0_Beta2/changelog.txt	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/changelog.txt	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,311 @@
+Hibernate Search Changelog
+==========================
+
+3.1.0.Beta2 (27-10-2008)
+------------------------
+
+** Bug
+    * [HSEARCH-142] - Modifications on objects indexed via @IndexedEmbedded not updated when not annotated @Indexed
+    * [HSEARCH-162] - NPE on queries when no entity is marked as @Indexed
+    * [HSEARCH-222] - Entities not found during concurrent update
+    * [HSEARCH-225] - Avoid using IndexReader.deleteDocument when index is not shared amongst several entity types
+    * [HSEARCH-232] - Using SnowballPorterFilterFactory throws NoClassDefFoundError
+    * [HSEARCH-237] - IdHashShardingStrategy fails on IDs having negative hashcode
+    * [HSEARCH-241] - initialize methods taking Properties cannot list available properties
+    * [HSEARCH-247] - Hibernate Search cannot run without apache-solr-analyzer.jar
+    * [HSEARCH-253] - Inconsistent detection of EventListeners during autoregistration into Hibernate listeners
+    * [HSEARCH-257] - Ignore delete operation when Core does update then delete on the same entity
+    * [HSEARCH-259] - Filter were not isolated by name in the cache
+    * [HSEARCH-262] - fullTextSession.purgeAll(Class<?>) does not consider subclasses
+    * [HSEARCH-263] - Wrong analyzers used in IndexWriter
+    * [HSEARCH-267] - Inheritance of annotations and analyzer
+    * [HSEARCH-271] - wrong Similarity used when sharing index among entities
+    * [HSEARCH-287] - master.xml is mistakenly copied to the distribution
+
+** Deprecation
+    * [HSEARCH-279] - deprecate SharedReaderProvider replaced by SharingBufferReaderProvider as default ReaderProvider
+
+** Improvement
+    * [HSEARCH-145] - Document a configuration property
+    * [HSEARCH-226] - Use Lucene ability to delete by query in IndexWriter
+    * [HSEARCH-240] - Generify the IndexShardingStrategy
+    * [HSEARCH-245] - Add ReaderStratregy.destroy() method
+    * [HSEARCH-256] - Remove CacheBitResults.YES
+    * [HSEARCH-260] - Simplify the Filter Caching definition: cache=FilterCacheModeType.[MODE]
+    * [HSEARCH-272] - Improve contention on DirectoryProviders in lucene backend
+    * [HSEARCH-273] - Make LuceneOptions an interface
+    * [HSEARCH-282] - Make the API more Generics friendly
+
+** New Feature
+    * [HSEARCH-170] - Support @Boost in @Field
+    * [HSEARCH-235] - provide a destroy() method in ReaderProvider
+    * [HSEARCH-252] - Document Solr integration
+    * [HSEARCH-258] - Add configuration option for Lucene's UseCompoundFile
+
+** Patch
+    * [HSEARCH-20] - Lucene extensions
+
+** Task
+    * [HSEARCH-231] - Update the getting started guide with Solr analyzers
+    * [HSEARCH-236] - Find whether or not indexWriter.optimize() requires an index lock
+    * [HSEARCH-244] - Abiltiy to ask SearchFactory for the scoped analyzer of a given class
+    * [HSEARCH-254] - Migrate to Solr 1.3
+    * [HSEARCH-276] - upgrade to Lucene 2.4
+    * [HSEARCH-286] - Align to GA versions of all dependencies
+    * [HSEARCH-292] - Document the new Filter caching approach
+
+
+3.1.0.Beta1 (17-07-2008)
+------------------------
+
+** Bug
+    * [HSEARCH-166] - documentation error : hibernate.search.worker.batch_size vs hibernate.worker.batch_size
+    * [HSEARCH-171] - Do not log missing objects when using QueryLoader
+    * [HSEARCH-173] - CachingWrapperFilter loses its WeakReference making filter caching inefficient
+    * [HSEARCH-194] - Inconsistent performance between hibernate search and pure lucene access
+    * [HSEARCH-196] - ObjectNotFoundException not caught in FullTextSession
+    * [HSEARCH-198] - Documentation out of sync with implemented/released features
+    * [HSEARCH-203] - Counter of index modification operations not always incremented
+    * [HSEARCH-204] - Improper calls to Session during a projection not involving THIS
+    * [HSEARCH-205] - Out of Memory on copy of large indexes
+    * [HSEARCH-217] - Proper errors on parsing of all numeric configuration parameters
+    * [HSEARCH-227] - Criteria based fetching is not used when objects are loaded one by one (iterate())
+
+
+** Improvement
+    * [HSEARCH-19] - Do not filter classes on queries when we know that all Directories only contains the targeted classes
+    * [HSEARCH-156] - Retrofit FieldBridge.set lucene parameters into a LuceneOptions class
+    * [HSEARCH-157] - Make explicit in FAQ and doc that query.list() followed by query.getResultSize() triggers only one query
+    * [HSEARCH-163] - Enhance error messages when @FieldBridge is wrongly used (no impl or impl not implementing the right interfaces)
+    * [HSEARCH-176] - Permits alignment properties to lucene default (Sanne Grinovero)
+    * [HSEARCH-179] - Documentation should be explicit that @FulltextFilter filters every object, regardless which object is annotated
+    * [HSEARCH-181] - Better management of file-based index directories (Sanne Grinovero)
+    * [HSEARCH-189] - Thread management improvements for Master/Slave DirectoryProviders
+    * [HSEARCH-197] - Move to slf4j
+    * [HSEARCH-199] - Property close Search resources on SessionFactory.close()
+    * [HSEARCH-202] - Avoid many maps lookup in Workspace
+    * [HSEARCH-207] - Make DateBridge TwoWay to facilitate projection
+    * [HSEARCH-208] - Raise exception on index and purge when the entity is not an indexed entity
+    * [HSEARCH-209] - merge FullTextIndexCollectionEventListener into FullTextIndexEventListener
+    * [HSEARCH-215] - Rename Search.createFTS to Search.getFTS deprecating the old method
+    * [HSEARCH-223] - Use multiple criteria queries rather than ObjectLoader in most cases
+    * [HSEARCH-230] - Ensure initialization safety in a multi-core machine
+
+** New Feature
+    * [HSEARCH-133] - Allow overriding DefaultSimilarity for indexing and searching (Nick Vincent)
+    * [HSEARCH-141] - Allow term position information to be stored in an index
+    * [HSEARCH-153] - Provide the possibility to configure writer.setRAMBufferSizeMB()  (Lucene 2.3)
+    * [HSEARCH-154] - Provide a facility to access Lucene query explanations
+    * [HSEARCH-164] - Built-in bridge to index java.lang.Class
+    * [HSEARCH-165] - URI and URL built-in bridges
+    * [HSEARCH-174] - Improve transparent filter caching by wrapping filters into our own CachingWrapperFilter
+    * [HSEARCH-186] - Enhance analyzer to support the Solr model
+    * [HSEARCH-190] - Add pom
+    * [HSEARCH-191] - Make build independent of Hibernate Core structure
+    * [HSEARCH-192] - Move to Hibernate Core 3.3
+    * [HSEARCH-193] - Use dependency on Solr-analyzer JAR rather than the full Solr JAR
+    * [HSEARCH-195] - Expose Analyzers instance by name: searchFactory.getAnalyzer(String)
+    * [HSEARCH-200] - Expose IndexWriter setting MAX_FIELD_LENGTH via IndexWriterSetting
+    * [HSEARCH-212] - Added ReaderProvider strategy reusing unchanged segments (using reader.reopen())
+    * [HSEARCH-220] - introduce session.flushToIndexes API and deprecate batch_size
+
+
+** Task
+    * [HSEARCH-169] - Migrate to Lucene 2.3.1 (index corruption possiblity in 2.3.0)
+    * [HSEARCH-187] - Clarify which directories need read-write access, verify readonly behaviour on others.
+    * [HSEARCH-214] - Upgrade Lucene to 2.3.2
+    * [HSEARCH-229] - Deprecate FullTextQuery.BOOST
+
+
+3.0.1.GA (20-02-2008)
+---------------------
+
+** Bug
+    * [HSEARCH-56] - Updating a collection does not reindex
+    * [HSEARCH-123] - Use mkdirs instead of mkdir to create necessary parent directory in the DirectoryProviderHelper
+    * [HSEARCH-128] - Indexing embedded children's child
+    * [HSEARCH-136] - CachingWrapperFilter does not cache
+    * [HSEARCH-137] - Wrong class name in Exception when a FieldBridge does not implement TwoWayFieldBridge for a document id property
+    * [HSEARCH-138] - JNDI Property names have first character cut off
+    * [HSEARCH-140] - @IndexedEmbedded default depth is effectively 1 due to integer overflow
+    * [HSEARCH-146] - ObjectLoader doesn't catch javax.persistence.EntityNotFoundException
+    * [HSEARCH-149] - Default FieldBridge for enums passing wrong class to EnumBridge constructor
+
+
+** Improvement
+    * [HSEARCH-125] - Add support for fields declared by interface or unmapped superclass
+    * [HSEARCH-127] - Wrong prefix for worker configurations
+    * [HSEARCH-129] - IndexedEmbedded for Collections Documentation
+    * [HSEARCH-130] - Should provide better log infos (on the indexBase parameter for the FSDirectoryProvider)
+    * [HSEARCH-144] - Keep indexer running till finished on VM shutdown
+    * [HSEARCH-147] - Allow projection of Lucene DocId
+
+** New Feature
+    * [HSEARCH-114] - Introduce ResultTransformer to the query API
+    * [HSEARCH-150] - Migrate to Lucene 2.3
+
+** Patch
+    * [HSEARCH-126] - Better diagnostic when Search index directory cannot be opened (Ian)
+
+
+3.0.0.GA (23-09-2007)
+---------------------
+
+** Bug
+    * [HSEARCH-116] - FullTextEntityManager acessing getDelegate() in the constructor leads to NPE in JBoss AS + Seam
+    * [HSEARCH-117] - FullTextEntityManagerImpl and others should implement Serializable
+
+** Deprecation
+    * [HSEARCH-122] - Remove query.setIndexProjection (replaced by query.setProjection)
+
+** Improvement
+    * [HSEARCH-118] - Add ClassBridges (plural) functionality
+
+** New Feature
+    * [HSEARCH-81] - Create a @ClassBridge Annotation (John Griffin)
+
+
+** Task
+    * [HSEARCH-98] - Add a Getting started section to the reference documentation
+
+
+3.0.0.CR1 (4-09-2007)
+---------------------
+
+** Bug
+    * [HSEARCH-108] - id of embedded object is not indexed when using @IndexedEmbedded
+    * [HSEARCH-109] - Lazy loaded entity could not be indexed
+    * [HSEARCH-110] - ScrollableResults does not obey out of bounds rules (John Griffin)
+    * [HSEARCH-112] - Unkown @FullTextFilter  when attempting to associate a filter
+
+** Deprecation
+    * [HSEARCH-113] - Remove @Text, @Keyword and @Unstored (old mapping annotations)
+
+** Improvement
+    * [HSEARCH-107] - DirectoryProvider should have a start() method
+
+** New Feature
+    * [HSEARCH-14] - introduce fetch_size for Hibernate Search scrollable resultsets (John Griffin)
+    * [HSEARCH-69] - Ability to purge an index by class (John Griffin)
+    * [HSEARCH-111] - Ability to disable event based indexing (for read only or batch based indexing)
+
+
+3.0.0.Beta4 (1-08-2007)
+-----------------------
+
+** Bug
+    * [HSEARCH-88] - Unable to update 2 entity types in the same transaction if they share the same index
+    * [HSEARCH-90] - Use of setFirstResult / setMaxResults can lead to a list with negative capacity (John Griffin)
+    * [HSEARCH-92] - NPE for null fields on projection
+    * [HSEARCH-99] - Avoid returning non initialized proxies in scroll() and iterate() (loader.load(EntityInfo))
+
+
+** Improvement
+    * [HSEARCH-79] - Recommend to use FlushMode.APPLICATION on massive indexing
+    * [HSEARCH-84] - Migrate to Lucene 2.2
+    * [HSEARCH-91] - Avoid wrapping a Session object if the Session is already FullTextSession
+    * [HSEARCH-100] - Rename fullTextSession.setIndexProjection() to fullTextSession.setProjection()
+    * [HSEARCH-102] - Default index operation in @Field to TOKENIZED
+    * [HSEARCH-106] - Use the shared reader strategy as the default strategy
+
+** New Feature
+    * [HSEARCH-6] - Provide access to the Hit.getScore() and potentially the Document on a query
+    * [HSEARCH-15] - Notion of Filtered Lucene queries (Hardy Ferentschik)
+    * [HSEARCH-41] - Allow fine grained analyzers (Entity, attribute, @Field)
+    * [HSEARCH-45] - Support @Fields() for multiple indexing per property (useful for sorting)
+    * [HSEARCH-58] - Support named Filters (and caching)
+    * [HSEARCH-67] - Expose mergeFactor, maxMergeDocs and minMergeDocs (Hardy Ferentschik)
+    * [HSEARCH-73] - IncrementalOptimizerStrategy triggered on transactions or operations limits
+    * [HSEARCH-74] - Ability to project Lucene meta information (Score, Boost, Document, Id, This) (John Griffin)
+    * [HSEARCH-83] - Introduce OptimizerStrategy
+    * [HSEARCH-86] - Index sharding: multiple Lucene indexes per entity type
+    * [HSEARCH-89] - FullText wrapper for JPA APIs
+    * [HSEARCH-103] - Ability to override the indexName in the FSDirectoryProviders family
+
+
+** Task
+    * [HSEARCH-94] - Deprecate ContextHelper
+
+
+3.0.0.Beta3 (6-06-2007)
+-----------------------
+
+** Bug
+    * [HSEARCH-64] - Exception Thrown If Index Directory Does Not Exist
+    * [HSEARCH-66] - Some results not returned in some circumstances (Brandon Munroe)
+
+
+** Improvement
+    * [HSEARCH-60] - Introduce SearchFactory / SearchFactoryImpl
+    * [HSEARCH-68] - Set index copy threads as daemon
+    * [HSEARCH-70] - Create the index base directory if it does not exists
+
+** New Feature
+    * [HSEARCH-11] - Provide access to IndexWriter.optimize()
+    * [HSEARCH-33] - hibernate.search.worker.batch_size to prevent OutOfMemoryException while inserting many objects
+    * [HSEARCH-71] - Provide fullTextSession.getSearchFactory()
+    * [HSEARCH-72] - searchFactory.optimize() and searchFactory.optimize(Class) (Andrew Hahn)
+
+
+3.0.0.Beta2 (31-05-2007)
+------------------------
+
+** Bug
+    * [HSEARCH-37] - Verify that Serializable return type are not resolved by StringBridge built in type
+    * [HSEARCH-39] - event listener declaration example is wrong
+    * [HSEARCH-44] - Build the Lucene Document in the beforeComplete transaction phase
+    * [HSEARCH-50] - Null Booleans lead to NPE
+    * [HSEARCH-59] - Unable to index @indexEmbedded object through session.index when object is lazy and field access is used in object
+
+
+** Improvement
+    * [HSEARCH-36] - Meaningful exception message when Search Listeners are not initialized
+    * [HSEARCH-38] - Make the @IndexedEmbedded documentation example easier to understand
+    * [HSEARCH-51] - Optimization: Use a query rather than batch-size to load objects when a single entity (hierarchy) is expected
+    * [HSEARCH-63] - rename query.resultSize() to getResultSize()
+
+** New Feature
+    * [HSEARCH-4] - Be able to use a Lucene Sort on queries (Hardy Ferentschik)
+    * [HSEARCH-13] - Cache IndexReaders per SearchFactory
+    * [HSEARCH-40] - Be able to embed collections in lucene index (@IndexedEmbeddable in collections)
+    * [HSEARCH-43] - Expose resultSize and do not load object when only resultSize is retrieved
+    * [HSEARCH-52] - Ability to load more efficiently an object graph from a lucene query by customizing the fetch modes
+    * [HSEARCH-53] - Add support for projection (ie read the data from the index only)
+    * [HSEARCH-61] - Move from MultiSearcher to MultiReader
+    * [HSEARCH-62] - Support pluggable ReaderProvider strategies
+    
+
+** Task
+    * [HSEARCH-65] - Update to JBoss Embedded beta2
+
+
+3.0.0.Beta1 (19-03-2007)
+------------------------
+
+Initial release as a standalone product (see Hibernate Annotations changelog for previous informations)
+
+
+Release Notes - Hibernate Search - Version 3.0.0.beta1
+
+** Bug
+    * [HSEARCH-7] - Ignore object found in the index but no longer present in the database (for out of date indexes)
+    * [HSEARCH-21] - NPE in SearchFactory while using different threads
+    * [HSEARCH-22] - Enum value Index.UN_TOKENISED is misspelled
+    * [HSEARCH-24] - Potential deadlock when using multiple DirectoryProviders in a highly concurrent index update
+    * [HSEARCH-25] - Class cast exception in org.hibernate.search.impl.FullTextSessionImpl<init>(FullTextSessionImpl.java:54)
+    * [HSEARCH-28] - Wrong indexDir property in Apache Lucene Integration
+
+
+** Improvement
+    * [HSEARCH-29] - Share the initialization state across all Search event listeners instance
+    * [HSEARCH-30] - @FieldBridge now use o.h.s.a.Parameter rather than o.h.a.Parameter
+    * [HSEARCH-31] - Move to Lucene 2.1.0
+
+** New Feature
+    * [HSEARCH-1] - Give access to Directory providers
+    * [HSEARCH-2] - Default FieldBridge for enums (Sylvain Vieujot)
+    * [HSEARCH-3] - Default FieldBridge for booleans (Sylvain Vieujot)
+    * [HSEARCH-9] - Introduce a worker factory and its configuration
+    * [HSEARCH-16] - Cluster capability through JMS
+    * [HSEARCH-23] - Support asynchronous batch worker queue
+    * [HSEARCH-27] - Ability to index associated / embedded objects

Deleted: search/tags/v3_1_0_Beta2/common-build.xml
===================================================================
--- search/trunk/common-build.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/common-build.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,450 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project name="common-build" default="dist"
-	xmlns:artifact="urn:maven-artifact-ant" xmlns:ivy="antlib:fr.jayasoft.ivy.ant">
-	<description>Common properties and targets for the HibernateExt
-		project</description>
-	
-	
-	<!-- my.basedir property can be used to refer to files/directories relatively to the common-build.xml file -->
-	<dirname property="common-build.basedir" file="${ant.file.common-build}"/>
-	
-	<!-- Give user a chance to override without editing this file
-	(and without typing -D each time you compile) -->
-	<property file="${common-build.basedir}/build.properties"/>
-	<property file="${user.home}/.ant.properties"/>
-	
-	<property name="src.dir" location="src/java"/>
-	<property name="test.dir" location="src/test"/>
-	<property name="test.resources.dir" location="src/test-resources"/>
-	<property name="filter.dir" location="src/filters"/>
-	<property name="lib.dir" location="lib"/>
-	<property name="build.dir" location="build"/>
-	<property name="classes.dir" location="${build.dir}/classes"/>
-	<property name="testclasses.dir" location="${build.dir}/testclasses"/>
-	<property name="testreports.dir" location="${build.dir}/test-reports"/>
-	<property name="dist.target.dir" location="target"/>
-	<property name="dist.dir" location="${dist.target.dir}/${name}"/>
-	<property name="instrumenttest.out.dir" value="${build.dir}/test-reports/instrument"/>
-	<property name="doc.dir" location="doc"/>
-	<property name="doc.api.dir" location="${doc.dir}/api"/>
-	<property name="doc.reference.dir" location="${doc.dir}/reference"/>
-	
-	<property name="dist.doc.dir" location="${dist.dir}/doc"/>
-	<property name="dist.api.dir" location="${dist.dir}/doc/api"/>
-	
-	<property name="dist.src.dir" location="${dist.dir}/src"/>
-	<property name="dist.test.dir" location="${dist.dir}/test"/>
-	<property name="dist.lib.dir" location="${dist.dir}/lib"/>
-	<property name="jar.name" value="${name}"/>
-	<property name="jar.file.name" value="${dist.dir}/${jar.name}.jar"/>
-	<property name="jartest.file.name" value="${dist.dir}/${jar.name}-tests.jar"/>
-	
-	<property name="javadoc" value="http://java.sun.com/j2se/1.4/docs/api"/>
-	<property name="javac.debug" value="on"/>
-	<property name="javac.optimize" value="off"/>
-	<property name="javac.source" value="1.4"/>
-	<property name="javac.target" value="1.4"/>
-	
-	<property name="pom.file" value="pom.xml"/>
-	<property name="src.jar" value="${build.dir}/src.jar"/>
-	
-	<taskdef name="junit"
-		classname="org.apache.tools.ant.taskdefs.optional.junit.JUnitTask">
-		<classpath>
-			<fileset dir="${common-build.basedir}/lib/build">
-				<!-- ${build.lib.dir} fails in reference doc build -->
-				<include name="junit-*.jar"/>
-				<include name="ant-junit-*.jar"/>
-			</fileset>
-		</classpath>
-	</taskdef>
-	
-	<taskdef name="junitreport"
-		classname="org.apache.tools.ant.taskdefs.optional.junit.XMLResultAggregator">
-		<classpath>
-			<fileset dir="${common-build.basedir}/lib/build">
-				<!-- ${build.lib.dir} fails in reference doc build -->
-				<include name="junit-*.jar"/>
-				<include name="ant-junit-*.jar"/>
-			</fileset>
-		</classpath>
-	</taskdef>
-	
-	<taskdef resource="net/sf/antcontrib/antlib.xml">
-		<classpath>
-			<fileset dir="${common-build.basedir}/lib/build">
-				<!-- ${build.lib.dir} fails in reference doc build -->
-				<include name="ant-contrib-*.jar"/>
-			</fileset>
-		</classpath>
-	</taskdef>
-	
-	<!-- ivy load -->
-	<property name="ivy.jar.dir" value="${common-build.basedir}/ivy"/>
-	<property name="ivy.conf.dir" value="${common-build.basedir}"/>
-	<path id="ivy.lib.path">
-		<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
-	</path>
-	<taskdef resource="fr/jayasoft/ivy/ant/antlib.xml"
-		uri="antlib:fr.jayasoft.ivy.ant" classpathref="ivy.lib.path"/>
-	
-	<!-- maven task load -->
-	<path id="maven-ant-tasks.path" path="${ivy.jar.dir}/maven-ant-tasks.jar"/>
-	<typedef resource="org/apache/maven/artifact/ant/antlib.xml"
-		uri="urn:maven-artifact-ant" classpathref="maven-ant-tasks.path"/>
-	
-	<artifact:remoteRepository id="offline.repository.jboss.org"
-		url="file://${offline.repository.jboss.org}"/>
-	
-	<path id="lib.class.path">
-		<path refid="lib.moduleclass.path"/>
-		<pathelement path="${clover.jar}"/>
-	</path>
-	
-	<!-- overridable in modules -->
-	<path id="lib.moduleclass.path"/>
-	
-	<patternset id="support.files">
-		<include name="**/*.jpg"/>
-		<include name="**/*.gif"/>
-		<include name="**/*.dtd"/>
-		<include name="**/*.xsd"/>
-		<include name="**/*.xml"/>
-		<include name="**/*.xslt"/>
-		
-		<!-- exclude everything we don't want in the jar -->
-		<exclude name="${build.dir}/**/*"/>
-		<exclude name="${doc.dir}/**/*"/>
-		<exclude name="classes/**/*"/>
-		<exclude name="build.xml"/>
-		<exclude name="**/*.properties"/>
-		<exclude name="**/*.ccf"/>
-		<exclude name="**/*.cfg.xml"/>
-		<exclude name="**/ehcache.xml"/>
-	</patternset>
-	
-	<patternset id="source.files">
-		<include name="**/*.java"/>
-		<include name="**/*.properties"/>
-	</patternset>
-	
-	<!-- junit paths/filesets -->
-	<fileset dir="${testclasses.dir}" id="junit.batchtestset">
-		<include name="**/*Test.class"/>
-	</fileset>
-	
-	<path id="testsrc.path">
-		<pathelement location="${test.dir}"/>
-	</path>
-		
-	<path id="junit.classpath">
-		<pathelement path="${classes.dir}"/>
-		<pathelement path="${testclasses.dir}"/>
-		<path refid="lib.class.path"/>
-		<path refid="junit.moduleclasspath"/>
-		<path location="${clover.jar}"/>
-	</path>
-	
-	<!-- Determine the database against which to run tests -->
-	<if>
-		<equals arg1="${targetdb}" arg2="$${targetdb}"/>
-		<then>
-			<echo message="No target database specified using default HSQLDB"/>
-			<property name="targetdb" value="hsqldb"/>
-		</then>
-	</if>
-	
-	<!-- Clover tasks -->
-	<target name="with.clover">
-		<clover-setup initString="clover_coverage.db"/>
-	</target>
-	
-	<target name="cloverreport.html" depends="with.clover"
-		description="Generate a clover report from the current clover database.">
-		<clover-report>
-			<current outfile="${clover.out.dir}">
-				<format type="html"/>
-			</current>
-		</clover-report>
-	</target>
-	
-	<target name="cloverreport"
-		depends="with.clover,junitreport,cloverreport.html"
-		description="Run the tests and generate a clover report">
-	</target>
-	
-	<!-- Tasks -->
-	<target name="clean" description="Cleans up build and dist directories">
-		<delete dir="${build.dir}"/>
-		<delete dir="${dist.target.dir}"/>
-		<delete dir="${clover.out.dir}"/>
-	</target>
-	
-	<target name="init" description="Initialize the build">
-		<tstamp>
-			<format property="subversion" pattern="yyyy-MM-dd hh:mm:ss"/>
-		</tstamp>
-		<echo message="Build ${Name}-${version} (${subversion})"/>
-		<mkdir dir="${classes.dir}"/>
-		<mkdir dir="${testclasses.dir}"/>
-		<copy todir="${classes.dir}">
-			<fileset dir="${src.dir}">
-				<patternset refid="support.files"/>
-			</fileset>
-		</copy>
-		
-		<copy todir="${build.dir}">
-			<fileset dir=".">
-				<include name="readme.txt"/>
-				<include name="lgpl.txt"/>
-			</fileset>
-		</copy>
-	</target>
-	
-	<target name="get.deps.core" depends="init"
-		description="retrieve the core dependencies">
-		<ivy:resolve conf="default"/>
-		<ivy:retrieve pattern="${ivy.dep.dir}/core/[artifact].[ext]"
-			conf="default"/>
-	</target>	
-	
-	<target name="get.deps.test" depends="init"
-		description="retrieve the test dependencies">
-		<ivy:resolve conf="test"/>
-		<ivy:retrieve pattern="${ivy.dep.dir}/test/[artifact].[ext]" conf="test"/>
-	</target>	
-				
-	<target name="copytest" description="Copy tests to dist dir" if="copy.test">
-		<mkdir dir="${dist.test.dir}"/>
-		<copy todir="${dist.test.dir}">
-			<fileset dir="${test.dir}"/>
-		</copy>
-	</target>
-	
-	<target name="copysource" depends="copytest"
-		description="Copy sources to dist dir">
-		<mkdir dir="${dist.src.dir}"/>
-		<copy todir="${dist.src.dir}">
-			<fileset dir="${src.dir}">
-				<patternset refid="source.files"/>
-			</fileset>
-			<fileset dir="${src.dir}">
-				<patternset refid="support.files"/>
-			</fileset>
-		</copy>
-		<mkdir dir="${dist.src.dir}"/>
-		<copy todir="${dist.src.dir}">
-			<fileset dir="${src.dir}">
-				<patternset refid="source.files"/>
-			</fileset>
-			<fileset dir="${src.dir}">
-				<patternset refid="support.files"/>
-			</fileset>
-		</copy>
-	</target>
-	
-	<target name="copylib" description="Copy jars to lib dir">
-		<mkdir dir="${dist.lib.dir}"/>
-		<copy todir="${dist.lib.dir}" verbose="true">
-			<fileset dir="${lib.dir}">
-				<include name="**/*.jar"/>
-				<exclude name="log4j.jar"/>
-				<exclude name="checkstyle*.jar"/>
-				<include name="*.txt"/>
-			</fileset>
-		</copy>
-	</target>
-	
-	<target name="copydoc" description="Copy doc to dist dir" if="copy.doc">
-		<mkdir dir="${dist.doc.dir}"/>
-		<copy todir="${dist.doc.dir}">
-			<fileset dir="${doc.dir}">
-				<include name="**/*.html"/>
-			</fileset>
-		</copy>
-	</target>
-	
-	<target name="jar" depends="compile"
-		description="Build the distribution .jar file">
-		<mkdir dir="${dist.dir}"/>
-		<jar filesetmanifest="merge" jarfile="${jar.file.name}"
-			basedir="${classes.dir}"/>
-	</target>
-	
-	<target name="jartest" depends="compiletest"
-		description="Build the distribution .jar file">
-		<mkdir dir="${dist.dir}"/>
-		<jar filesetmanifest="merge" jarfile="${jartest.file.name}"
-			basedir="${testclasses.dir}"/>
-	</target>
-	
-	<!-- DOCUMENTATION -->
-	
-	<target name="javadoc"
-		description="Compile the Javadoc API documentation to dist dir">
-		<mkdir dir="${dist.api.dir}"/>
-		<javadoc packagenames="${javadoc.packagenames}"
-			classpathref="lib.class.path" destdir="${dist.api.dir}" use="true"
-			protected="true" version="true"
-			windowtitle="${Name} API Documentation"
-			Overview="${doc.api.dir}/package.html"
-			doctitle="${Name} API Documentation"
-			stylesheetfile="${doc.api.dir}/jdstyle.css" link="${javadoc}">
-			<packageset dir="${src.dir}" defaultexcludes="yes">
-				<include name="**/*"/>
-			</packageset>
-		</javadoc>
-	</target>
-	
-	<target name="extras" description="Copies miscellaneous files to root dir">
-		<copy todir="${dist.dir}/bin" failonerror="false">
-			<fileset dir="bin">
-				<include name="*.bat"/>
-			</fileset>
-		</copy>
-		<copy file="readme.txt" todir="${dist.dir}"/>
-		<copy file="lgpl.txt" todir="${dist.dir}"/>
-		<copy file="changelog.txt" todir="${dist.dir}"/>
-		<copy file="build.xml" todir="${dist.dir}"/>
-		<replace file="${dist.dir}/build.xml">
-			<replacetoken><![CDATA[../${name}-${version}]]>
-			</replacetoken>
-			<replacevalue><![CDATA[../${name}]]>
-			</replacevalue>
-		</replace>
-	</target>
-	
-	<target name="dist" depends="jar,javadoc,copysource,copydoc,extras"
-		description="Build everything">
-		<zip zipfile="${dist.dir}-${version}.zip">
-			<zipfileset prefix="${name}-${version}" dir="${dist.dir}"/>
-		</zip>
-		<tar compression="gzip" tarfile="${dist.dir}-${version}.tar.gz">
-			<tarfileset prefix="${name}-${version}" dir="${dist.dir}"/>
-		</tar>
-	</target>
-	
-	<target name="info" description="Echoes useful system properties">
-		<echo message="java.vm.info=${java.vm.info}"/>
-		<echo message="java.vm.name=${java.vm.name}"/>
-		<echo message="java.vm.vendor=${java.vm.vendor}"/>
-		<echo message="java.vm.version=${java.vm.version}"/>
-		<echo message="os.arch=${os.arch}"/>
-		<echo message="os.name=${os.name}"/>
-		<echo message="os.version=${os.version}"/>
-		<echo message="java.home = ${java.home}"/>
-		<echo message="java.class.path = ${java.class.path}"/>
-		<echo message="build.compiler = ${build.compiler}"/>
-		<echo message="file.encoding=${file.encoding}"/>
-		<echo message="user.home = ${user.home}"/>
-		<echo message="user.language=${user.language}"/>
-	</target>
-	
-	<target name="test-resources" description="Copies and filters test resources">
-		<filter filtersfile="${filter.dir}/${db}.filter"/>
-		<mkdir dir="${testclasses.dir}"/>
-		<copy todir="${testclasses.dir}" filtering="true" overwrite="true">
-			<fileset dir="${test.resources.dir}">
-				<include name="*.properties"/>
-				<include name="*.xml"/>
-			</fileset>
-		</copy>
-	</target>
-		
-	<target name="instrument" depends="compiletest"
-		description="Instrument the persistent classes"> <!-- depends="jar" -->
-		
-		<taskdef name="instrument"
-			classname="org.hibernate.tool.instrument.javassist.InstrumentTask">
-			<classpath refid="junit.classpath"/>
-		</taskdef>
-		
-		<instrument verbose="true">
-			<fileset dir="${testclasses.dir}/org/hibernate/test">
-				<include name="**/*.class"/>
-				<exclude name="**/*Test$*.class"/>
-				<exclude name="**/*Test.class"/>
-				<exclude name="**/*Tests.class"/>
-			</fileset>
-		</instrument>
-	</target>	
-	
-	<target name="junitinstrument" depends="compiletest,instrument"
-		description="Run the instrument test suite">
-		<for list="${targetdb}" param="db">
-			<sequential>
-				<antcall target="test-resources">
-					<param name="db" value="@{db}"/>
-				</antcall>
-				<mkdir dir="${instrumenttest.out.dir}/@{db}"/>
-				<echo>Running against db: @{db}</echo>
-				<junit printsummary="yes" haltonfailure="yes" dir="${basedir}"
-					maxmemory="256M" fork="yes" forkmode="perBatch">
-					<classpath refid="junit.classpath"/>
-					<formatter type="plain"/>
-					<formatter type="xml"/>
-					<batchtest todir="${instrumenttest.out.dir}/@{db}" haltonfailure="no">
-						<fileset refid="junit.batchtestset"/>
-					</batchtest>
-				</junit>
-			</sequential>
-		</for>
-	</target>
-	
-	<target name="junitreport" depends="">
-		<junitreport todir="${testreports.dir}">
-			<fileset dir="${testreports.dir}">
-				<include name="TEST-*.xml"/>
-			</fileset>
-			<report format="frames" todir="${testreports.dir}"/>
-		</junitreport>
-	</target>
-		
-	<target name="checkstyle" description="Check coding style">
-		<taskdef resource="checkstyletask.properties">
-			<classpath>
-				<path refid="lib.class.path"/>
-				<fileset dir="${common-build.basedir}/lib">
-					<include name="checkstyle*.jar"/>
-				</fileset>
-			</classpath>
-		</taskdef>
-		
-		<checkstyle config="${common-build.basedir}/checkstyle_checks.xml">
-			<fileset dir="${src.dir}">
-				<include name="**/*.java"/>
-			</fileset>
-			<formatter type="plain"/>
-		</checkstyle>
-	</target>
-	
-	<target name="patch" depends="checkstyle" description="Create a patch">
-		<cvs command="-q diff -u -N" output="patch.txt"/>
-	</target>
-	
-	<!-- maven deploy: to be used by the subbuild and declare deps on jar -->
-	<target name="deploy" depends="jar">
-		<fail unless="offline.repository.jboss.org"
-			message="offline.repository.jboss.org must be defined"/>
-		<jar jarfile="${src.jar}" basedir="${src.dir}">
-			<include name="**/*.java"/>
-			<exclude name="**/test/*.java"/>
-			<!-- patternset refid="meta.files" / -->
-		</jar>
-		
-		<artifact:pom id="maven.project" file="${pom.file}"/>
-		
-		<artifact:install file="${jar.file.name}">
-			<pom refid="maven.project"/>
-		</artifact:install>
-		
-		<artifact:deploy file="${jar.file.name}">
-			<pom refid="maven.project"/>
-			<remoteRepository refId="offline.repository.jboss.org">
-			</remoteRepository>
-			<attach file="${src.jar}" classifier="sources"/>
-			<attach file="${jar.file.name}" classifier=""/>
-		</artifact:deploy>
-	</target>
-	
-</project>
\ No newline at end of file

Copied: search/tags/v3_1_0_Beta2/common-build.xml (from rev 15395, search/trunk/common-build.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/common-build.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/common-build.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,451 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project name="common-build" default="dist"
+	xmlns:artifact="urn:maven-artifact-ant" xmlns:ivy="antlib:fr.jayasoft.ivy.ant">
+	<description>Common properties and targets for the HibernateExt
+		project</description>
+	
+	
+	<!-- my.basedir property can be used to refer to files/directories relatively to the common-build.xml file -->
+	<dirname property="common-build.basedir" file="${ant.file.common-build}"/>
+	
+	<!-- Give user a chance to override without editing this file
+	(and without typing -D each time you compile) -->
+	<property file="${common-build.basedir}/build.properties"/>
+	<property file="${user.home}/.ant.properties"/>
+	
+	<property name="src.dir" location="src/java"/>
+	<property name="test.dir" location="src/test"/>
+	<property name="test.resources.dir" location="src/test-resources"/>
+	<property name="filter.dir" location="src/filters"/>
+	<property name="lib.dir" location="lib"/>
+	<property name="build.dir" location="build"/>
+	<property name="classes.dir" location="${build.dir}/classes"/>
+	<property name="testclasses.dir" location="${build.dir}/testclasses"/>
+	<property name="testreports.dir" location="${build.dir}/test-reports"/>
+	<property name="dist.target.dir" location="target"/>
+	<property name="dist.dir" location="${dist.target.dir}/${name}"/>
+	<property name="instrumenttest.out.dir" value="${build.dir}/test-reports/instrument"/>
+	<property name="doc.dir" location="doc"/>
+	<property name="doc.api.dir" location="${doc.dir}/api"/>
+	<property name="doc.reference.dir" location="${doc.dir}/reference"/>
+	
+	<property name="dist.doc.dir" location="${dist.dir}/doc"/>
+	<property name="dist.api.dir" location="${dist.dir}/doc/api"/>
+	
+	<property name="dist.src.dir" location="${dist.dir}/src"/>
+	<property name="dist.test.dir" location="${dist.dir}/test"/>
+	<property name="dist.lib.dir" location="${dist.dir}/lib"/>
+	<property name="jar.name" value="${name}"/>
+	<property name="jar.file.name" value="${dist.dir}/${jar.name}.jar"/>
+	<property name="jartest.file.name" value="${dist.dir}/${jar.name}-tests.jar"/>
+	
+	<property name="javadoc" value="http://java.sun.com/j2se/1.4/docs/api"/>
+	<property name="javac.debug" value="on"/>
+	<property name="javac.optimize" value="off"/>
+	<property name="javac.source" value="1.4"/>
+	<property name="javac.target" value="1.4"/>
+	
+	<property name="pom.file" value="pom.xml"/>
+	<property name="src.jar" value="${build.dir}/src.jar"/>
+	
+	<taskdef name="junit"
+		classname="org.apache.tools.ant.taskdefs.optional.junit.JUnitTask">
+		<classpath>
+			<fileset dir="${common-build.basedir}/lib/build">
+				<!-- ${build.lib.dir} fails in reference doc build -->
+				<include name="junit-*.jar"/>
+				<include name="ant-junit-*.jar"/>
+			</fileset>
+		</classpath>
+	</taskdef>
+	
+	<taskdef name="junitreport"
+		classname="org.apache.tools.ant.taskdefs.optional.junit.XMLResultAggregator">
+		<classpath>
+			<fileset dir="${common-build.basedir}/lib/build">
+				<!-- ${build.lib.dir} fails in reference doc build -->
+				<include name="junit-*.jar"/>
+				<include name="ant-junit-*.jar"/>
+			</fileset>
+		</classpath>
+	</taskdef>
+	
+	<taskdef resource="net/sf/antcontrib/antlib.xml">
+		<classpath>
+			<fileset dir="${common-build.basedir}/lib/build">
+				<!-- ${build.lib.dir} fails in reference doc build -->
+				<include name="ant-contrib-*.jar"/>
+			</fileset>
+		</classpath>
+	</taskdef>
+	
+	<!-- ivy load -->
+	<property name="ivy.jar.dir" value="${common-build.basedir}/ivy"/>
+	<property name="ivy.conf.dir" value="${common-build.basedir}"/>
+	<path id="ivy.lib.path">
+		<fileset dir="${ivy.jar.dir}" includes="*.jar"/>
+	</path>
+	<taskdef resource="fr/jayasoft/ivy/ant/antlib.xml"
+		uri="antlib:fr.jayasoft.ivy.ant" classpathref="ivy.lib.path"/>
+	
+	<!-- maven task load -->
+	<path id="maven-ant-tasks.path" path="${ivy.jar.dir}/maven-ant-tasks.jar"/>
+	<typedef resource="org/apache/maven/artifact/ant/antlib.xml"
+		uri="urn:maven-artifact-ant" classpathref="maven-ant-tasks.path"/>
+	
+	<artifact:remoteRepository id="offline.repository.jboss.org"
+		url="file://${offline.repository.jboss.org}"/>
+	
+	<path id="lib.class.path">
+		<path refid="lib.moduleclass.path"/>
+		<pathelement path="${clover.jar}"/>
+	</path>
+	
+	<!-- overridable in modules -->
+	<path id="lib.moduleclass.path"/>
+	
+	<patternset id="support.files">
+		<include name="**/*.jpg"/>
+		<include name="**/*.gif"/>
+		<include name="**/*.dtd"/>
+		<include name="**/*.xsd"/>
+		<include name="**/*.xml"/>
+		<include name="**/*.xslt"/>
+		
+		<!-- exclude everything we don't want in the jar -->
+		<exclude name="${build.dir}/**/*"/>
+		<exclude name="${doc.dir}/**/*"/>
+		<exclude name="classes/**/*"/>
+		<exclude name="build.xml"/>
+		<exclude name="**/*.properties"/>
+		<exclude name="**/*.ccf"/>
+		<exclude name="**/*.cfg.xml"/>
+		<exclude name="**/ehcache.xml"/>
+	</patternset>
+	
+	<patternset id="source.files">
+		<include name="**/*.java"/>
+		<include name="**/*.properties"/>
+	</patternset>
+	
+	<!-- junit paths/filesets -->
+	<fileset dir="${testclasses.dir}" id="junit.batchtestset">
+		<include name="**/*Test.class"/>
+	</fileset>
+	
+	<path id="testsrc.path">
+		<pathelement location="${test.dir}"/>
+	</path>
+		
+	<path id="junit.classpath">
+		<pathelement path="${classes.dir}"/>
+		<pathelement path="${testclasses.dir}"/>
+		<path refid="lib.class.path"/>
+		<path refid="junit.moduleclasspath"/>
+		<path location="${clover.jar}"/>
+	</path>
+	
+	<!-- Determine the database against which to run tests -->
+	<if>
+		<equals arg1="${targetdb}" arg2="$${targetdb}"/>
+		<then>
+			<echo message="No target database specified using default HSQLDB"/>
+			<property name="targetdb" value="hsqldb"/>
+		</then>
+	</if>
+	
+	<!-- Clover tasks -->
+	<target name="with.clover">
+		<clover-setup initString="clover_coverage.db"/>
+	</target>
+	
+	<target name="cloverreport.html" depends="with.clover"
+		description="Generate a clover report from the current clover database.">
+		<clover-report>
+			<current outfile="${clover.out.dir}">
+				<format type="html"/>
+			</current>
+		</clover-report>
+	</target>
+	
+	<target name="cloverreport"
+		depends="with.clover,junitreport,cloverreport.html"
+		description="Run the tests and generate a clover report">
+	</target>
+	
+	<!-- Tasks -->
+	<target name="clean" description="Cleans up build and dist directories">
+		<delete dir="${build.dir}"/>
+		<delete dir="${dist.target.dir}"/>
+		<delete dir="${clover.out.dir}"/>
+	</target>
+	
+	<target name="init" description="Initialize the build">
+		<tstamp>
+			<format property="subversion" pattern="yyyy-MM-dd hh:mm:ss"/>
+		</tstamp>
+		<echo message="Build ${Name}-${version} (${subversion})"/>
+		<mkdir dir="${classes.dir}"/>
+		<mkdir dir="${testclasses.dir}"/>
+		<copy todir="${classes.dir}">
+			<fileset dir="${src.dir}">
+				<patternset refid="support.files"/>
+			</fileset>
+		</copy>
+		
+		<copy todir="${build.dir}">
+			<fileset dir=".">
+				<include name="readme.txt"/>
+				<include name="lgpl.txt"/>
+			</fileset>
+		</copy>
+	</target>
+	
+	<target name="get.deps.core" depends="init"
+		description="retrieve the core dependencies">
+		<ivy:resolve conf="default"/>
+		<ivy:retrieve pattern="${ivy.dep.dir}/core/[artifact].[ext]"
+			conf="default"/>
+	</target>	
+	
+	<target name="get.deps.test" depends="init"
+		description="retrieve the test dependencies">
+		<ivy:resolve conf="test"/>
+		<ivy:retrieve pattern="${ivy.dep.dir}/test/[artifact].[ext]" conf="test"/>
+	</target>	
+				
+	<target name="copytest" description="Copy tests to dist dir" if="copy.test">
+		<mkdir dir="${dist.test.dir}"/>
+		<copy todir="${dist.test.dir}">
+			<fileset dir="${test.dir}"/>
+		</copy>
+	</target>
+	
+	<target name="copysource" depends="copytest"
+		description="Copy sources to dist dir">
+		<mkdir dir="${dist.src.dir}"/>
+		<copy todir="${dist.src.dir}">
+			<fileset dir="${src.dir}">
+				<patternset refid="source.files"/>
+			</fileset>
+			<fileset dir="${src.dir}">
+				<patternset refid="support.files"/>
+			</fileset>
+		</copy>
+		<mkdir dir="${dist.src.dir}"/>
+		<copy todir="${dist.src.dir}">
+			<fileset dir="${src.dir}">
+				<patternset refid="source.files"/>
+			</fileset>
+			<fileset dir="${src.dir}">
+				<patternset refid="support.files"/>
+			</fileset>
+		</copy>
+	</target>
+	
+	<target name="copylib" description="Copy jars to lib dir">
+		<mkdir dir="${dist.lib.dir}"/>
+		<copy todir="${dist.lib.dir}" verbose="true">
+			<fileset dir="${lib.dir}">
+				<include name="**/*.jar"/>
+				<exclude name="log4j.jar"/>
+				<exclude name="checkstyle*.jar"/>
+				<include name="*.txt"/>
+			</fileset>
+		</copy>
+	</target>
+	
+	<target name="copydoc" description="Copy doc to dist dir" if="copy.doc">
+		<mkdir dir="${dist.doc.dir}"/>
+		<copy todir="${dist.doc.dir}">
+			<fileset dir="${doc.dir}">
+				<include name="**/*.html"/>
+			</fileset>
+		</copy>
+	</target>
+	
+	<target name="jar" depends="compile"
+		description="Build the distribution .jar file">
+		<mkdir dir="${dist.dir}"/>
+		<jar filesetmanifest="merge" jarfile="${jar.file.name}"
+			basedir="${classes.dir}"/>
+	</target>
+	
+	<target name="jartest" depends="compiletest"
+		description="Build the distribution .jar file">
+		<mkdir dir="${dist.dir}"/>
+		<jar filesetmanifest="merge" jarfile="${jartest.file.name}"
+			basedir="${testclasses.dir}"/>
+	</target>
+	
+	<!-- DOCUMENTATION -->
+	
+	<target name="javadoc"
+		description="Compile the Javadoc API documentation to dist dir">
+		<mkdir dir="${dist.api.dir}"/>
+		<javadoc packagenames="${javadoc.packagenames}"
+			classpathref="lib.class.path" destdir="${dist.api.dir}" use="true"
+			protected="true" version="true"
+			windowtitle="${Name} API Documentation"
+			Overview="${doc.api.dir}/package.html"
+			doctitle="${Name} API Documentation"
+			stylesheetfile="${doc.api.dir}/jdstyle.css" link="${javadoc}">
+			<packageset dir="${src.dir}" defaultexcludes="yes">
+				<include name="**/*"/>
+			</packageset>
+            <link href="http://lucene.apache.org/java/2_4_0/api"/>            
+		</javadoc>
+	</target>
+	
+	<target name="extras" description="Copies miscellaneous files to root dir">
+		<copy todir="${dist.dir}/bin" failonerror="false">
+			<fileset dir="bin">
+				<include name="*.bat"/>
+			</fileset>
+		</copy>
+		<copy file="readme.txt" todir="${dist.dir}"/>
+		<copy file="lgpl.txt" todir="${dist.dir}"/>
+		<copy file="changelog.txt" todir="${dist.dir}"/>
+		<copy file="build.xml" todir="${dist.dir}"/>
+		<replace file="${dist.dir}/build.xml">
+			<replacetoken><![CDATA[../${name}-${version}]]>
+			</replacetoken>
+			<replacevalue><![CDATA[../${name}]]>
+			</replacevalue>
+		</replace>
+	</target>
+	
+	<target name="dist" depends="jar,javadoc,copysource,copydoc,extras"
+		description="Build everything">
+		<zip zipfile="${dist.dir}-${version}.zip">
+			<zipfileset prefix="${name}-${version}" dir="${dist.dir}"/>
+		</zip>
+		<tar compression="gzip" tarfile="${dist.dir}-${version}.tar.gz">
+			<tarfileset prefix="${name}-${version}" dir="${dist.dir}"/>
+		</tar>
+	</target>
+	
+	<target name="info" description="Echoes useful system properties">
+		<echo message="java.vm.info=${java.vm.info}"/>
+		<echo message="java.vm.name=${java.vm.name}"/>
+		<echo message="java.vm.vendor=${java.vm.vendor}"/>
+		<echo message="java.vm.version=${java.vm.version}"/>
+		<echo message="os.arch=${os.arch}"/>
+		<echo message="os.name=${os.name}"/>
+		<echo message="os.version=${os.version}"/>
+		<echo message="java.home = ${java.home}"/>
+		<echo message="java.class.path = ${java.class.path}"/>
+		<echo message="build.compiler = ${build.compiler}"/>
+		<echo message="file.encoding=${file.encoding}"/>
+		<echo message="user.home = ${user.home}"/>
+		<echo message="user.language=${user.language}"/>
+	</target>
+	
+	<target name="test-resources" description="Copies and filters test resources">
+		<filter filtersfile="${filter.dir}/${db}.filter"/>
+		<mkdir dir="${testclasses.dir}"/>
+		<copy todir="${testclasses.dir}" filtering="true" overwrite="true">
+			<fileset dir="${test.resources.dir}">
+				<include name="*.properties"/>
+				<include name="*.xml"/>
+			</fileset>
+		</copy>
+	</target>
+		
+	<target name="instrument" depends="compiletest"
+		description="Instrument the persistent classes"> <!-- depends="jar" -->
+		
+		<taskdef name="instrument"
+			classname="org.hibernate.tool.instrument.javassist.InstrumentTask">
+			<classpath refid="junit.classpath"/>
+		</taskdef>
+		
+		<instrument verbose="true">
+			<fileset dir="${testclasses.dir}/org/hibernate/test">
+				<include name="**/*.class"/>
+				<exclude name="**/*Test$*.class"/>
+				<exclude name="**/*Test.class"/>
+				<exclude name="**/*Tests.class"/>
+			</fileset>
+		</instrument>
+	</target>	
+	
+	<target name="junitinstrument" depends="compiletest,instrument"
+		description="Run the instrument test suite">
+		<for list="${targetdb}" param="db">
+			<sequential>
+				<antcall target="test-resources">
+					<param name="db" value="@{db}"/>
+				</antcall>
+				<mkdir dir="${instrumenttest.out.dir}/@{db}"/>
+				<echo>Running against db: @{db}</echo>
+				<junit printsummary="yes" haltonfailure="yes" dir="${basedir}"
+					maxmemory="256M" fork="yes" forkmode="perBatch">
+					<classpath refid="junit.classpath"/>
+					<formatter type="plain"/>
+					<formatter type="xml"/>
+					<batchtest todir="${instrumenttest.out.dir}/@{db}" haltonfailure="no">
+						<fileset refid="junit.batchtestset"/>
+					</batchtest>
+				</junit>
+			</sequential>
+		</for>
+	</target>
+	
+	<target name="junitreport" depends="">
+		<junitreport todir="${testreports.dir}">
+			<fileset dir="${testreports.dir}">
+				<include name="TEST-*.xml"/>
+			</fileset>
+			<report format="frames" todir="${testreports.dir}"/>
+		</junitreport>
+	</target>
+		
+	<target name="checkstyle" description="Check coding style">
+		<taskdef resource="checkstyletask.properties">
+			<classpath>
+				<path refid="lib.class.path"/>
+				<fileset dir="${common-build.basedir}/lib">
+					<include name="checkstyle*.jar"/>
+				</fileset>
+			</classpath>
+		</taskdef>
+		
+		<checkstyle config="${common-build.basedir}/checkstyle_checks.xml">
+			<fileset dir="${src.dir}">
+				<include name="**/*.java"/>
+			</fileset>
+			<formatter type="plain"/>
+		</checkstyle>
+	</target>
+	
+	<target name="patch" depends="checkstyle" description="Create a patch">
+		<cvs command="-q diff -u -N" output="patch.txt"/>
+	</target>
+	
+	<!-- maven deploy: to be used by the subbuild and declare deps on jar -->
+	<target name="deploy" depends="jar">
+		<fail unless="offline.repository.jboss.org"
+			message="offline.repository.jboss.org must be defined"/>
+		<jar jarfile="${src.jar}" basedir="${src.dir}">
+			<include name="**/*.java"/>
+			<exclude name="**/test/*.java"/>
+			<!-- patternset refid="meta.files" / -->
+		</jar>
+		
+		<artifact:pom id="maven.project" file="${pom.file}"/>
+		
+		<artifact:install file="${jar.file.name}">
+			<pom refid="maven.project"/>
+		</artifact:install>
+		
+		<artifact:deploy file="${jar.file.name}">
+			<pom refid="maven.project"/>
+			<remoteRepository refId="offline.repository.jboss.org">
+			</remoteRepository>
+			<attach file="${src.jar}" classifier="sources"/>
+			<attach file="${jar.file.name}" classifier=""/>
+		</artifact:deploy>
+	</target>
+	
+</project>
\ No newline at end of file

Deleted: search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml
===================================================================
--- search/trunk/doc/quickstart/src/main/resources/archetype-resources/pom.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,81 +0,0 @@
-<?xml version="1.0"?>
-<project>
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>${groupId}</groupId>
-    <artifactId>${artifactId}</artifactId>
-    <packaging>jar</packaging>
-    <version>${version}</version>
-    <name>A custom project</name>
-    <url>http://www.myorganization.org</url>
-    <dependencies>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-search</artifactId>
-            <version>3.1.0.Beta2</version>
-        </dependency>
-        <dependency>
-            <groupId>cglib</groupId>
-            <artifactId>cglib</artifactId>
-            <version>2.1_3</version>
-        </dependency>        
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-annotations</artifactId>
-            <version>3.4.0.CR1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-entitymanager</artifactId>
-            <version>3.4.0.CR1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-common</artifactId>
-            <version>1.3.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-core</artifactId>
-            <version>1.3.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-lucene-snowball</artifactId>
-            <version>1.3.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <version>1.4.2</version>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-log4j12</artifactId>
-            <version>1.4.2</version>
-        </dependency>
-        <dependency>
-            <groupId>hsqldb</groupId>
-            <artifactId>hsqldb</artifactId>
-            <version>1.8.0.2</version>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <version>4.4</version>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-compiler-plugin</artifactId>
-                <version>2.0.2</version>
-                <configuration>
-                    <source>1.5</source>
-                    <target>1.5</target>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-</project>

Copied: search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml (from rev 15398, search/trunk/doc/quickstart/src/main/resources/archetype-resources/pom.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/doc/quickstart/src/main/resources/archetype-resources/pom.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,81 @@
+<?xml version="1.0"?>
+<project>
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>${groupId}</groupId>
+    <artifactId>${artifactId}</artifactId>
+    <packaging>jar</packaging>
+    <version>${version}</version>
+    <name>A custom project</name>
+    <url>http://www.myorganization.org</url>
+    <dependencies>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-search</artifactId>
+            <version>3.1.0.Beta2</version>
+        </dependency>
+        <dependency>
+            <groupId>cglib</groupId>
+            <artifactId>cglib</artifactId>
+            <version>2.1_3</version>
+        </dependency>        
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-annotations</artifactId>
+            <version>3.4.0.CR1</version>
+        </dependency>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-entitymanager</artifactId>
+            <version>3.4.0.CR1</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.solr</groupId>
+            <artifactId>solr-common</artifactId>
+            <version>1.3.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.solr</groupId>
+            <artifactId>solr-core</artifactId>
+            <version>1.3.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-snowball</artifactId>
+            <version>2.4.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.4.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-log4j12</artifactId>
+            <version>1.4.2</version>
+        </dependency>
+        <dependency>
+            <groupId>hsqldb</groupId>
+            <artifactId>hsqldb</artifactId>
+            <version>1.8.0.2</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.4</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.0.2</version>
+                <configuration>
+                    <source>1.5</source>
+                    <target>1.5</target>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>

Deleted: search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml
===================================================================
--- search/trunk/doc/reference/en/modules/getting-started.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,576 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Hibernate, Relational Persistence for Idiomatic Java
-  ~
-  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
-  ~ indicated by the @author tags or express copyright attribution
-  ~ statements applied by the authors.  All third-party contributions are
-  ~ distributed under license by Red Hat Middleware LLC.
-  ~
-  ~ This copyrighted material is made available to anyone wishing to use, modify,
-  ~ copy, or redistribute it subject to the terms and conditions of the GNU
-  ~ Lesser General Public License, as published by the Free Software Foundation.
-  ~
-  ~ This program is distributed in the hope that it will be useful,
-  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
-  ~ for more details.
-  ~
-  ~ You should have received a copy of the GNU Lesser General Public License
-  ~ along with this distribution; if not, write to:
-  ~ Free Software Foundation, Inc.
-  ~ 51 Franklin Street, Fifth Floor
-  ~ Boston, MA  02110-1301  USA
-  -->
-<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
-"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
-<chapter id="getting-started">
-  <!--  $Id$ -->
-
-  <title>Getting started</title>
-
-  <para>Welcome to Hibernate Search! The following chapter will guide you
-  through the initial steps required to integrate Hibernate Search into an
-  existing Hibernate-enabled application. If you are new to Hibernate we
-  recommend you start <ulink
-  url="http://hibernate.org/152.html">here</ulink>.</para>
-
-  <section>
-    <title>System Requirements</title>
-
-    <table>
-      <title>System requirements</title>
-
-      <tgroup cols="2">
-        <tbody>
-          <row>
-            <entry>Java Runtime</entry>
-
-            <entry>A JDK or JRE version <emphasis>5</emphasis> or greater. You
-            can download a Java Runtime for Windows/Linux/Solaris <ulink
-            url="http://java.sun.com/javase/downloads/"> here
-            </ulink>.</entry>
-          </row>
-
-          <row>
-            <entry>Hibernate Search</entry>
-
-            <entry><literal>hibernate-search.jar</literal> and all the
-            dependencies from the <literal>lib</literal> directory of the
-            Hibernate Search distribution, especially Lucene :)</entry>
-          </row>
-
-          <row>
-            <entry>Hibernate Core</entry>
-
-            <entry>These instructions have been tested against Hibernate 3.3.x.
-            In addition to the main <literal>hibernate3.jar</literal> you will need
-            all required libraries from the <literal>lib</literal> directory of
-            the distribution. Refer to <literal>README.txt</literal> in the
-            <literal>lib</literal> directory of the distribution to determine
-            the minimum runtime requirements.</entry>
-          </row>
-
-          <row>
-            <entry>Hibernate Annotations</entry>
-
-            <entry>Even though Hibernate Search can be used without Hibernate
-            Annotations the following instructions will use them for basic
-            entity configuration (<emphasis>@Entity, @Id,
-            @OneToMany,...</emphasis>). This part of the configuration could
-            also be expressed in XML or code. However, Hibernate Search itself
-            has its own set of annotations (<emphasis>@Indexed, @DocumentId,
-            @Field,...</emphasis>) for which no alternative configuration
-            exists so far. The tutorial is tested against version
-            3.4.x of Hibernate Annotations.</entry>
-          </row>
-        </tbody>
-      </tgroup>
-    </table>
-
-    <para>You can download all dependencies from the Hibernate <ulink
-    url="http://www.hibernate.org/6.html">download site</ulink>. You can also
-    verify the dependency versions against the <ulink
-    url="http://www.hibernate.org/6.html#A3">Hibernate Compatibility
-    Matrix</ulink>.</para>
-  </section>
-
-  <section>
-    <title>Maven</title>
-
-    <para>Instead of managing all dependencies manually, Maven users can use
-    the <ulink
-    url="http://repository.jboss.com/maven2">JBoss Maven repository</ulink>.
-    Just add the JBoss repository URL to the <emphasis>repositories</emphasis>
-    section of your <filename>pom.xml</filename> or
-    <filename>settings.xml</filename>:</para>
-
-    <programlisting>
-&lt;repository&gt;
-  &lt;id&gt;repository.jboss.org&lt;/id&gt;
-  &lt;name&gt;JBoss Maven Repository&lt;/name&gt;
-  &lt;url&gt;http://repository.jboss.org/maven2&lt;/url&gt;
-  &lt;layout&gt;default&lt;/layout&gt;
-&lt;/repository&gt;
-      </programlisting>
-
-    <para>Then add the following dependencies to your pom.xml:</para>
-
-    <programlisting>
-&lt;dependency&gt;
-   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
-   &lt;artifactId&gt;hibernate-search&lt;/artifactId&gt;
-   &lt;version&gt;3.1.0.Beta2&lt;/version&gt;
-&lt;/dependency&gt;
-&lt;dependency&gt;
-   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
-   &lt;artifactId&gt;hibernate-annotations&lt;/artifactId&gt;
-   &lt;version&gt;3.4.0.CR1&lt;/version&gt;
-&lt;/dependency&gt;
-&lt;dependency&gt;
-   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
-   &lt;artifactId&gt;hibernate-entitymanager&lt;/artifactId&gt;
-   &lt;version&gt;3.4.0.CR1&lt;/version&gt;
-&lt;/dependency&gt;
-&lt;dependency&gt;
-   &lt;groupId&gt;org.apache.solr&lt;/groupId&gt;
-   &lt;artifactId&gt;solr-common&lt;/artifactId&gt;
-   &lt;version&gt;1.3.0&lt;/version&gt;
-&lt;/dependency&gt;
-&lt;dependency&gt;
-   &lt;groupId&gt;org.apache.solr&lt;/groupId&gt;
-   &lt;artifactId&gt;solr-core&lt;/artifactId&gt;
-   &lt;version&gt;1.3.0&lt;/version&gt;
-&lt;/dependency&gt;
-&lt;dependency&gt;
-   &lt;groupId&gt;org.apache.solr&lt;/groupId&gt;
-   &lt;artifactId&gt;solr-lucene-snowball&lt;/artifactId&gt;
-   &lt;version&gt;1.3.0&lt;/version&gt;
-&lt;/dependency&gt;
-      </programlisting>
-
-    <para>Not all dependencies are required. Only the
-    <emphasis>hibernate-search</emphasis> dependency is mandatory. This
-    dependency, together with its required transitive dependencies, contains
-    everything needed to use Hibernate Search.
-    <emphasis>hibernate-annotations</emphasis> is only needed if you want to
-    use annotations to configure your domain model as we do in this tutorial.
-    However, even if you choose not to use Hibernate Annotations you will
-    still have to use the Hibernate Search specific annotations to configure
-    your Lucene index. Currently there is no XML configuration option
-    available for Hibernate Search.
-    <emphasis>hibernate-entitymanager</emphasis> is required if you want to
-    use Hibernate Search in conjunction with JPA. Finally, the Solr
-    dependencies are only needed if you want to utilize Solr's analyzer
-    framework. More about this later.</para>
-  </section>
-
-  <section>
-    <title>Configuration</title>
-
-    <para>Once you have downloaded and added all required dependencies to your
-    application you have to add a few properties to your Hibernate
-    configuration file. If you are using Hibernate directly this can be done
-    in <literal>hibernate.properties</literal> or
-    <literal>hibernate.cfg.xml</literal>. If you are using Hibernate via JPA
-    you can also add the properties to <literal>persistence.xml</literal>. The
-    good news is that for standard use most properties offer a sensible
-    default. Within <filename>persistence.xml</filename> this could look like
-    this:</para>
-
-    <para><programlisting>
-...
-&lt;property name="hibernate.search.default.directory_provider" 
-   value="org.hibernate.search.store.FSDirectoryProvider"/&gt; 
-
-&lt;property name="hibernate.search.default.indexBase" value="/var/lucene/indexes"/&gt; 
-...
-    </programlisting>First you have to tell Hibernate Search which
-    <classname>DirectoryProvider</classname> to use. This can be achieved by
-    setting the <literal>hibernate.search.default.directory_provider</literal>
-    property. Apache Lucene has a notion of <literal>Directory</literal> to
-    store the index files. Hibernate Search handles the initialization and
-    configuration of a Lucene <literal>Directory</literal> instance via a
-    <literal>DirectoryProvider</literal>. In this tutorial we will use a
-    subclass of <literal>DirectoryProvider</literal> called
-    <classname>FSDirectoryProvider</classname>. This will give us the ability
-    to physically inspect the Lucene indexes created by Hibernate Search (e.g.
-    via <ulink url="http://www.getopt.org/luke/">Luke</ulink>). Once you have
-    a working configuration you can start experimenting with other directory
-    providers (see <xref linkend="search-configuration-directory" />). Next to
-    the directory provider you also have to specify the default root directory
-    for all indexes via
-    <literal>hibernate.search.default.indexBase</literal>.</para>
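-
-    <para>If you configure Hibernate programmatically instead of through a
-    configuration file, the same two properties can be set on the
-    configuration object. The following is only a minimal sketch, assuming the
-    <classname>AnnotationConfiguration</classname> class from Hibernate
-    Annotations:</para>
-
-    <programlisting>
-// minimal sketch: programmatic equivalent of the two properties shown above
-AnnotationConfiguration cfg = new AnnotationConfiguration();
-cfg.configure(); // reads hibernate.cfg.xml
-cfg.setProperty( "hibernate.search.default.directory_provider",
-    "org.hibernate.search.store.FSDirectoryProvider" );
-cfg.setProperty( "hibernate.search.default.indexBase", "/var/lucene/indexes" );
-SessionFactory sessionFactory = cfg.buildSessionFactory();
-    </programlisting>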
-
-    <para>Let's further assume that your application contains the Hibernate
-    managed classes <classname>example.Book</classname> and
-    <classname>example.Author</classname> and you want to add free text search
-    capabilities to your application in order to search the books contained in
-    your database.</para>
-
-    <programlisting>
-package example;
-...
-@Entity
-public class Book {
-
-  @Id
-  @GeneratedValue
-  private Integer id; 
-
-  private String title;  
-
-  private String subtitle; 
-
-  @ManyToMany 
-  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
-
-  private Date publicationDate;
-  
-  public Book() {
-  } 
-  
-  // standard getters/setters follow here
-  ...
-} 
-    </programlisting>
-
-    <para><programlisting>
-package example;
-...
-@Entity
-public class Author {
-
-  @Id
-  @GeneratedValue
-  private Integer id;
-
-  private String name;
-
-  public Author() {
-  } 
- 
-  // standard getters/setters follow here
-  ...
-}
-
-</programlisting></para>
-
-    <para>To achieve this you have to add a few annotations to the
-    <classname>Book</classname> and <classname>Author</classname> class. The
-    first annotation <literal>@Indexed</literal> marks
-    <classname>Book</classname> as indexable. By design Hibernate Search needs
-    to store an untokenized id in the index to ensure index uniqueness for a
-    given entity. <literal>@DocumentId</literal> marks the property to use for
-    this purpose and is in most cases the same as the database primary
-    key.</para>
-
-    <para>Next you have to mark the fields you want to make searchable. Let's
-    start with <literal>title</literal> and <literal>subtitle</literal> and
-    annotate both with <literal>@Field</literal>. The parameter
-    <literal>index=Index.TOKENIZED</literal> will ensure that the text will be
-    tokenized using the default Lucene analyzer. Usually, tokenizing means
-    chunking a sentence into individual words and potentially excluding common
-    words like <literal>'a'</literal> or '<literal>the</literal>'. We will
-    talk more about analyzers a little later on. The second parameter we
-    specify within <literal>@Field</literal>,<literal>
-    store=Store.NO</literal>, ensures that the actual data will not be stored
-    in the index. This is the default setting and probably a good choice
-    unless you want to avoid database roundtrips and retrieve the indexed data
-    via projections (<xref linkend="projections" />). Without projections,
-    Hibernate Search will by default execute the Lucene query in order to
-    find the database identifiers of the entities matching the query criteria
-    and use these identifiers to retrieve managed objects from the database.
-    Is it not better, then, to always use projections? The answer is no, since
-    projections only return object arrays and not managed entities. The
-    decision for or against projection has to be made on a case-by-case
-    basis.</para>
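-
-    <para>To give a rough idea of what a projection based query looks like,
-    here is a minimal sketch assuming the
-    <classname>org.hibernate.search.FullTextQuery</classname> interface and
-    its projection constants; remember that a regular property can only be
-    projected if it is actually stored in the index:</para>
-
-    <programlisting>
-// minimal sketch: project the entity id and the Lucene score instead of loading entities
-// luceneQuery is an org.apache.lucene.search.Query built elsewhere
-org.hibernate.search.FullTextQuery hibQuery =
-    fullTextSession.createFullTextQuery( luceneQuery, Book.class );
-hibQuery.setProjection( FullTextQuery.ID, FullTextQuery.SCORE );
-List results = hibQuery.list();
-Object[] firstRow = (Object[]) results.get( 0 );
-Integer id = (Integer) firstRow[0];  // the document id of the matching Book
-Float score = (Float) firstRow[1];   // the Lucene score of the match
-    </programlisting>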
-
-    <para>After this short look under the hood let's go back to annotating the
-    <classname>Book</classname> class. Another annotation we have not yet
-    discussed is <literal>@DateBridge</literal>. This annotation is one of the
-    built-in field bridges in Hibernate Search. The Lucene index is purely
-    string based. For this reason Hibernate Search must convert the data types
-    of the indexed fields to strings and vice versa. A range of predefined
-    bridges are provided, including the <classname>DateBridge</classname>
-    which will convert a <classname>java.util.Date</classname> into a
-    <classname>String</classname> with the specified resolution. For more
-    details see <xref linkend="search-mapping-bridge" />.</para>
-
-    <para>This leaves us with <literal>@IndexedEmbedded</literal>. This
-    annotation is used to index associated entities
-    (<literal>@ManyToMany</literal>, <literal>@*ToOne</literal> and
-    <literal>@Embedded</literal>) as part of the owning entity. This is needed
-    since a Lucene index document is a flat data structure which does not know
-    anything about object relations. To ensure that the author's name will be
-    searchable you have to make sure that the names are indexed as part of the
-    book itself. On top of <literal>@IndexedEmbedded</literal> you will also
-    have to mark all fields of the associated entity you want to have included
-    in the index with <literal>@Field</literal>. For more details see <xref
-    linkend="search-mapping-associated" />.</para>
-
-    <para>These settings should be sufficient for now. For more details on
-    entity mapping refer to <xref linkend="search-mapping-entity" />.</para>
-
-    <programlisting>
-package example;
-...
-@Entity
-<emphasis role="bold">@Indexed</emphasis>
-public class Book {
-
-  @Id
-  @GeneratedValue
-  <emphasis role="bold">@DocumentId</emphasis>
-  private Integer id;
-  
-  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
-  private String title;
-  
-  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
-  private String subtitle; 
-
-  <emphasis role="bold">@IndexedEmbedded</emphasis>
-  @ManyToMany 
-  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
-
-<emphasis role="bold">  @Field(index = Index.UN_TOKENIZED, store = Store.YES)
-  @DateBridge(resolution = Resolution.DAY)</emphasis>
-  private Date publicationDate;
-  
-  public Book() {
-  } 
-  
-  // standard getters/setters follow here
-  ... 
-}
-  </programlisting>
-
-    <programlisting>
-package example;
-...
-@Entity
-public class Author {
-
-  @Id
-  @GeneratedValue
-  private Integer id;
-
-  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
-  private String name;
-
-  public Author() {
-  } 
- 
-  // standard getters/setters follow here
-  ...
-}
-  </programlisting>
-  </section>
-
-  <section>
-    <title>Indexing</title>
-
-    <para>Hibernate Search will transparently index every entity persisted,
-    updated or removed through Hibernate Core. However, you have to trigger an
-    initial indexing to populate the Lucene index with the data already present
-    in your database. Once you have added the above properties and annotations
-    it is time to trigger an initial batch index of your books. You can
-    achieve this by using one of the following code snippets (see also <xref
-    linkend="search-batchindex" />):</para>
-
-    <para>Example using Hibernate Session:</para>
-
-    <programlisting>
-FullTextSession fullTextSession = Search.getFullTextSession(session);
-Transaction tx = fullTextSession.beginTransaction();
-List books = session.createQuery("from Book as book").list();
-for (Book book : books) {
-    fullTextSession.index(book);
-}
-tx.commit(); //index is written at commit time       
-    </programlisting>
-
-    <para>Example using JPA:</para>
-
-    <programlisting>
-EntityManager em = entityManagerFactory.createEntityManager();
-FullTextEntityManager fullTextEntityManager = Search.getFullTextEntityManager(em);
-List books = em.createQuery("select book from Book as book").getResultList();
-for (Book book : books) {
-    fullTextEntityManager.index(book);
-} 
-    </programlisting>
-
-    <para>After executing the above code, you should be able to see a Lucene
-    index under <literal>/var/lucene/indexes/example.Book</literal>. Go ahead
-    and inspect this index; it will help you understand how Hibernate Search
-    works.</para>
-  </section>
-
-  <section>
-    <title>Searching</title>
-
-    <para>Now it is time to execute a first search. The following code will
-    prepare a query against the indexed fields, execute it and return a list
-    of <classname>Book</classname>s:</para>
-
-    <para>Example using Hibernate Session:</para>
-
-    <programlisting>
-FullTextSession fullTextSession = Search.getFullTextSession(session);
-
-Transaction tx = fullTextSession.beginTransaction();
-
-MultiFieldQueryParser parser = new MultiFieldQueryParser( new String[]{"title", "subtitle", "authors.name", "publicationDate"}, 
-  new StandardAnalyzer());
-Query query = parser.parse( "Java rocks!" );
-org.hibernate.Query hibQuery = fullTextSession.createFullTextQuery( query, Book.class );
-List result = hibQuery.list();
-  
-tx.commit();
-session.close();  
-    </programlisting>
-
-    <para>Example using JPA:</para>
-
-    <programlisting>
-EntityManager em = entityManagerFactory.createEntityManager();
-
-FullTextEntityManager fullTextEntityManager = 
-    org.hibernate.search.jpa.Search.getFullTextEntityManager(em);
-MultiFieldQueryParser parser = new MultiFieldQueryParser( new String[]{"title", "subtitle", "authors.name", "publicationDate"}, 
-  new StandardAnalyzer());
-org.apache.lucene.search.Query query = parser.parse( "Java rocks!" );
-javax.persistence.Query persistenceQuery = fullTextEntityManager.createFullTextQuery( query, Book.class );
-List result = persistenceQuery.getResultList();
-    </programlisting>
-  </section>
-
-  <section>
-    <title>Analyzer</title>
-
-    <para>Assume that one of your indexed book entities has the title
-    "Refactoring: Improving the Design of Existing Code" and you want to get
-    hits for all of the following queries: "refactor", "refactors",
-    "refactored" and "refactoring". In Lucene this can be achieved by choosing
-    an analyzer class which applies word stemming during the indexing
-    <emphasis role="bold">and</emphasis> search process. Hibernate Search
-    offers several ways to configure the analyzer to use (see <xref
-    linkend="analyzer" />):</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>Setting the <literal>hibernate.search.analyzer</literal>
-        property in the configuration file. The specified class will then be
-        the default analyzer.</para>
-      </listitem>
-
-      <listitem>
-        <para>Setting the <literal>@Analyzer</literal>
-        annotation at the entity level.</para>
-      </listitem>
-
-      <listitem>
-        <para>Setting the <literal>@Analyzer</literal>
-        annotation at the field level.</para>
-      </listitem>
-    </itemizedlist>
-
-    <para>When using the <literal>@Analyzer</literal> annotation one can
-    either specify the fully qualified classname of the analyzer to use or one
-    can refer to an analyzer definition defined by the
-    <literal>@AnalyzerDef</literal> annotation. In the latter case the Solr
-    analyzer framework with its factories approach can be used. To find out
-    more about the factory classes available you can either browse the Solr
-    JavaDoc or read the corresponding section on the <ulink
-    url="http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters">Solr
-    Wiki</ulink>. In the example a
-    <classname>StandardTokenizerFactory</classname> is used followed by two
-    filter factories, <classname>LowerCaseFilterFactory</classname> and
-    <classname>SnowballPorterFilterFactory</classname>. The standard tokenizer
-    splits words at punctuation characters and hyphens while keeping email
-    addresses and internet hostnames intact. It is a good general purpose
-    tokenizer. The lowercase filter then lowercases the letters in each token,
-    whereas the snowball filter finally applies the actual language
-    stemming.</para>
-
-    <para>Generally, when using the Solr framework you have to start with a
-    tokenizer followed by an arbitrary number of filters.</para>
-
-    <programlisting>
-
-package example;
-...
- at Entity
- at Indexed
-<emphasis role="bold">@AnalyzerDef(name = "customanalyzer",
-  tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
-  filters = {
-    @TokenFilterDef(factory = LowerCaseFilterFactory.class),
-    @TokenFilterDef(factory = SnowballPorterFilterFactory.class, params = {
-      @Parameter(name = "language", value = "English")
-    })
-  })</emphasis>
-public class Book {
-
-  @Id
-  @GeneratedValue
-  @DocumentId
-  private Integer id;
-  
-  @Field(index=Index.TOKENIZED, store=Store.NO)
-  <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
-  private String title;
-  
-  @Field(index=Index.TOKENIZED, store=Store.NO)
-  <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
-  private String subtitle; 
-
-  @IndexedEmbedded
-  @ManyToMany 
-  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
-
-<emphasis role="bold"> </emphasis> @Field(index = Index.UN_TOKENIZED, store = Store.YES)
-  @DateBridge(resolution = Resolution.DAY)
-  private Date publicationDate;
-  
-  public Book() {
-  } 
-  
-  // standard getters/setters follow here
-  ... 
-}
-
-  </programlisting>
-  </section>
-
-  <section>
-    <title>What's next</title>
-
-    <para>The above paragraphs hopefully helped you get an overview of
-    Hibernate Search. Using the Maven archetype plugin and the following
-    command you can create an initial runnable Maven project structure
-    populated with the example code of this tutorial.</para>
-
-    <para><programlisting>mvn archetype:create \ 
-    -DarchetypeGroupId=org.hibernate \
-    -DarchetypeArtifactId=hibernate-search-quickstart \ 
-    -DarchetypeVersion=3.1.0.Beta2 \
-    -DgroupId=my.company -DartifactId=quickstart</programlisting>Using the
-    Maven project you can execute the examples, inspect the file system based
-    index, and search for and retrieve a list of managed objects. Just run
-    <emphasis>mvn package</emphasis> to compile the sources and run the unit
-    tests.</para>
-
-    <para>The next step after this tutorial is to get more familiar with the
-    overall architecture of Hibernate Search (<xref
-    linkend="search-architecture" />) and explore the basic features in more
-    detail. Two topics which were only briefly touched in this tutorial were
-    analyzer configuration (<xref linkend="analyzer" />) and field bridges
-    (<xref linkend="search-mapping-bridge" />), both important features
-    required for more fine-grained indexing. More advanced topics cover
-    clustering (<xref linkend="jms-backend" />) and the handling of large indexes
-    (<xref linkend="search-configuration-directory-sharding" />).</para>
-  </section>
-</chapter>

Copied: search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml (from rev 15398, search/trunk/doc/reference/en/modules/getting-started.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/getting-started.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,576 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Hibernate, Relational Persistence for Idiomatic Java
+  ~
+  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
+  ~ indicated by the @author tags or express copyright attribution
+  ~ statements applied by the authors.  All third-party contributions are
+  ~ distributed under license by Red Hat Middleware LLC.
+  ~
+  ~ This copyrighted material is made available to anyone wishing to use, modify,
+  ~ copy, or redistribute it subject to the terms and conditions of the GNU
+  ~ Lesser General Public License, as published by the Free Software Foundation.
+  ~
+  ~ This program is distributed in the hope that it will be useful,
+  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
+  ~ for more details.
+  ~
+  ~ You should have received a copy of the GNU Lesser General Public License
+  ~ along with this distribution; if not, write to:
+  ~ Free Software Foundation, Inc.
+  ~ 51 Franklin Street, Fifth Floor
+  ~ Boston, MA  02110-1301  USA
+  -->
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
+"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
+<chapter id="getting-started">
+  <!--  $Id$ -->
+
+  <title>Getting started</title>
+
+  <para>Welcome to Hibernate Search! The following chapter will guide you
+  through the initial steps required to integrate Hibernate Search into an
+  existing Hibernate enabled application. If you are new to Hibernate, we
+  recommend you start <ulink
+  url="http://hibernate.org/152.html">here</ulink>.</para>
+
+  <section>
+    <title>System Requirements</title>
+
+    <table>
+      <title>System requirements</title>
+
+      <tgroup cols="2">
+        <tbody>
+          <row>
+            <entry>Java Runtime</entry>
+
+            <entry>A JDK or JRE version <emphasis>5</emphasis> or greater. You
+            can download a Java Runtime for Windows/Linux/Solaris <ulink
+            url="http://java.sun.com/javase/downloads/"> here
+            </ulink>.</entry>
+          </row>
+
+          <row>
+            <entry>Hibernate Search</entry>
+
+            <entry><literal>hibernate-search.jar</literal> and all the
+            dependencies from the <literal>lib</literal> directory of the
+            Hibernate Search distribution, most notably Lucene.</entry>
+          </row>
+
+          <row>
+            <entry>Hibernate Core</entry>
+
+            <entry>These instructions have been tested against Hibernate 3.3.x.
+            Next to the main <literal>hibernate3.jar</literal> you will need
+            all required libraries from the <literal>lib</literal> directory of
+            the distribution. Refer to <literal>README.txt</literal> in the
+            <literal>lib</literal> directory of the distribution to determine
+            the minimum runtime requirements.</entry>
+          </row>
+
+          <row>
+            <entry>Hibernate Annotations</entry>
+
+            <entry>Even though Hibernate Search can be used without Hibernate
+            Annotations, the following instructions will use them for basic
+            entity configuration (<emphasis>@Entity, @Id,
+            @OneToMany,...</emphasis>). This part of the configuration could
+            also be expressed in XML or code. However, Hibernate Search itself
+            has its own set of annotations (<emphasis>@Indexed, @DocumentId,
+            @Field,...</emphasis>) for which so far no alternative
+            configuration exists. The tutorial is tested against version
+            3.4.x of Hibernate Annotations.</entry>
+          </row>
+        </tbody>
+      </tgroup>
+    </table>
+
+    <para>You can download all dependencies from the Hibernate <ulink
+    url="http://www.hibernate.org/6.html">download site</ulink>. You can also
+    verify the dependency versions against the <ulink
+    url="http://www.hibernate.org/6.html#A3">Hibernate Compatibility
+    Matrix</ulink>.</para>
+  </section>
+
+  <section>
+    <title>Maven</title>
+
+    <para>Instead of managing all dependencies manually, Maven users can use
+    the <ulink
+    url="http://repository.jboss.com/maven2">JBoss Maven repository</ulink>.
+    Just add the JBoss repository URL to the <emphasis>repositories</emphasis>
+    section of your <filename>pom.xml</filename> or
+    <filename>settings.xml</filename>:</para>
+
+    <programlisting>
+&lt;repository&gt;
+  &lt;id&gt;repository.jboss.org&lt;/id&gt;
+  &lt;name&gt;JBoss Maven Repository&lt;/name&gt;
+  &lt;url&gt;http://repository.jboss.org/maven2&lt;/url&gt;
+  &lt;layout&gt;default&lt;/layout&gt;
+&lt;/repository&gt;
+      </programlisting>
+
+    <para>Then add the following dependencies to your pom.xml:</para>
+
+    <programlisting>
+&lt;dependency&gt;
+   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
+   &lt;artifactId&gt;hibernate-search&lt;/artifactId&gt;
+   &lt;version&gt;3.1.0.Beta2&lt;/version&gt;
+&lt;/dependency&gt;
+&lt;dependency&gt;
+   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
+   &lt;artifactId&gt;hibernate-annotations&lt;/artifactId&gt;
+   &lt;version&gt;3.4.0.CR1&lt;/version&gt;
+&lt;/dependency&gt;
+&lt;dependency&gt;
+   &lt;groupId&gt;org.hibernate&lt;/groupId&gt;
+   &lt;artifactId&gt;hibernate-entitymanager&lt;/artifactId&gt;
+   &lt;version&gt;3.4.0.CR1&lt;/version&gt;
+&lt;/dependency&gt;
+&lt;dependency&gt;
+   &lt;groupId&gt;org.apache.solr&lt;/groupId&gt;
+   &lt;artifactId&gt;solr-common&lt;/artifactId&gt;
+   &lt;version&gt;1.3.0&lt;/version&gt;
+&lt;/dependency&gt;
+&lt;dependency&gt;
+   &lt;groupId&gt;org.apache.solr&lt;/groupId&gt;
+   &lt;artifactId&gt;solr-core&lt;/artifactId&gt;
+   &lt;version&gt;1.3.0&lt;/version&gt;
+&lt;/dependency&gt;
+&lt;dependency&gt;
+   &lt;groupId&gt;org.apache.lucene&lt;/groupId&gt;
+   &lt;artifactId&gt;lucene-snowball&lt;/artifactId&gt;
+   &lt;version&gt;2.4.0&lt;/version&gt;
+&lt;/dependency&gt;
+      </programlisting>
+
+    <para>Not all dependencies are required. Only the
+    <emphasis>hibernate-search</emphasis> dependency is mandatory. This
+    dependency, together with its required transitive dependencies, contains
+    everything needed to use Hibernate Search.
+    <emphasis>hibernate-annotations</emphasis> is only needed if you want to
+    use annotations to configure your domain model as we do in this tutorial.
+    However, even if you choose not to use Hibernate Annotations you will
+    still have to use the Hibernate Search specific annotations to configure
+    your Lucene index. Currently there is no XML configuration option
+    available for Hibernate Search.
+    <emphasis>hibernate-entitymanager</emphasis> is required if you want to
+    use Hibernate Search in conjunction with JPA. Finally, the Solr
+    dependencies are only needed if you want to utilize Solr's analyzer
+    framework. More about this later.</para>
+  </section>
+
+  <section>
+    <title>Configuration</title>
+
+    <para>Once you have downloaded and added all required dependencies to your
+    application you have to add a few properties to your Hibernate
+    configuration file. If you are using Hibernate directly this can be done
+    in <literal>hibernate.properties</literal> or
+    <literal>hibernate.cfg.xml</literal>. If you are using Hibernate via JPA
+    you can also add the properties to <literal>persistence.xml</literal>. The
+    good news is that for standard use most properties offer a sensible
+    default. Within <filename>persistence.xml</filename> this could look like
+    this:</para>
+
+    <para><programlisting>
+...
+&lt;property name="hibernate.search.default.directory_provider" 
+   value="org.hibernate.search.store.FSDirectoryProvider"/&gt; 
+
+&lt;property name="hibernate.search.default.indexBase" value="/var/lucene/indexes"/&gt; 
+...
+    </programlisting>First you have to tell Hibernate Search which
+    <classname>DirectoryProvider</classname> to use. This can be achieved by
+    setting the <literal>hibernate.search.default.directory_provider</literal>
+    property. Apache Lucene has a notion of <literal>Directory</literal> to
+    store the index files. Hibernate Search handles the initialization and
+    configuration of a Lucene <literal>Directory</literal> instance via a
+    <literal>DirectoryProvider</literal>. In this tutorial we will use an
+    implementation of <literal>DirectoryProvider</literal> called
+    <classname>FSDirectoryProvider</classname>. This will give us the ability
+    to physically inspect the Lucene indexes created by Hibernate Search (e.g.
+    via <ulink url="http://www.getopt.org/luke/">Luke</ulink>). Once you have
+    a working configuration you can start experimenting with other directory
+    providers (see <xref linkend="search-configuration-directory" />). Next to
+    the directory provider you also have to specify the default root directory
+    for all indexes via
+    <literal>hibernate.search.default.indexBase</literal>.</para>
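+
+    <para>If you configure Hibernate via <literal>hibernate.cfg.xml</literal>
+    instead of <filename>persistence.xml</filename>, the same two properties
+    can be set as regular property elements inside the
+    <literal>session-factory</literal> element. The following is just a
+    minimal sketch mirroring the values used above:</para>
+
+    <programlisting>
+&lt;property name="hibernate.search.default.directory_provider"&gt;org.hibernate.search.store.FSDirectoryProvider&lt;/property&gt;
+&lt;property name="hibernate.search.default.indexBase"&gt;/var/lucene/indexes&lt;/property&gt;
+    </programlisting>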
+
+    <para>Let's further assume that your application contains the Hibernate
+    managed classes <classname>example.Book</classname> and
+    <classname>example.Author</classname> and you want to add free text search
+    capabilities to your application in order to search the books contained in
+    your database.</para>
+
+    <programlisting>
+package example;
+...
+ at Entity
+public class Book {
+
+  @Id
+  @GeneratedValue
+  private Integer id; 
+
+  private String title;  
+
+  private String subtitle; 
+
+  @ManyToMany 
+  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
+
+  private Date publicationDate;
+  
+  public Book() {
+  } 
+  
+  // standard getters/setters follow here
+  ...
+} 
+    </programlisting>
+
+    <para><programlisting>
+package example;
+...
+ at Entity
+public class Author {
+
+  @Id
+  @GeneratedValue
+  private Integer id;
+
+  private String name;
+
+  public Author() {
+  } 
+ 
+  // standard getters/setters follow here
+  ...
+}
+
+</programlisting></para>
+
+    <para>To achieve this you have to add a few annotations to the
+    <classname>Book</classname> and <classname>Author</classname> class. The
+    first annotation <literal>@Indexed</literal> marks
+    <classname>Book</classname> as indexable. By design Hibernate Search needs
+    to store an untokenized id in the index to ensure index uniqueness of a
+    given entity. <literal>@DocumentId</literal> marks the property to use for
+    this purpose and is in most cases the same as the database primary
+    key.</para>
+
+    <para>Next you have to mark the fields you want to make searchable. Let's
+    start with <literal>title</literal> and <literal>subtitle</literal> and
+    annotate both with <literal>@Field</literal>. The parameter
+    <literal>index=Index.TOKENIZED</literal> will ensure that the text will be
+    tokenized using the default Lucene analyzer. Usually, tokenizing means
+    chunking a sentence into individual words and potentially excluding common
+    words like <literal>'a'</literal> or '<literal>the</literal>'. We will
+    talk more about analyzers a little later on. The second parameter we
+    specify within <literal>@Field</literal>,
+    <literal>store=Store.NO</literal>, ensures that the actual data will not
+    be stored in the index. This is the default setting and probably a good
+    choice unless you want to avoid database roundtrips and retrieve the
+    indexed data via projections (<xref linkend="projections" />). Without
+    projections, Hibernate Search will by default execute the Lucene query in
+    order to find the database identifiers of the entities matching the query
+    criteria and use these identifiers to retrieve managed objects from the
+    database. Is it not better, then, to always use projections? The answer is
+    no, since projections only return object arrays and not managed entities.
+    The decision for or against projections has to be made on a case-by-case
+    basis.</para>
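+
+    <para>To illustrate the API, the following is a minimal projection sketch.
+    It assumes that the projected fields were mapped with
+    <literal>store=Store.YES</literal> (which is not the case for
+    <literal>title</literal> in this tutorial) and that a Lucene
+    <literal>query</literal> and a <literal>fullTextSession</literal> are
+    already available as shown later in this chapter:</para>
+
+    <programlisting>
+org.hibernate.search.FullTextQuery hibQuery =
+    fullTextSession.createFullTextQuery( query, Book.class );
+hibQuery.setProjection( "title", "publicationDate" );
+List results = hibQuery.list(); // each element is an Object[] holding the projected field values
+    </programlisting>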
+
+    <para>After this short look under the hood let's go back to annotating the
+    <classname>Book</classname> class. Another annotation we have not yet
+    discussed is <literal>@DateBridge</literal>. This annotation is one of the
+    built-in field bridges in Hibernate Search. The Lucene index is purely
+    string based. For this reason Hibernate Search must convert the data types
+    of the indexed fields to strings and vice versa. A range of predefined
+    bridges are provided, including the <classname>DateBridge</classname>
+    which will convert a <classname>java.util.Date</classname> into a
+    <classname>String</classname> with the specified resolution. For more
+    details see <xref linkend="search-mapping-bridge" />.</para>
+
+    <para>This leaves us with <literal>@IndexedEmbedded</literal>. This
+    annotation is used to index associated entities
+    (<literal>@ManyToMany</literal>, <literal>@*ToOne</literal> and
+    <literal>@Embedded</literal>) as part of the owning entity. This is needed
+    since a Lucene index document is a flat data structure which does not know
+    anything about object relations. To ensure that the author's name will be
+    searchable you have to make sure that the names are indexed as part of the
+    book itself. On top of <literal>@IndexedEmbedded</literal> you will also
+    have to mark all fields of the associated entity you want to have included
+    in the index with <literal>@Field</literal>. For more details see <xref
+    linkend="search-mapping-associated" />.</para>
+
+    <para>These settings should be sufficient for now. For more details on
+    entity mapping refer to <xref linkend="search-mapping-entity" />.</para>
+
+    <programlisting>
+package example;
+...
+ at Entity
+<emphasis role="bold">@Indexed</emphasis>
+public class Book {
+
+  @Id
+  @GeneratedValue
+  <emphasis role="bold">@DocumentId</emphasis>
+  private Integer id;
+  
+  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
+  private String title;
+  
+  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
+  private String subtitle; 
+
+  <emphasis role="bold">@IndexedEmbedded</emphasis>
+  @ManyToMany 
+  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
+
+<emphasis role="bold">  @Field(index = Index.UN_TOKENIZED, store = Store.YES)
+  @DateBridge(resolution = Resolution.DAY)</emphasis>
+  private Date publicationDate;
+  
+  public Book() {
+  } 
+  
+  // standard getters/setters follow here
+  ... 
+}
+  </programlisting>
+
+    <programlisting>
+package example;
+...
+ at Entity
+public class Author {
+
+  @Id
+  @GeneratedValue
+  private Integer id;
+
+  <emphasis role="bold">@Field(index=Index.TOKENIZED, store=Store.NO)</emphasis>
+  private String name;
+
+  public Author() {
+  } 
+ 
+  // standard getters/setters follow here
+  ...
+}
+  </programlisting>
+  </section>
+
+  <section>
+    <title>Indexing</title>
+
+    <para>Hibernate Search will transparently index every entity persisted,
+    updated or removed through Hibernate Core. However, you have to trigger an
+    initial indexing to populate the Lucene index with the data already present
+    in your database. Once you have added the above properties and annotations
+    it is time to trigger an initial batch index of your books. You can
+    achieve this by using one of the following code snippets (see also <xref
+    linkend="search-batchindex" />):</para>
+
+    <para>Example using Hibernate Session:</para>
+
+    <programlisting>
+FullTextSession fullTextSession = Search.getFullTextSession(session);
+Transaction tx = fullTextSession.beginTransaction();
+List books = session.createQuery("from Book as book").list();
+for (Book book : books) {
+    fullTextSession.index(book);
+}
+tx.commit(); //index is written at commit time       
+    </programlisting>
+
+    <para>Example using JPA:</para>
+
+    <programlisting>
+EntityManager em = entityManagerFactory.createEntityManager();
+FullTextEntityManager fullTextEntityManager = Search.getFullTextEntityManager(em);
+List books = em.createQuery("select book from Book as book").getResultList();
+for (Book book : books) {
+    fullTextEntityManager.index(book);
+} 
+    </programlisting>
+
+    <para>After executing the above code, you should be able to see a Lucene
+    index under <literal>/var/lucene/indexes/example.Book</literal>. Go ahead
+    and inspect this index. It will help you understand how Hibernate Search
+    works.</para>
+  </section>
+
+  <section>
+    <title>Searching</title>
+
+    <para>Now it is time to execute a first search. The following code will
+    prepare a query against the indexed fields, execute it and return a list
+    of <classname>Book</classname>s:</para>
+
+    <para>Example using Hibernate Session:</para>
+
+    <programlisting>
+FullTextSession fullTextSession = Search.getFullTextSession(session);
+
+Transaction tx = fullTextSession.beginTransaction();
+
+MultiFieldQueryParser parser = new MultiFieldQueryParser( new String[]{"title", "subtitle", "authors.name", "publicationDate"}, 
+  new StandardAnalyzer());
+Query query = parser.parse( "Java rocks!" );
+org.hibernate.Query hibQuery = fullTextSession.createFullTextQuery( query, Book.class );
+List result = hibQuery.list();
+  
+tx.commit();
+session.close();  
+    </programlisting>
+
+    <para>Example using JPA:</para>
+
+    <programlisting>
+EntityManager em = entityManagerFactory.createEntityManager();
+
+FullTextEntityManager fullTextEntityManager = 
+    org.hibernate.search.jpa.Search.getFullTextEntityManager(em);
+MultiFieldQueryParser parser = new MultiFieldQueryParser( new String[]{"title", "subtitle", "authors.name", "publicationDate"}, 
+  new StandardAnalyzer());
+org.apache.lucene.search.Query query = parser.parse( "Java rocks!" );
+javax.persistence.Query persistenceQuery = fullTextEntityManager.createFullTextQuery( query, Book.class );
+List result = persistenceQuery.getResultList();
+    </programlisting>
+  </section>
+
+  <section>
+    <title>Analyzer</title>
+
+    <para>Assume that one of your indexed book entities has the title
+    "Refactoring: Improving the Design of Existing Code" and you want to get
+    hits for all of the following queries: "refactor", "refactors",
+    "refactored" and "refactoring". In Lucene this can be achieved by choosing
+    an analyzer class which applies word stemming during the indexing
+    <emphasis role="bold">and</emphasis> search process. Hibernate Search
+    offers several ways to configure the analyzer to use (see <xref
+    linkend="analyzer" />):</para>
+
+    <itemizedlist>
+      <listitem>
+        <para>Setting the <literal>hibernate.search.analyzer</literal>
+        property in the configuration file. The specified class will then be
+        the default analyzer (see the snippet after this list).</para>
+      </listitem>
+
+      <listitem>
+        <para>Setting the <literal>@Analyzer</literal>
+        annotation at the entity level.</para>
+      </listitem>
+
+      <listitem>
+        <para>Setting the <literal>@Analyzer</literal>
+        annotation at the field level.</para>
+      </listitem>
+    </itemizedlist>
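+
+    <para>As a sketch of the first option, the default analyzer could be set
+    in <filename>persistence.xml</filename> like this (the chosen analyzer
+    class is only an example):</para>
+
+    <programlisting>
+&lt;property name="hibernate.search.analyzer" 
+   value="org.apache.lucene.analysis.StopAnalyzer"/&gt; 
+    </programlisting>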
+
+    <para>When using the <literal>@Analyzer</literal> annotation one can
+    either specify the fully qualified classname of the analyzer to use or one
+    can refer to an analyzer definition defined by the
+    <literal>@AnalyzerDef</literal> annotation. In the latter case the Solr
+    analyzer framework with its factories approach can be used. To find out
+    more about the factory classes available you can either browse the Solr
+    JavaDoc or read the corresponding section on the <ulink
+    url="http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters">Solr
+    Wiki</ulink>. In the example a
+    <classname>StandardTokenizerFactory</classname> is used followed by two
+    filter factories, <classname>LowerCaseFilterFactory</classname> and
+    <classname>SnowballPorterFilterFactory</classname>. The standard tokenizer
+    splits words at punctuation characters and hyphens while keeping email
+    addresses and internet hostnames intact. It is a good general purpose
+    tokenizer. The lowercase filter then lowercases the letters in each token,
+    whereas the snowball filter finally applies the actual language
+    stemming.</para>
+
+    <para>Generally, when using the Solr framework you have to start with a
+    tokenizer followed by an arbitrary number of filters.</para>
+
+    <programlisting>
+
+package example;
+...
+ at Entity
+ at Indexed
+<emphasis role="bold">@AnalyzerDef(name = "customanalyzer",
+  tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
+  filters = {
+    @TokenFilterDef(factory = LowerCaseFilterFactory.class),
+    @TokenFilterDef(factory = SnowballPorterFilterFactory.class, params = {
+      @Parameter(name = "language", value = "English")
+    })
+  })</emphasis>
+public class Book {
+
+  @Id
+  @GeneratedValue
+  @DocumentId
+  private Integer id;
+  
+  @Field(index=Index.TOKENIZED, store=Store.NO)
+  <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
+  private String title;
+  
+  @Field(index=Index.TOKENIZED, store=Store.NO)
+  <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
+  private String subtitle; 
+
+  @IndexedEmbedded
+  @ManyToMany 
+  private Set&lt;Author&gt; authors = new HashSet&lt;Author&gt;();
+
+<emphasis role="bold"> </emphasis> @Field(index = Index.UN_TOKENIZED, store = Store.YES)
+  @DateBridge(resolution = Resolution.DAY)
+  private Date publicationDate;
+  
+  public Book() {
+  } 
+  
+  // standard getters/setters follow here
+  ... 
+}
+
+  </programlisting>
+  </section>
+
+  <section>
+    <title>What's next</title>
+
+    <para>The above paragraphs hopefully helped you get an overview of
+    Hibernate Search. Using the Maven archetype plugin and the following
+    command you can create an initial runnable Maven project structure
+    populated with the example code of this tutorial.</para>
+
+    <para><programlisting>mvn archetype:create \ 
+    -DarchetypeGroupId=org.hibernate \
+    -DarchetypeArtifactId=hibernate-search-quickstart \ 
+    -DarchetypeVersion=3.1.0.Beta2 \
+    -DgroupId=my.company -DartifactId=quickstart</programlisting>Using the
+    Maven project you can execute the examples, inspect the file system based
+    index, and search for and retrieve a list of managed objects. Just run
+    <emphasis>mvn package</emphasis> to compile the sources and run the unit
+    tests.</para>
+
+    <para>The next step after this tutorial is to get more familiar with the
+    overall architecture of Hibernate Search (<xref
+    linkend="search-architecture" />) and explore the basic features in more
+    detail. Two topics which were only briefly touched in this tutorial were
+    analyzer configuration (<xref linkend="analyzer" />) and field bridges
+    (<xref linkend="search-mapping-bridge" />), both important features
+    required for more fine-grained indexing. More advanced topics cover
+    clustering (<xref linkend="jms-backend" />) and the handling of large indexes
+    (<xref linkend="search-configuration-directory-sharding" />).</para>
+  </section>
+</chapter>

Deleted: search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml
===================================================================
--- search/trunk/doc/reference/en/modules/mapping.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,1285 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Hibernate, Relational Persistence for Idiomatic Java
-  ~
-  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
-  ~ indicated by the @author tags or express copyright attribution
-  ~ statements applied by the authors.  All third-party contributions are
-  ~ distributed under license by Red Hat Middleware LLC.
-  ~
-  ~ This copyrighted material is made available to anyone wishing to use, modify,
-  ~ copy, or redistribute it subject to the terms and conditions of the GNU
-  ~ Lesser General Public License, as published by the Free Software Foundation.
-  ~
-  ~ This program is distributed in the hope that it will be useful,
-  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
-  ~ for more details.
-  ~
-  ~ You should have received a copy of the GNU Lesser General Public License
-  ~ along with this distribution; if not, write to:
-  ~ Free Software Foundation, Inc.
-  ~ 51 Franklin Street, Fifth Floor
-  ~ Boston, MA  02110-1301  USA
-  -->
-<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
-"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
-<chapter id="search-mapping" revision="3">
-  <!--  $Id$ -->
-
-  <title>Mapping entities to the index structure</title>
-
-  <para>All the metadata information needed to index entities is described
-  through Java annotations. There is no need for XML mapping files (in
-  fact there is currently no XML configuration option) nor for a list of
-  indexed entities. The list is discovered at startup time by scanning the
-  Hibernate mapped entities.</para>
-
-  <section id="search-mapping-entity" revision="3">
-    <title>Mapping an entity</title>
-
-    <section>
-      <title>Basic mapping</title>
-
-      <para>First, we must declare a persistent class as indexable. This is
-      done by annotating the class with <literal>@Indexed</literal> (all
-      entities not annotated with <literal>@Indexed</literal> will be ignored
-      by the indexing process):</para>
-
-      <programlisting>@Entity
-<emphasis role="bold">@Indexed(index="indexes/essays")</emphasis>
-public class Essay {
-    ...
-}</programlisting>
-
-      <para>The <literal>index</literal> attribute tells Hibernate what the
-      Lucene directory name is (usually a directory on your file system). If
-      you wish to define a base directory for all Lucene indexes, you can use
-      the <literal>hibernate.search.default.indexBase</literal> property in
-      your configuration file. Each entity instance will be represented by a
-      Lucene <classname>Document</classname> inside the given index (aka
-      Directory).</para>
-
-      <para>For each property (or attribute) of your entity, you have the
-      ability to describe how it will be indexed. The default (i.e. no
-      annotation) means that the property is completely ignored by the indexing
-      process. <literal>@Field</literal> declares a property as indexed.
-      When indexing an element to a Lucene document you can specify how it is
-      indexed:</para>
-
-      <itemizedlist>
-        <listitem>
-          <para><literal>name</literal>: describes under which name the
-          property should be stored in the Lucene Document. The default value
-          is the property name (following the JavaBeans convention).</para>
-        </listitem>
-
-        <listitem>
-          <para><literal>store</literal>: describes whether or not the
-          property is stored in the Lucene index. You can store the value
-          <literal>Store.YES</literal> (consuming more space in the index but
-          allowing projection, see <xref linkend="projections" /> for more
-          information), store it in a compressed way
-          <literal>Store.COMPRESS</literal> (this does consume more CPU), or
-          avoid any storage <literal>Store.NO</literal> (this is the default
-          value). When a property is stored, you can retrieve it from the
-          Lucene Document (note that this is not related to whether the
-          element is indexed or not).</para>
-        </listitem>
-
-        <listitem>
-          <para><literal>index</literal>: describes how the element is indexed (i.e. the
-          process used to index the property and the type of information stored). The
-          different values are <literal>Index.NO</literal> (no indexing, i.e.
-          cannot be found by a query), <literal>Index.TOKENIZED</literal> (use
-          an analyzer to process the property),
-          <literal>Index.UN_TOKENIZED</literal> (no analyzer pre-processing),
-          <literal>Index.NO_NORMS</literal> (do not store the normalization
-          data). The default value is <literal>TOKENIZED</literal>.</para>
-        </listitem>
-
-        <listitem>
-          <para><literal>termVector</literal>: describes collections of term-frequency pairs.
-          This attribute enables term vectors to be stored during indexing so
-          that they are available within documents. The default value is
-          <literal>TermVector.NO</literal>.</para>
-
-          <para>The different values of this attribute are</para>
-
-          <informaltable align="left" width="">
-            <tgroup cols="2">
-              <colspec align="center" />
-
-              <thead>
-                <row>
-                  <entry align="center">Value</entry>
-
-                  <entry align="center">Definition</entry>
-                </row>
-              </thead>
-
-              <tbody>
-                <row>
-                  <entry align="left">TermVector.YES</entry>
-
-                  <entry>Store the term vectors of each document. This
-                  produces two synchronized arrays, one contains document
-                  terms and the other contains the term's frequency.</entry>
-                </row>
-
-                <row>
-                  <entry align="left">TermVector.NO</entry>
-
-                  <entry>Do not store term vectors.</entry>
-                </row>
-
-                <row>
-                  <entry align="left">TermVector.WITH_OFFSETS</entry>
-
-                  <entry>Store the term vector and token offset information.
-                  This is the same as TermVector.YES plus it contains the
-                  starting and ending offset position information for the
-                  terms.</entry>
-                </row>
-
-                <row>
-                  <entry align="left">TermVector.WITH_POSITIONS</entry>
-
-                  <entry>Store the term vector and token position information.
-                  This is the same as TermVector.YES plus it contains the
-                  ordinal positions of each occurrence of a term in a
-                  document.</entry>
-                </row>
-
-                <row>
-                  <entry
-                  align="left">TermVector.WITH_POSITIONS_OFFSETS</entry>
-
-                  <entry>Store the term vector, token position and offset
-                  information. This is a combination of the YES, WITH_OFFSETS
-                  and WITH_POSITIONS.</entry>
-                </row>
-              </tbody>
-            </tgroup>
-          </informaltable>
-        </listitem>
-      </itemizedlist>
-
-      <para>These attributes are part of the <literal>@Field</literal>
-      annotation.</para>
-
-      <para>Whether or not you want to store the data depends on how you wish
-      to use the index query result. For a regular Hibernate Search usage,
-      storing is not necessary. However you might want to store some fields to
-      subsequently project them (see <xref linkend="projections" /> for more
-      information).</para>
-
-      <para>Whether or not you want to tokenize a property depends on whether
-      you wish to search the element as is, or by the words it contains. It
-      makes sense to tokenize a text field, but not to do so for a date
-      field (or an id field). Note that fields used for sorting must not be
-      tokenized.</para>
-
-      <para>Finally, the id property of an entity is a special property used
-      by Hibernate Search to ensure index uniqueness of a given entity. By
-      design, an id has to be stored and must not be tokenized. To mark a
-      property as the index id, use the <literal>@DocumentId</literal>
-      annotation.</para>
-
-      <programlisting>@Entity
- at Indexed(index="indexes/essays")
-public class Essay {
-    ...
-
-    @Id
-    <emphasis role="bold">@DocumentId</emphasis>
-    public Long getId() { return id; }
-
-    <emphasis role="bold">@Field(name="Abstract", index=Index.TOKENIZED, store=Store.YES)</emphasis>
-    public String getSummary() { return summary; }
-
-    @Lob
-    <emphasis role="bold">@Field(index=Index.TOKENIZED)</emphasis>
-    public String getText() { return text; }
-}</programlisting>
-
-      <para>These annotations define an index with three fields:
-      <literal>id</literal> , <literal>Abstract</literal> and
-      <literal>text</literal> . Note that by default the field name is
-      decapitalized, following the JavaBean specification.</para>
-
-      <note>
-        <para>You <emphasis>must</emphasis> specify
-        <literal>@DocumentId</literal> on the identifier property of your
-        entity class.</para>
-      </note>
-    </section>
-
-    <section>
-      <title>Mapping properties multiple times</title>
-
-      <para>It is sometimes necessary to map a property multiple times per index,
-      with slightly different indexing strategies. In particular, sorting a query
-      by field requires the field to be <literal>UN_TOKENIZED</literal>. If
-      one wants to search by words in this property and still sort on it, one needs
-      to index it twice, once tokenized and once untokenized. @Fields allows you to
-      achieve this goal.</para>
-
-      <programlisting>@Entity
- at Indexed(index = "Book" )
-public class Book {
-    @Fields( {
-            @Field(index = Index.TOKENIZED),
-            @Field(name = "summary_forSort", index = Index.UN_TOKENIZED, store = Store.YES)
-            } )
-    public String getSummary() {
-        return summary;
-    }
-
-    ...
-}</programlisting>
-
-      <para>The field summary is indexed twice, once as
-      <literal>summary</literal> in a tokenized way, and once as
-      <literal>summary_forSort</literal> in an untokenized way. @Field
-      supports two attributes that are useful when @Fields is used:</para>
-
-      <itemizedlist>
-        <listitem>
-          <para>analyzer: defines an @Analyzer annotation per field rather than
-          per property</para>
-        </listitem>
-
-        <listitem>
-          <para>bridge: defines a @FieldBridge annotation per field rather
-          than per property</para>
-        </listitem>
-      </itemizedlist>
-
-      <para>See below for more information about analyzers and field
-      bridges.</para>
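-
-      <para>As a sketch of how the untokenized field can then be used, a
-      query could be sorted on it roughly as follows (assuming a
-      <literal>fullTextSession</literal> and a Lucene <literal>query</literal>
-      are already at hand; <classname>Sort</classname> and
-      <classname>SortField</classname> are Lucene classes):</para>
-
-      <programlisting>org.hibernate.search.FullTextQuery hibQuery =
-    fullTextSession.createFullTextQuery( query, Book.class );
-// sort on the untokenized field defined above
-hibQuery.setSort( new Sort( new SortField( "summary_forSort" ) ) );
-List results = hibQuery.list();</programlisting>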
-    </section>
-
-    <section id="search-mapping-associated">
-      <title>Embedded and associated objects</title>
-
-      <para>Associated objects as well as embedded objects can be indexed as
-      part of the root entity index. It is necessary if you expect to search a
-      given entity based on properties of the associated object(s). In the
-      following example, the use case is to return the places whose city is
-      Atlanta (In the Lucene query parser language, it would translate into
-      <code>address.city:Atlanta</code>).</para>
-
-      <programlisting>@Entity
- at Indexed
-public class Place {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Long id;
-
-    @Field( index = Index.TOKENIZED )
-    private String name;
-
-    @OneToOne( cascade = { CascadeType.PERSIST, CascadeType.REMOVE } )
-    <emphasis role="bold">@IndexedEmbedded</emphasis>
-    private Address address;
-    ....
-}
-
- at Entity
- at Indexed
-public class Address {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Long id;
-
-    @Field(index=Index.TOKENIZED)
-    private String street;
-
-    @Field(index=Index.TOKENIZED)
-    private String city;
-
-    <emphasis role="bold">@ContainedIn</emphasis>
-    @OneToMany(mappedBy="address")
-    private Set&lt;Place&gt; places;
-    ...
-}</programlisting>
-
-      <para>In this example, the place fields will be indexed in the
-      <literal>Place</literal> index. The <literal>Place</literal> index
-      documents will also contain the fields <literal>address.id</literal>,
-      <literal>address.street</literal>, and <literal>address.city</literal>
-      which you will be able to query. This is enabled by the
-      <literal>@IndexedEmbedded</literal> annotation.</para>
-
-      <para>Be careful. Because the data is denormalized in the Lucene index
-      when using the <classname>@IndexedEmbedded</classname> technique,
-      Hibernate Search needs to be aware of any change in the Place object and
-      any change in the Address object to keep the index up to date. To make
-      sure the Place Lucene document is updated when its Address changes, you
-      need to mark the other side of the bidirectional relationship with
-      <classname>@ContainedIn</classname>.</para>
-
-      <para><literal>@ContainedIn</literal> is only useful on associations
-      pointing to entities as opposed to embedded (collection of)
-      objects.</para>
-
-      <para>Let's make our example a bit more complex:</para>
-
-      <programlisting>@Entity
- at Indexed
-public class Place {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Long id;
-
-    @Field( index = Index.TOKENIZED )
-    private String name;
-
-    @OneToOne( cascade = { CascadeType.PERSIST, CascadeType.REMOVE } )
-    <emphasis role="bold">@IndexedEmbedded</emphasis>
-    private Address address;
-    ....
-}
-
- at Entity
- at Indexed
-public class Address {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Long id;
-
-    @Field(index=Index.TOKENIZED)
-    private String street;
-
-    @Field(index=Index.TOKENIZED)
-    private String city;
-
-    <emphasis role="bold">@IndexedEmbedded(depth = 1, prefix = "ownedBy_")</emphasis>
-    private Owner ownedBy;
-
-    <emphasis role="bold">@ContainedIn</emphasis>
-    @OneToMany(mappedBy="address")
-    private Set&lt;Place&gt; places;
-    ...
-}
-
- at Embeddable
-public class Owner {
-    @Field(index = Index.TOKENIZED)
-    private String name;
-   ...
-}</programlisting>
-
-      <para>Any <literal>@*ToMany, @*ToOne</literal> and
-      <literal>@Embedded</literal> attribute can be annotated with
-      <literal>@IndexedEmbedded</literal>. The attributes of the associated
-      class will then be added to the main entity index. In the previous
-      example, the index will contain the following fields</para>
-
-      <itemizedlist>
-        <listitem>
-          <para>id</para>
-        </listitem>
-
-        <listitem>
-          <para>name</para>
-        </listitem>
-
-        <listitem>
-          <para>address.street</para>
-        </listitem>
-
-        <listitem>
-          <para>address.city</para>
-        </listitem>
-
-        <listitem>
-          <para>address.ownedBy_name</para>
-        </listitem>
-      </itemizedlist>
-
-      <para>The default prefix is <literal>propertyName.</literal>, following
-      the traditional object navigation convention. You can override it using
-      the <literal>prefix</literal> attribute as it is shown on the
-      <literal>ownedBy</literal> property.</para>
-
-      <para><literal>depth</literal> is necessary when the object graph
-      contains a cyclic dependency of classes (not instances), for example if
-      <classname>Owner</classname> points to <classname>Place</classname>.
-      Hibernate Search will stop including indexed embedded attributes after
-      reaching the expected depth (or the object graph boundaries are
-      reached). A class having a self reference is an example of cyclic
-      dependency. In our example, because <literal>depth</literal> is set to
-      1, any <literal>@IndexedEmbedded</literal> attribute in Owner (if any)
-      will be ignored.</para>
-
-      <para>Such a feature (<literal>@IndexedEmbedded</literal>) is very
-      useful to express queries referring to associated objects, such
-      as:</para>
-
-      <itemizedlist>
-        <listitem>
-          <para>Return places where name contains JBoss and where address city
-          is Atlanta. In Lucene query this would be</para>
-
-          <programlisting>+name:jboss +address.city:atlanta  </programlisting>
-        </listitem>
-
-        <listitem>
-          <para>Return places where name contains JBoss and where owner's name
-          contains Joe. In Lucene query this would be</para>
-
-          <programlisting>+name:jboss +address.ownedBy_name:joe  </programlisting>
-        </listitem>
-      </itemizedlist>
-
-      <para>In a way it mimics the relational join operation in a more
-      efficient way (at the cost of data duplication). Remember that, out of
-      the box, Lucene indexes have no notion of association; the join
-      operation simply does not exist. It might help to keep the relational
-      model normalized while benefiting from the full text index speed and
-      feature richness.</para>
-
-      <para><note>
-          <para>An associated object can itself (but does not have to) be
-          <literal>@Indexed</literal></para>
-        </note></para>
-
-      <para>When @IndexedEmbedded points to an entity, the association has to
-      be bidirectional and the other side has to be annotated with
-      <literal>@ContainedIn</literal> (as seen in the previous example). If
-      not, Hibernate Search has no way to update the root index when the
-      associated entity is updated (in our example, a <literal>Place</literal>
-      index document has to be updated when the associated
-      <classname>Address</classname> instance is updated).</para>
-
-      <para>Sometimes, the object type annotated by
-      <classname>@IndexedEmbedded</classname> is not the object type targeted
-      by Hibernate and Hibernate Search, especially when interfaces are used in
-      lieu of their implementation. You can override the object type targeted
-      by Hibernate Search using the <methodname>targetElement</methodname>
-      parameter.</para>
-
-      <programlisting>@Entity
- at Indexed
-public class Address {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Long id;
-
-    @Field(index= Index.TOKENIZED)
-    private String street;
-
-    @IndexedEmbedded(depth = 1, prefix = "ownedBy_", <emphasis role="bold">targetElement = Owner.class</emphasis>)
-    @Target(Owner.class)
-    private Person ownedBy;
-
-
-    ...
-}
-
- at Embeddable
-public class Owner implements Person { ... }</programlisting>
-    </section>
-
-    <section>
-      <title>Boost factor</title>
-
-      <para>Lucene has the notion of a <emphasis>boost factor</emphasis>. It is a
-      way to give more weight to a field or to an indexed element over others
-      during the indexing process. You can use <literal>@Boost</literal> at
-      the @Field, method or class level.</para>
-
-      <programlisting>@Entity
- at Indexed(index="indexes/essays")
-<emphasis role="bold">@Boost(1.7f)</emphasis>
-public class Essay {
-    ...
-
-    @Id
-    @DocumentId
-    public Long getId() { return id; }
-
-    @Field(name="Abstract", index=Index.TOKENIZED, store=Store.YES, boost=<emphasis
-          role="bold">@Boost(2f)</emphasis>)
-    <emphasis role="bold">@Boost(1.5f)</emphasis>
-    public String getSummary() { return summary; }
-
-    @Lob
-    @Field(index=Index.TOKENIZED, boost=<emphasis role="bold">@Boost(1.2f)</emphasis>)
-    public String getText() { return text; }
-
-    @Field
-    public String getISBN() { return isbn; }
-
-}        </programlisting>
-
-      <para>In our example, Essay's probability to reach the top of the search
-      list will be multiplied by 1.7. The <methodname>summary</methodname>
-      field will be 3.0 (2 * 1.5) times more important than the
-      <methodname>isbn</methodname> field. The <methodname>text</methodname>
-      field will be 1.2 times more important than the
-      <methodname>isbn</methodname> field. Note that this explanation in
-      strictest terms is actually wrong, but it is simple and close enough to
-      reality for all practical purposes. Please check the Lucene
-      documentation or the excellent <citetitle>Lucene In Action</citetitle>
-      by Otis Gospodnetic and Erik Hatcher.</para>
-
-      <para><methodname>@Field.boost</methodname>,
-      <classname>@Boost</classname> on a property and
-      <classname>@Boost</classname> on a class are all cumulative.</para>
-    </section>
-
-    <section id="analyzer">
-      <title>Analyzer</title>
-
-      <para>The default analyzer class used to index tokenized fields is
-      configurable through the <literal>hibernate.search.analyzer</literal>
-      property. The default value for this property is
-      <classname>org.apache.lucene.analysis.standard.StandardAnalyzer</classname>.</para>
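-
-      <para>For example, in <filename>hibernate.properties</filename> this
-      could look like the following line (the analyzer class is only an
-      illustrative choice):</para>
-
-      <programlisting>hibernate.search.analyzer = org.apache.lucene.analysis.SimpleAnalyzer</programlisting>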
-
-      <para>You can also define the analyzer class per entity, property and
-      even per @Field (useful when multiple fields are indexed from a single
-      property).</para>
-
-      <programlisting>@Entity
- at Indexed
- at Analyzer(impl = EntityAnalyzer.class)
-public class MyEntity {
-    @Id
-    @GeneratedValue
-    @DocumentId
-    private Integer id;
-
-    @Field(index = Index.TOKENIZED)
-    private String name;
-
-    @Field(index = Index.TOKENIZED)
-    @Analyzer(impl = PropertyAnalyzer.class)
-    private String summary;
-
-    @Field(index = Index.TOKENIZED, analyzer = @Analyzer(impl = FieldAnalyzer.class))
-    private String body;
-
-    ...
-}</programlisting>
-
-      <para>In this example, <classname>EntityAnalyzer</classname> is used to
-      index all tokenized properties (e.g. <literal>name</literal>), except
-      <literal>summary</literal> and <literal>body</literal> which are indexed
-      with <classname>PropertyAnalyzer</classname> and
-      <classname>FieldAnalyzer</classname> respectively.</para>
-
-      <caution>
-        <para>Mixing different analyzers in the same entity is most of the
-        time a bad practice. It makes query building more complex and results
-        less predictable (for the novice), especially if you are using a
-        QueryParser (which uses the same analyzer for the whole query). As a
-        rule of thumb, for any given field the same analyzer should be used
-        for indexing and querying.</para>
-      </caution>
-
-      <section>
-        <title>Analyzer definitions</title>
-
-        <para>Analyzers can become quite complex to deal with, which is why
-        Hibernate Search introduces the notion of analyzer definitions. An
-        analyzer definition can be reused by many
-        <classname>@Analyzer</classname> declarations. An analyzer definition
-        is composed of:</para>
-
-        <itemizedlist>
-          <listitem>
-            <para>a name: the unique string used to refer to the
-            definition</para>
-          </listitem>
-
-          <listitem>
-            <para>a tokenizer: responsible for tokenizing the input stream
-            into individual words</para>
-          </listitem>
-
-          <listitem>
-            <para>a list of filters: each filter is responsible for removing,
-            modifying or sometimes even adding words to the stream provided by the
-            tokenizer</para>
-          </listitem>
-        </itemizedlist>
-
-        <para>This separation of tasks - a tokenizer followed by a list of
-        filters - allows easy reuse of each individual component and lets you
-        build your customized analyzer in a very flexible way (just like
-        Lego). Generally speaking the <classname>Tokenizer</classname> starts
-        the analysis process by turning the character input into tokens which
-        are then further processed by the <classname>TokenFilter</classname>s.
-        Hibernate Search supports this infrastructure by utilizing the Solr
-        analyzer framework. Make sure to add <filename>solr-core.jar</filename> and
-        <filename>solr-common.jar</filename> to your classpath to
-        use analyzer definitions. In case you also want to use a
-        Snowball stemmer, also include
-        <filename>solr-lucene-snowball.jar</filename>. Your distribution of
-        Hibernate Search provides these dependencies in its
-        <filename>lib</filename> directory.</para>
-
-        <programlisting>@AnalyzerDef(name="customanalyzer",
-        tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
-        filters = {
-                @TokenFilterDef(factory = ISOLatin1AccentFilterFactory.class),
-                @TokenFilterDef(factory = LowerCaseFilterFactory.class),
-                @TokenFilterDef(factory = StopFilterFactory.class, params = {
-                    @Parameter(name="words", value= "org/hibernate/search/test/analyzer/solr/stoplist.properties" ),
-                    @Parameter(name="ignoreCase", value="true")
-                })
-})
-public class Team {
-    ...
-}</programlisting>
-
-        <para>A tokenizer is defined by its factory which is responsible for
-        building the tokenizer and using the optional list of parameters. This
-        example uses the standard tokenizer. A filter is defined by its factory
-        which is responsible for creating the filter instance using the
-        optional parameters. In our example, the StopFilter filter is built
-        by reading the dedicated stop word property file and is expected to ignore
-        case. The list of parameters is dependent on the tokenizer or filter
-        factory.</para>
-
-        <warning>
-          <para>Filters are applied in the order they are defined in the
-          <classname>@AnalyzerDef</classname> annotation. Make sure to think
-          twice about this order.</para>
-        </warning>
-
-        <para>Once defined, an analyzer definition can be reused by an
-        <classname>@Analyzer</classname> declaration using the definition name
-        rather than declaring an implementation class.</para>
-
-        <programlisting>@Entity
- at Indexed
- at AnalyzerDef(name="customanalyzer", ... )
-public class Team {
-    @Id
-    @DocumentId
-    @GeneratedValue
-    private Integer id;
-
-    @Field
-    private String name;
-
-    @Field
-    private String location;
-
-    @Field <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
-    private String description;
-}</programlisting>
-
-        <para>Analyzer instances declared by
-        <classname>@AnalyzerDef</classname> are available by their name in the
-        <classname>SearchFactory</classname>.</para>
-
-        <programlisting>Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer("customanalyzer");</programlisting>
-
-        <para>This is quite useful when building queries. Fields in queries
-        should be analyzed with the same analyzer used to index the field so
-        that they speak a common "language": the same tokens are reused
-        between the query and the indexing process. This rule has some
-        exceptions but is true most of the time; respect it unless you know
-        what you are doing.</para>
-      </section>
-
-      <section>
-        <title>Available analyzers</title>
-
-        <para>Solr and Lucene come with a lot of useful default tokenizers and
-        filters. You can find a complete list of tokenizer factories and
-        filter factories at <ulink
-        url="http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters">http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters</ulink>.
-        Let's check a few of them.</para>
-
-        <table>
-          <title>Some of the tokenizers available</title>
-
-          <tgroup cols="3">
-            <thead>
-              <row>
-                <entry align="center">Factory</entry>
-
-                <entry align="center">Description</entry>
-
-                <entry align="center">parameters</entry>
-              </row>
-            </thead>
-
-            <tbody>
-              <row>
-                <entry>StandardTokenizerFactory</entry>
-
-                <entry>Use the Lucene StandardTokenizer</entry>
-
-                <entry>none</entry>
-              </row>
-
-              <row>
-                <entry>HTMLStripStandardTokenizerFactory</entry>
-
-                <entry>Remove HTML tags, keep the text and pass it to a
-                StandardTokenizer</entry>
-
-                <entry>none</entry>
-              </row>
-            </tbody>
-          </tgroup>
-        </table>
-
-        <table>
-          <title>Some of the filters available</title>
-
-          <tgroup cols="3">
-            <thead>
-              <row>
-                <entry align="center">Factory</entry>
-
-                <entry align="center">Description</entry>
-
-                <entry align="center">parameters</entry>
-              </row>
-            </thead>
-
-            <tbody>
-              <row>
-                <entry>StandardFilterFactory</entry>
-
-                <entry>Remove dots from acronyms and 's from words</entry>
-
-                <entry>none</entry>
-              </row>
-
-              <row>
-                <entry>LowerCaseFilterFactory</entry>
-
-                <entry>Lowercase words</entry>
-
-                <entry>none</entry>
-              </row>
-
-              <row>
-                <entry>StopFilterFactory</entry>
-
-                <entry>remove words (tokens) matching a list of stop
-                words</entry>
-
-                <entry><para><literal>words</literal>: points to a resource
-                file containing the stop words</para><para><literal>ignoreCase</literal>:
-                <literal>true</literal> if case should be ignored when comparing stop
-                words, <literal>false</literal> otherwise</para></entry>
-              </row>
-
-              <row>
-                <entry>SnowballPorterFilterFactory</entry>
-
-                <entry>Reduces a word to its root in a given language (e.g.
-                protect, protects and protection share the same root). Using such
-                a filter allows searches to match related words.</entry>
-
-                <entry><para><literal>language</literal>: Danish, Dutch,
-                English, Finnish, French, German, Italian, Norwegian,
-                Portuguese, Russian, Spanish, Swedish</para>and a few
-                more</entry>
-              </row>
-
-              <row>
-                <entry>ISOLatin1AccentFilterFactory</entry>
-
-                <entry>remove accents for languages like French</entry>
-
-                <entry>none</entry>
-              </row>
-            </tbody>
-          </tgroup>
-        </table>
-
-        <para>Don't hesitate to check all the implementations of
-        <classname>org.apache.solr.analysis.TokenizerFactory</classname> and
-        <classname>org.apache.solr.analysis.TokenFilterFactory</classname> in
-        your IDE to see the implementations available.</para>
-      </section>
-
-      <section id="analyzer-retrievinganalyzer">
-        <title>Retrieving an analyzer</title>
-
-        <para>During indexing time, Hibernate Search is using analyzers under
-        the hood for you. In some situations, retrieving analyzers can be
-        handy. If your domain model makes use of multiple analyzers (maybe to
-        benefit from stemming, use phonetic approximation and so on), you need
-        to make sure to use the same analyzers when you build your query.
-        </para>
-
-        <note>
-          <para>This rule can be broken but you need a good reason for it. If
-          you are unsure, use the same analyzers.</para>
-        </note>
-
-        <para>You can retrieve the scoped analyzer for a given entity used at
-        indexing time by Hibernate Search. A scoped analyzer is an analyzer
-        which applies the right analyzers depending on the field indexed:
-        multiple analyzers can be defined on a given entity, each one working
-        on an individual field, and a scoped analyzer unifies all these analyzers
-        into a context-aware analyzer. While the theory seems a bit complex,
-        using the right analyzer in a query is very easy. </para>
-
-        <example>
-          <title>Using the scoped analyzer when building a full-text
-          query</title>
-
-          <programlisting>org.apache.lucene.queryParser.QueryParser parser = new QueryParser(
-    "title", 
-    fullTextSession.getSearchFactory().getAnalyzer( Song.class )
-);
-
-org.apache.lucene.search.Query luceneQuery = parser.parse( "title:sky OR title_stemmed:diamond" );
-
-org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Song.class );
-
-List result = fullTextQuery.list(); //return a list of managed objects    </programlisting>
-        </example>
-
-        <para>In the example above, the song title is indexed in two fields:
-        the standard analyzer is used in the field <literal>title</literal>
-        and a stemming analyzer is used in the field
-        <literal>title_stemmed</literal>. By using the analyzer provided by
-        the search factory, the query uses the appropriate analyzer depending
-        on the field targeted.</para>
-
-        <note>
-          <para>This is true if you use the query parser which takes the
-          analyzer into consideration. If you do not use the Lucene query
-          parser, make sure to use the scoped analyzer and tokenize the query
-          accordingly. TODO: show an example</para>
-        </note>
-
-        <para>If your query targets more than one entity and you wish to use
-        your standard analyzer, make sure to describe it using an analyzer
-        definition. You can retrieve analyzers by their definition name using
-        <code>searchFactory.getAnalyzer(String)</code>.</para>
-      </section>
-    </section>
-  </section>
-
-  <section id="search-mapping-bridge">
-    <title>Property/Field Bridge</title>
-
-    <para>In Lucene all index fields have to be represented as Strings. For
-    this reason all entity properties annotated with <literal>@Field</literal>
-    have to be indexed in a String form. For most of your properties,
-    Hibernate Search does the translation job for you thanks to a built-in set
-    of bridges. In some cases, though, you need more fine-grained control over
-    the translation process.</para>
-
-    <section>
-      <title>Built-in bridges</title>
-
-      <para><literal>Hibernate Search</literal> comes bundled with a set of
-      built-in bridges between a Java property type and its full text
-      representation.</para>
-
-      <variablelist>
-        <varlistentry>
-          <term>null</term>
-
-          <listitem>
-            <para>null elements are not indexed. Lucene does not support null
-            elements and this does not make much sense either.</para>
-          </listitem>
-        </varlistentry>
-
-        <varlistentry>
-          <term>java.lang.String</term>
-
-          <listitem>
-            <para>Strings are indexed as is</para>
-          </listitem>
-        </varlistentry>
-
-        <varlistentry>
-          <term>short, Short, int, Integer, long, Long, float, Float,
-          double, Double, BigInteger, BigDecimal</term>
-
-          <listitem>
-            <para>Numbers are converted into their String representation. Note
-            that numbers cannot be compared by Lucene (i.e. used in range
-            queries) out of the box: they have to be padded <footnote>
-                <para>Using a Range query is debatable and has drawbacks; an
-                alternative approach is to use a Filter query which will
-                filter the query result to the appropriate range.</para>
-
-                <para>Hibernate Search will support a padding mechanism</para>
-              </footnote></para>
-          </listitem>
-        </varlistentry>
-
-        <varlistentry>
-          <term>java.util.Date</term>
-
-          <listitem>
-            <para>Dates are stored as yyyyMMddHHmmssSSS in GMT time
-            (200611072203012 for Nov 7th of 2006 4:03PM and 12ms EST). You
-            shouldn't really bother with the internal format. What is
-            important is that when using a DateRange Query, you should know
-            that the dates have to be expressed in GMT time.</para>
-
-            <para>Usually, storing the date up to the millisecond is not
-            necessary. <literal>@DateBridge</literal> defines the appropriate
-            resolution you are willing to store in the index ( <literal>
-            <literal>@DateBridge(resolution=Resolution.DAY)</literal>
-            </literal> ). The date pattern will then be truncated
-            accordingly.</para>
-
-            <programlisting>@Entity 
-@Indexed
-public class Meeting {
-    @Field(index=Index.UN_TOKENIZED)
-    <emphasis role="bold">@DateBridge(resolution=Resolution.MINUTE)</emphasis>
-    private Date date;
-    ...                 </programlisting>
-
-            <warning>
-              <para>A Date whose resolution is lower than
-              <literal>MILLISECOND</literal> cannot be a
-              <literal>@DocumentId</literal></para>
-            </warning>
-          </listitem>
-        </varlistentry>
-
-        <varlistentry>
-          <term>java.net.URI, java.net.URL</term>
-
-          <listitem>
-            <para>URI and URL are converted to their string
-            representation</para>
-          </listitem>
-        </varlistentry>
-
-        <varlistentry>
-          <term>java.lang.Class</term>
-
-          <listitem>
-            <para>Classes are converted to their fully qualified class name. The
-            thread context classloader is used when the class is
-            rehydrated.</para>
-          </listitem>
-        </varlistentry>
-      </variablelist>
-    </section>
-
-    <section>
-      <title>Custom Bridge</title>
-
-      <para>Sometimes, the built-in bridges of Hibernate Search do not cover
-      some of your property types, or the String representation used by the
-      bridge does not meet your requirements. The following paragraphs
-      describe several solutions to this problem.</para>
-
-      <section>
-        <title>StringBridge</title>
-
-        <para>The simplest custom solution is to give Hibernate Search an
-        implementation of your expected <emphasis>object to String</emphasis>
-        bridge. To do so you need to implement the
-        <literal>org.hibernate.search.bridge.StringBridge</literal>
-        interface.</para>
-
-        <programlisting>/**
- * Padding Integer bridge.
- * All numbers will be padded with 0 to match 5 digits
- *
- * @author Emmanuel Bernard
- */
-public class PaddedIntegerBridge implements <emphasis role="bold">StringBridge</emphasis> {
-
-    private int PADDING = 5;
-
-    <emphasis role="bold">public String objectToString(Object object)</emphasis> {
-        String rawInteger = ( (Integer) object ).toString();
-        if (rawInteger.length() &gt; PADDING) 
-            throw new IllegalArgumentException( "Try to pad on a number too big" );
-        StringBuilder paddedInteger = new StringBuilder( );
-        for ( int padIndex = rawInteger.length() ; padIndex &lt; PADDING ; padIndex++ ) {
-            paddedInteger.append('0');
-        }
-        return paddedInteger.append( rawInteger ).toString();
-    }
-}                </programlisting>
-
-        <para>Then any property or field can use this bridge thanks to the
-        <literal>@FieldBridge</literal> annotation</para>
-
-        <programlisting><emphasis role="bold">@FieldBridge(impl = PaddedIntegerBridge.class)</emphasis>
-private Integer length;                </programlisting>
-
-        <para>Parameters can be passed to the bridge implementation, making it
-        more flexible. To receive them, the bridge implementation implements the
-        <classname>ParameterizedBridge</classname> interface, and the
-        parameters are passed through the <literal>@FieldBridge</literal>
-        annotation.</para>
-
-        <programlisting>public class PaddedIntegerBridge implements StringBridge, <emphasis
-            role="bold">ParameterizedBridge</emphasis> {
-
-    public static String PADDING_PROPERTY = "padding";
-    private int padding = 5; //default
-
-    <emphasis role="bold">public void setParameterValues(Map parameters)</emphasis> {
-        Object padding = parameters.get( PADDING_PROPERTY );
-        if (padding != null) this.padding = (Integer) padding;
-    }
-
-    public String objectToString(Object object) {
-        String rawInteger = ( (Integer) object ).toString();
-        if (rawInteger.length() &gt; padding) 
-            throw new IllegalArgumentException( "Try to pad on a number too big" );
-        StringBuilder paddedInteger = new StringBuilder( );
-        for ( int padIndex = rawInteger.length() ; padIndex &lt; padding ; padIndex++ ) {
-            paddedInteger.append('0');
-        }
-        return paddedInteger.append( rawInteger ).toString();
-    }
-}
-
-
-//property
-@FieldBridge(impl = PaddedIntegerBridge.class,
-             <emphasis role="bold">params = @Parameter(name="padding", value="10")</emphasis>
-            )
-private Integer length;                </programlisting>
-
-        <para>The <classname>ParameterizedBridge</classname> interface can be
-        implemented by <classname>StringBridge</classname>,
-        <classname>TwoWayStringBridge</classname> and
-        <classname>FieldBridge</classname> implementations (see
-        below).</para>
-
-        <para>If you expect to use your bridge implementation on an id
-        property (i.e. annotated with <literal>@DocumentId</literal>), you need
-        to use a slightly extended version of <literal>StringBridge</literal>
-        named <classname>TwoWayStringBridge</classname>. <literal>Hibernate
-        Search</literal> needs to read the string representation of the
-        identifier and generate the object out of it. There is no difference
-        in the way the <literal>@FieldBridge</literal> annotation is
-        used.</para>
-
-        <programlisting>public class PaddedIntegerBridge implements TwoWayStringBridge, ParameterizedBridge {
-
-    public static String PADDING_PROPERTY = "padding";
-    private int padding = 5; //default
-
-    public void setParameterValues(Map parameters) {
-        Object padding = parameters.get( PADDING_PROPERTY );
-        if (padding != null) this.padding = (Integer) padding;
-    }
-
-    public String objectToString(Object object) {
-        String rawInteger = ( (Integer) object ).toString();
-        if (rawInteger.length() &gt; padding) 
-            throw new IllegalArgumentException( "Try to pad on a number too big" );
-        StringBuilder paddedInteger = new StringBuilder( );
-        for ( int padIndex = rawInteger.length() ; padIndex &lt; padding ; padIndex++ ) {
-            paddedInteger.append('0');
-        }
-        return paddedInteger.append( rawInteger ).toString();
-    }
-
-    <emphasis role="bold">public Object stringToObject(String stringValue)</emphasis> {
-        return new Integer(stringValue);
-    }
-}
-
-
-//id property
-@DocumentId
-@FieldBridge(impl = PaddedIntegerBridge.class,
-             params = @Parameter(name="padding", value="10") )
-private Integer id;
-                </programlisting>
-
-        <para>It is critically important for the two-way process to be
-        idempotent (i.e. object = stringToObject( objectToString( object ) )
-        ).</para>
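-
-        <para>As a minimal sketch using the bridge defined above, the
-        round-trip property can be checked like this:</para>
-
-        <programlisting>PaddedIntegerBridge bridge = new PaddedIntegerBridge();
-Integer length = new Integer( 42 );
-// the round-trip must give back an equal object
-assert length.equals( bridge.stringToObject( bridge.objectToString( length ) ) );</programlisting>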
-      </section>
-
-      <section>
-        <title>FieldBridge</title>
-
-        <para>Some use cases require more than a simple object to string
-        translation when mapping a property to a Lucene index. To give you
-        the most flexibility you can also implement a bridge as a
-        <classname>FieldBridge</classname>. This interface gives you a
-        property value and lets you map it the way you want in your Lucene
-        <classname>Document</classname>. This interface is very similar in its
-        concept to the <productname>Hibernate</productname>
-        <classname>UserType</classname>.</para>
-
-        <para>You can, for example, store a given property in two different
-        document fields.</para>
-
-        <programlisting>/**
- * Store the date in 3 different fields - year, month, day - to ease Range Query per
- * year, month or day (eg get all the elements of December for the last 5 years).
- * 
- * @author Emmanuel Bernard
- */
-public class DateSplitBridge implements FieldBridge {
-    private final static TimeZone GMT = TimeZone.getTimeZone("GMT");
-
-    <emphasis role="bold">public void set(String name, Object value, Document document, LuceneOptions luceneOptions)</emphasis> {
-        Date date = (Date) value;
-        Calendar cal = GregorianCalendar.getInstance(GMT);
-        cal.setTime(date);
-        int year = cal.get(Calendar.YEAR);
-        int month = cal.get(Calendar.MONTH) + 1;
-        int day = cal.get(Calendar.DAY_OF_MONTH);
-  
-        // set year
-        Field field = new Field(name + ".year", String.valueOf(year),
-            luceneOptions.getStore(), luceneOptions.getIndex(),
-            luceneOptions.getTermVector());
-        field.setBoost(luceneOptions.getBoost());
-        document.add(field);
-  
-        // set month and pad it if needed
-        field = new Field(name + ".month", month &lt; 10 ? "0" : ""
-            + String.valueOf(month), luceneOptions.getStore(),
-            luceneOptions.getIndex(), luceneOptions.getTermVector());
-        field.setBoost(luceneOptions.getBoost());
-        document.add(field);
-  
-        // set day and pad it if needed
-        field = new Field(name + ".day", day &lt; 10 ? "0" : ""
-            + String.valueOf(day), luceneOptions.getStore(),
-            luceneOptions.getIndex(), luceneOptions.getTermVector());
-        field.setBoost(luceneOptions.getBoost());
-        document.add(field);
-    }
-}
-
-//property
-<emphasis role="bold">@FieldBridge(impl = DateSplitBridge.class)</emphasis>
-private Date date;                </programlisting>
-      </section>
-
-      <section>
-        <title>@ClassBridge</title>
-
-        <para>It is sometimes useful to combine more than one property of a
-        given entity and index this combination in a specific way into the
-        Lucene index. The <classname>@ClassBridge</classname> and
-        <classname>@ClassBridges</classname> annotations can be defined at the
-        class level (as opposed to the property level). In this case the
-        custom field bridge implementation receives the entity instance as the
-        value parameter instead of a particular property. Though not shown in
-        this example, <classname>@ClassBridge</classname> supports the
-        <methodname>termVector</methodname> attribute discussed
-        previously.</para>
-
-        <programlisting>@Entity
-@Indexed
-<emphasis role="bold">@ClassBridge</emphasis>(name="branchnetwork",
-             index=Index.TOKENIZED,
-             store=Store.YES,
-             impl = <emphasis role="bold">CatFieldsClassBridge.class</emphasis>,
-             params = @Parameter( name="sepChar", value=" " ) )
-public class Department {
-    private int id;
-    private String network;
-    private String branchHead;
-    private String branch;
-    private Integer maxEmployees;
-    ...
-}
-
-
-public class CatFieldsClassBridge implements FieldBridge, ParameterizedBridge {
-    private String sepChar;
-
-    public void setParameterValues(Map parameters) {
-        this.sepChar = (String) parameters.get( "sepChar" );
-    }
-
-    <emphasis role="bold">public void set(String name, Object value, Document document, LuceneOptions luceneOptions)</emphasis> {
-        // In this particular class the name of the new field was passed
-        // from the name field of the ClassBridge Annotation. This is not
-        // a requirement. It just works that way in this instance. The
-        // actual name could be supplied by hard coding it below.
-        Department dep = (Department) value;
-        String fieldValue1 = dep.getBranch();
-        if ( fieldValue1 == null ) {
-            fieldValue1 = "";
-        }
-        String fieldValue2 = dep.getNetwork();
-        if ( fieldValue2 == null ) {
-            fieldValue2 = "";
-        }
-        String fieldValue = fieldValue1 + sepChar + fieldValue2;
-        Field field = new Field( name, fieldValue, luceneOptions.getStore(), luceneOptions.getIndex(), luceneOptions.getTermVector() );
-        field.setBoost( luceneOptions.getBoost() );
-        document.add( field );
-   }
-}</programlisting>
-
-        <para>In this example, the particular
-        <classname>CatFieldsClassBridge</classname> is applied to the
-        <literal>department</literal> instance; the field bridge then
-        concatenates both branch and network and indexes the
-        concatenation.</para>
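-
-        <para>The combined <literal>branchnetwork</literal> field can then be
-        targeted like any other field in a Lucene query; for example (the
-        values here are purely illustrative):</para>
-
-        <programlisting>+branchnetwork:(accounting intranet)</programlisting>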
-      </section>
-    </section>
-  </section>
-
-  <section id="provided-id">
-    <title>Providing your own id</title>
-
-    <warning>
-      <para>This part of the documentation is a work in progress.</para>
-    </warning>
-
-    <para>You can provide your own id for Hibernate Search if you are
-    extending the internals. You will have to generate a unique value so it
-    can be given to Lucene to be indexed. This will have to be given to
-    Hibernate Search when you create an org.hibernate.search.Work object - the
-    document id is required in the constructor.</para>
-
-    <section id="ProvidedId">
-      <title>The @ProvidedId annotation</title>
-
-      <para>Unlike the conventional Hibernate Search API and @DocumentId, this
-      annotation is used on the class and not on a field. You can also provide
-      your own bridge implementation when you use this annotation, via the
-      bridge() attribute of @ProvidedId. Also, if you annotate a
-      class with @ProvidedId, your subclasses will also get the annotation -
-      but it is not done by using java.lang.annotation.@Inherited. Be
-      sure however, <emphasis>not</emphasis> to use this annotation with
-      @DocumentId as your system will break.</para>
-
-      <programlisting>@ProvidedId (bridge = org.my.own.package.MyCustomBridge)
-@Indexed
-public class MyClass {
-
-    @Field
-    String MyString;
-
-    ...
-}</programlisting>
-    </section>
-  </section>
-</chapter>
\ No newline at end of file

Copied: search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml (from rev 15398, search/trunk/doc/reference/en/modules/mapping.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/mapping.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,1285 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Hibernate, Relational Persistence for Idiomatic Java
+  ~
+  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
+  ~ indicated by the @author tags or express copyright attribution
+  ~ statements applied by the authors.  All third-party contributions are
+  ~ distributed under license by Red Hat Middleware LLC.
+  ~
+  ~ This copyrighted material is made available to anyone wishing to use, modify,
+  ~ copy, or redistribute it subject to the terms and conditions of the GNU
+  ~ Lesser General Public License, as published by the Free Software Foundation.
+  ~
+  ~ This program is distributed in the hope that it will be useful,
+  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
+  ~ for more details.
+  ~
+  ~ You should have received a copy of the GNU Lesser General Public License
+  ~ along with this distribution; if not, write to:
+  ~ Free Software Foundation, Inc.
+  ~ 51 Franklin Street, Fifth Floor
+  ~ Boston, MA  02110-1301  USA
+  -->
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
+"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
+<chapter id="search-mapping" revision="3">
+  <!--  $Id$ -->
+
+  <title>Mapping entities to the index structure</title>
+
+  <para>All the metadata information needed to index entities is described
+  through some Java annotations. There is no need for xml mapping files (in
+  fact there currently exists no xml configuration option) nor for a list of
+  indexed entities. The list is discovered at startup time by scanning the
+  Hibernate mapped entities.</para>
+
+  <section id="search-mapping-entity" revision="3">
+    <title>Mapping an entity</title>
+
+    <section>
+      <title>Basic mapping</title>
+
+      <para>First, we must declare a persistent class as indexable. This is
+      done by annotating the class with <literal>@Indexed</literal> (all
+      entities not annotated with <literal>@Indexed</literal> will be ignored
+      by the indexing process):</para>
+
+      <programlisting>@Entity
+<emphasis role="bold">@Indexed(index="indexes/essays")</emphasis>
+public class Essay {
+    ...
+}</programlisting>
+
+      <para>The <literal>index</literal> attribute tells Hibernate what the
+      Lucene directory name is (usually a directory on your file system). If
+      you wish to define a base directory for all Lucene indexes, you can use
+      the <literal>hibernate.search.default.indexBase</literal> property in
+      your configuration file. Each entity instance will be represented by a
+      Lucene <classname>Document</classname> inside the given index (aka
+      Directory).</para>
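+
+      <para>As an illustration, the base directory could be set like this in
+      your configuration properties (the path shown here is only an example
+      value):</para>
+
+      <programlisting>hibernate.search.default.indexBase = /var/lucene/indexes</programlisting>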
+
+      <para>For each property (or attribute) of your entity, you have the
+      ability to describe how it will be indexed. The default (i.e. no
+      annotation) means that the property is completely ignored by the indexing
+      process. <literal>@Field</literal> does declare a property as indexed.
+      When indexing an element to a Lucene document you can specify how it is
+      indexed:</para>
+
+      <itemizedlist>
+        <listitem>
+          <para><literal>name</literal> : describes under which name the
+          property should be stored in the Lucene Document. The default value
+          is the property name (following the JavaBeans convention)</para>
+        </listitem>
+
+        <listitem>
+          <para><literal>store</literal> : describes whether or not the
+          property is stored in the Lucene index. You can store the value
+          <literal>Store.YES</literal> (consuming more space in the index but
+          allowing projection, see <xref linkend="projections" /> for more
+          information), store it in a compressed way
+          <literal>Store.COMPRESS</literal> (this does consume more CPU), or
+          avoid any storage <literal>Store.NO</literal> (this is the default
+          value). When a property is stored, you can retrieve it from the
+          Lucene Document (note that this is not related to whether the
+          element is indexed or not).</para>
+        </listitem>
+
+        <listitem>
+          <para>index: describes how the element is indexed (i.e. the process
+          used to index the property and the type of information stored). The
+          different values are <literal>Index.NO</literal> (no indexing, i.e.
+          cannot be found by a query), <literal>Index.TOKENIZED</literal> (use
+          an analyzer to process the property),
+          <literal>Index.UN_TOKENIZED</literal> (no analyzer pre-processing),
+          <literal>Index.NO_NORMS</literal> (do not store the normalization
+          data). The default value is <literal>TOKENIZED</literal>.</para>
+        </listitem>
+
+        <listitem>
+          <para>termVector: describes collections of term-frequency pairs.
+          This attribute enables term vectors to be stored during indexing so
+          they are available within documents. The default value is
+          TermVector.NO.</para>
+
+          <para>The different values of this attribute are</para>
+
+          <informaltable align="left" width="">
+            <tgroup cols="2">
+              <colspec align="center" />
+
+              <thead>
+                <row>
+                  <entry align="center">Value</entry>
+
+                  <entry align="center">Definition</entry>
+                </row>
+              </thead>
+
+              <tbody>
+                <row>
+                  <entry align="left">TermVector.YES</entry>
+
+                  <entry>Store the term vectors of each document. This
+                  produces two synchronized arrays, one contains document
+                  terms and the other contains the term's frequency.</entry>
+                </row>
+
+                <row>
+                  <entry align="left">TermVector.NO</entry>
+
+                  <entry>Do not store term vectors.</entry>
+                </row>
+
+                <row>
+                  <entry align="left">TermVector.WITH_OFFSETS</entry>
+
+                  <entry>Store the term vector and token offset information.
+                  This is the same as TermVector.YES plus it contains the
+                  starting and ending offset position information for the
+                  terms.</entry>
+                </row>
+
+                <row>
+                  <entry align="left">TermVector.WITH_POSITIONS</entry>
+
+                  <entry>Store the term vector and token position information.
+                  This is the same as TermVector.YES plus it contains the
+                  ordinal positions of each occurrence of a term in a
+                  document.</entry>
+                </row>
+
+                <row>
+                  <entry
+                  align="left">TermVector.WITH_POSITIONS_OFFSETS</entry>
+
+                  <entry>Store the term vector, token position and offset
+                  information. This is a combination of the YES, WITH_OFFSETS
+                  and WITH_POSITIONS.</entry>
+                </row>
+              </tbody>
+            </tgroup>
+          </informaltable>
+        </listitem>
+      </itemizedlist>
+
+      <para>These attributes are part of the <literal>@Field</literal>
+      annotation.</para>
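+
+      <para>As a short illustration (the field and property names below are
+      made up), these attributes can be combined on a single
+      <literal>@Field</literal>:</para>
+
+      <programlisting>@Field(name = "content",
+       store = Store.YES,
+       index = Index.TOKENIZED,
+       termVector = TermVector.WITH_POSITIONS_OFFSETS)
+private String content;</programlisting>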
+
+      <para>Whether or not you want to store the data depends on how you wish
+      to use the index query result. For a regular Hibernate Search usage,
+      storing is not necessary. However you might want to store some fields to
+      subsequently project them (see <xref linkend="projections" /> for more
+      information).</para>
+
+      <para>Whether or not you want to tokenize a property depends on whether
+      you wish to search the element as is, or by the words it contains. It
+      makes sense to tokenize a text field, but it does not for a date
+      field (or an id field). Note that fields used for sorting must not be
+      tokenized.</para>
+
+      <para>Finally, the id property of an entity is a special property used
+      by Hibernate Search to ensure index uniqueness of a given entity. By
+      design, an id has to be stored and must not be tokenized. To mark a
+      property as the index id, use the <literal>@DocumentId</literal>
+      annotation.</para>
+
+      <programlisting>@Entity
+ at Indexed(index="indexes/essays")
+public class Essay {
+    ...
+
+    @Id
+    <emphasis role="bold">@DocumentId</emphasis>
+    public Long getId() { return id; }
+
+    <emphasis role="bold">@Field(name="Abstract", index=Index.TOKENIZED, store=Store.YES)</emphasis>
+    public String getSummary() { return summary; }
+
+    @Lob
+    <emphasis role="bold">@Field(index=Index.TOKENIZED)</emphasis>
+    public String getText() { return text; }
+}</programlisting>
+
+      <para>These annotations define an index with three fields:
+      <literal>id</literal> , <literal>Abstract</literal> and
+      <literal>text</literal> . Note that by default the field name is
+      decapitalized, following the JavaBean specification.</para>
+
+      <note>
+        <para>You <emphasis>must</emphasis> specify
+        <literal>@DocumentId</literal> on the identifier property of your
+        entity class.</para>
+      </note>
+    </section>
+
+    <section>
+      <title>Mapping properties multiple times</title>
+
+      <para>It is sometimes needed to map a property multiple times per index,
+      with slightly different indexing strategies. In particular, sorting a query
+      by field requires the field to be <literal>UN_TOKENIZED</literal>. If
+      one wants to search by words in this property and still sort it, one needs
+      to index it twice - once tokenized and once untokenized. @Fields allows you
+      to achieve this goal.</para>
+
+      <programlisting>@Entity
+ at Indexed(index = "Book" )
+public class Book {
+    @Fields( {
+            @Field(index = Index.TOKENIZED),
+            @Field(name = "summary_forSort", index = Index.UN_TOKENIZED, store = Store.YES)
+            } )
+    public String getSummary() {
+        return summary;
+    }
+
+    ...
+}</programlisting>
+
+      <para>The field summary is indexed twice, once as
+      <literal>summary</literal> in a tokenized way, and once as
+      <literal>summary_forSort</literal> in an untokenized way. @Field
+      supports 2 attributes useful when @Fields is used:</para>
+
+      <itemizedlist>
+        <listitem>
+          <para>analyzer: defines an @Analyzer annotation per field rather than
+          per property</para>
+        </listitem>
+
+        <listitem>
+          <para>bridge: defines a @FieldBridge annotation per field rather
+          than per property</para>
+        </listitem>
+      </itemizedlist>
+
+      <para>See below for more information about analyzers and field
+      bridges.</para>
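+
+      <para>As a sketch (the analyzer definition name
+      <literal>customanalyzer</literal> is assumed to be defined elsewhere), a
+      per-field analyzer can be combined with <literal>@Fields</literal> like
+      this:</para>
+
+      <programlisting>@Fields( {
+        @Field(index = Index.TOKENIZED,
+               analyzer = @Analyzer(definition = "customanalyzer") ),
+        @Field(name = "summary_forSort", index = Index.UN_TOKENIZED, store = Store.YES)
+        } )
+public String getSummary() {
+    return summary;
+}</programlisting>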
+    </section>
+
+    <section id="search-mapping-associated">
+      <title>Embedded and associated objects</title>
+
+      <para>Associated objects as well as embedded objects can be indexed as
+      part of the root entity index. It is necessary if you expect to search a
+      given entity based on properties of the associated object(s). In the
+      following example, the use case is to return the places whose city is
+      Atlanta (In the Lucene query parser language, it would translate into
+      <code>address.city:Atlanta</code>).</para>
+
+      <programlisting>@Entity
+@Indexed
+public class Place {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Long id;
+
+    @Field( index = Index.TOKENIZED )
+    private String name;
+
+    @OneToOne( cascade = { CascadeType.PERSIST, CascadeType.REMOVE } )
+    <emphasis role="bold">@IndexedEmbedded</emphasis>
+    private Address address;
+    ....
+}
+
+@Entity
+@Indexed
+public class Address {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Long id;
+
+    @Field(index=Index.TOKENIZED)
+    private String street;
+
+    @Field(index=Index.TOKENIZED)
+    private String city;
+
+    <emphasis role="bold">@ContainedIn</emphasis>
+    @OneToMany(mappedBy="address")
+    private Set&lt;Place&gt; places;
+    ...
+}</programlisting>
+
+      <para>In this example, the place fields will be indexed in the
+      <literal>Place</literal> index. The <literal>Place</literal> index
+      documents will also contain the fields <literal>address.id</literal>,
+      <literal>address.street</literal>, and <literal>address.city</literal>
+      which you will be able to query. This is enabled by the
+      <literal>@IndexedEmbedded</literal> annotation.</para>
+
+      <para>Be careful. Because the data is denormalized in the Lucene index
+      when using the <classname>@IndexedEmbedded</classname> technique,
+      Hibernate Search needs to be aware of any change in the Place object and
+      any change in the Address object to keep the index up to date. To make
+      sure the Place Lucene document is updated when its Address changes, you
+      need to mark the other side of the bidirectional relationship with
+      <classname>@ContainedIn</classname>.</para>
+
+      <para><literal>@ContainedIn</literal> is only useful on associations
+      pointing to entities as opposed to embedded (collection of)
+      objects.</para>
+
+      <para>Let's make our example a bit more complex:</para>
+
+      <programlisting>@Entity
+@Indexed
+public class Place {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Long id;
+
+    @Field( index = Index.TOKENIZED )
+    private String name;
+
+    @OneToOne( cascade = { CascadeType.PERSIST, CascadeType.REMOVE } )
+    <emphasis role="bold">@IndexedEmbedded</emphasis>
+    private Address address;
+    ....
+}
+
+@Entity
+@Indexed
+public class Address {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Long id;
+
+    @Field(index=Index.TOKENIZED)
+    private String street;
+
+    @Field(index=Index.TOKENIZED)
+    private String city;
+
+    <emphasis role="bold">@IndexedEmbedded(depth = 1, prefix = "ownedBy_")</emphasis>
+    private Owner ownedBy;
+
+    <emphasis role="bold">@ContainedIn</emphasis>
+    @OneToMany(mappedBy="address")
+    private Set&lt;Place&gt; places;
+    ...
+}
+
+@Embeddable
+public class Owner {
+    @Field(index = Index.TOKENIZED)
+    private String name;
+   ...
+}</programlisting>
+
+      <para>Any <literal>@*ToMany, @*ToOne</literal> and
+      <literal>@Embedded</literal> attribute can be annotated with
+      <literal>@IndexedEmbedded</literal>. The attributes of the associated
+      class will then be added to the main entity index. In the previous
+      example, the index will contain the following fields</para>
+
+      <itemizedlist>
+        <listitem>
+          <para>id</para>
+        </listitem>
+
+        <listitem>
+          <para>name</para>
+        </listitem>
+
+        <listitem>
+          <para>address.street</para>
+        </listitem>
+
+        <listitem>
+          <para>address.city</para>
+        </listitem>
+
+        <listitem>
+          <para>address.ownedBy_name</para>
+        </listitem>
+      </itemizedlist>
+
+      <para>The default prefix is <literal>propertyName.</literal>, following
+      the traditional object navigation convention. You can override it using
+      the <literal>prefix</literal> attribute as it is shown on the
+      <literal>ownedBy</literal> property.</para>
+
+      <para><literal>depth</literal> is necessary when the object graph
+      contains a cyclic dependency of classes (not instances), for example if
+      <classname>Owner</classname> points to <classname>Place</classname>.
+      Hibernate Search will stop including indexed embedded attributes after
+      reaching the expected depth (or when the object graph boundaries are
+      reached). A class having a self reference is an example of cyclic
+      dependency. In our example, because <literal>depth</literal> is set to
+      1, any <literal>@IndexedEmbedded</literal> attribute in Owner (if any)
+      will be ignored.</para>
+
+      <para>Such a feature (<literal>@IndexedEmbedded</literal>) is very
+      useful to express queries referring to associated objects, such
+      as:</para>
+
+      <itemizedlist>
+        <listitem>
+          <para>Return places where name contains JBoss and where address city
+          is Atlanta. In Lucene query this would be</para>
+
+          <programlisting>+name:jboss +address.city:atlanta  </programlisting>
+        </listitem>
+
+        <listitem>
+          <para>Return places where name contains JBoss and where owner's name
+          contain Joe. In Lucene query this would be</para>
+
+          <programlisting>+name:jboss +address.ownedBy_name:joe  </programlisting>
+        </listitem>
+      </itemizedlist>
+
+      <para>In a way it mimics the relational join operation in a more
+      efficient way (at the cost of data duplication). Remember that, out of
+      the box, Lucene indexes have no notion of association; the join
+      operation is simply non-existent. It might help to keep the relational
+      model normalized while benefiting from the full text index speed and
+      feature richness.</para>
+
+      <para><note>
+          <para>An associated object can itself (but does not have to) be
+          <literal>@Indexed</literal></para>
+        </note></para>
+
+      <para>When @IndexedEmbedded points to an entity, the association has to
+      be bidirectional and the other side has to be annotated
+      <literal>@ContainedIn</literal> (as seen in the previous example). If
+      not, Hibernate Search has no way to update the root index when the
+      associated entity is updated (in our example, a <literal>Place</literal>
+      index document has to be updated when the associated
+      <classname>Address</classname> instance is updated).</para>
+
+      <para>Sometimes, the object type annotated by
+      <classname>@IndexedEmbedded</classname> is not the object type targeted
+      by Hibernate and Hibernate Search, especially when interfaces are used in
+      lieu of their implementation. You can override the object type targeted
+      by Hibernate Search using the <methodname>targetElement</methodname>
+      parameter.</para>
+
+      <programlisting>@Entity
+@Indexed
+public class Address {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Long id;
+
+    @Field(index= Index.TOKENIZED)
+    private String street;
+
+    @IndexedEmbedded(depth = 1, prefix = "ownedBy_", <emphasis role="bold">targetElement = Owner.class</emphasis>)
+    @Target(Owner.class)
+    private Person ownedBy;
+
+
+    ...
+}
+
+@Embeddable
+public class Owner implements Person { ... }</programlisting>
+    </section>
+
+    <section>
+      <title>Boost factor</title>
+
+      <para>Lucene has the notion of <emphasis>boost factor</emphasis>. It's a
+      way to give more weight to a field or to an indexed element over others
+      during the indexing process. You can use <literal>@Boost</literal> at
+      the @Field, method or class level.</para>
+
+      <programlisting>@Entity
+ at Indexed(index="indexes/essays")
+<emphasis role="bold">@Boost(1.7f)</emphasis>
+public class Essay {
+    ...
+
+    @Id
+    @DocumentId
+    public Long getId() { return id; }
+
+    @Field(name="Abstract", index=Index.TOKENIZED, store=Store.YES, boost=<emphasis
+          role="bold">@Boost(2f)</emphasis>)
+    <emphasis role="bold">@Boost(1.5f)</emphasis>
+    public String getSummary() { return summary; }
+
+    @Lob
+    @Field(index=Index.TOKENIZED, boost=<emphasis role="bold">@Boost(1.2f)</emphasis>)
+    public String getText() { return text; }
+
+    @Field
+    public String getISBN() { return isbn; }
+
+}        </programlisting>
+
+      <para>In our example, Essay's probability to reach the top of the search
+      list will be multiplied by 1.7. The <methodname>summary</methodname>
+      field will be 3.0 (2 * 1.5) times more important than the
+      <methodname>isbn</methodname> field. The <methodname>text</methodname>
+      field will be 1.2 times more important than the
+      <methodname>isbn</methodname> field. Note that this explanation in
+      strictest terms is actually wrong, but it is simple and close enough to
+      reality for all practical purposes. Please check the Lucene
+      documentation or the excellent <citetitle>Lucene In Action </citetitle>
+      from Otis Gospodnetic and Erik Hatcher.</para>
+
+      <para><methodname>@Field.boost</methodname>,
+      <classname>@Boost</classname> on a property and
+      <classname>@Boost</classname> on a class are all cumulative.</para>
+    </section>
+
+    <section id="analyzer">
+      <title>Analyzer</title>
+
+      <para>The default analyzer class used to index tokenized fields is
+      configurable through the <literal>hibernate.search.analyzer</literal>
+      property. The default value for this property is
+      <classname>org.apache.lucene.analysis.standard.StandardAnalyzer</classname>.</para>
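+
+      <para>For example, the default analyzer can be switched with a single
+      configuration property (the analyzer chosen here is only an
+      example):</para>
+
+      <programlisting>hibernate.search.analyzer = org.apache.lucene.analysis.SimpleAnalyzer</programlisting>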
+
+      <para>You can also define the analyzer class per entity, property and
+      even per @Field (useful when multiple fields are indexed from a single
+      property).</para>
+
+      <programlisting>@Entity
+@Indexed
+@Analyzer(impl = EntityAnalyzer.class)
+public class MyEntity {
+    @Id
+    @GeneratedValue
+    @DocumentId
+    private Integer id;
+
+    @Field(index = Index.TOKENIZED)
+    private String name;
+
+    @Field(index = Index.TOKENIZED)
+    @Analyzer(impl = PropertyAnalyzer.class)
+    private String summary;
+
+    @Field(index = Index.TOKENIZED, analyzer = @Analyzer(impl = FieldAnalyzer.class))
+    private String body;
+
+    ...
+}</programlisting>
+
+      <para>In this example, <classname>EntityAnalyzer</classname> is used to
+      index all tokenized properties (eg. <literal>name</literal>), except
+      <literal>summary</literal> and <literal>body</literal> which are indexed
+      with <classname>PropertyAnalyzer</classname> and
+      <classname>FieldAnalyzer</classname> respectively.</para>
+
+      <caution>
+        <para>Mixing different analyzers in the same entity is most of the
+        time a bad practice. It makes query building more complex and results
+        less predictable (for the novice), especially if you are using a
+        QueryParser (which uses the same analyzer for the whole query). As a
+        rule of thumb, for any given field the same analyzer should be used
+        for indexing and querying.</para>
+      </caution>
+
+      <section>
+        <title>Analyzer definitions</title>
+
+        <para>Analyzers can become quite complex to deal with. For this reason
+        Hibernate Search introduces the notion of analyzer definitions. An
+        analyzer definition can be reused by many
+        <classname>@Analyzer</classname> declarations. An analyzer definition
+        is composed of:</para>
+
+        <itemizedlist>
+          <listitem>
+            <para>a name: the unique string used to refer to the
+            definition</para>
+          </listitem>
+
+          <listitem>
+            <para>a tokenizer: responsible for tokenizing the input stream
+            into individual words</para>
+          </listitem>
+
+          <listitem>
+            <para>a list of filters: each filter is responsible to remove,
+            modify or sometimes even add words into the stream provided by the
+            tokenizer</para>
+          </listitem>
+        </itemizedlist>
+
+        <para>This separation of tasks - a tokenizer followed by a list of
+        filters - allows easy reuse of each individual component and lets you
+        build your customized analyzer in a very flexible way (just like
+        lego). Generally speaking the <classname>Tokenizer</classname> starts
+        the analysis process by turning the character input into tokens which
+        are then further processed by the <classname>TokenFilter</classname>s.
+        Hibernate Search supports this infrastructure by utilizing the Solr
+        analyzer framework. Make sure to add <filename>solr-core.jar</filename>
+        and <filename>solr-common.jar</filename> to your classpath to
+        use analyzer definitions. If you also want to use a
+        snowball stemmer, also include
+        <filename>lucene-snowball.jar</filename>. Your distribution of
+        Hibernate Search provides these dependencies in its
+        <filename>lib</filename> directory.</para>
+
+        <programlisting>@AnalyzerDef(name="customanalyzer",
+        tokenizer = @TokenizerDef(factory = StandardTokenizerFactory.class),
+        filters = {
+                @TokenFilterDef(factory = ISOLatin1AccentFilterFactory.class),
+                @TokenFilterDef(factory = LowerCaseFilterFactory.class),
+                @TokenFilterDef(factory = StopFilterFactory.class, params = {
+                    @Parameter(name="words", value= "org/hibernate/search/test/analyzer/solr/stoplist.properties" ),
+                    @Parameter(name="ignoreCase", value="true")
+                })
+})
+public class Team {
+    ...
+}</programlisting>
+
+        <para>A tokenizer is defined by its factory which is responsible for
+        building the tokenizer and using the optional list of parameters. This
+        example uses the standard tokenizer. A filter is defined by its factory
+        which is responsible for creating the filter instance using the
+        optional parameters. In our example, the StopFilter filter is built
+        reading the dedicated words property file and is expected to ignore
+        case. The list of parameters is dependent on the tokenizer or filter
+        factory.</para>
+
+        <warning>
+          <para>Filters are applied in the order they are defined in the
+          <classname>@AnalyzerDef</classname> annotation. Make sure to think
+          twice about this order.</para>
+        </warning>
+
+        <para>Once defined, an analyzer definition can be reused by an
+        <classname>@Analyzer</classname> declaration using the definition name
+        rather than declaring an implementation class.</para>
+
+        <programlisting>@Entity
+@Indexed
+@AnalyzerDef(name="customanalyzer", ... )
+public class Team {
+    @Id
+    @DocumentId
+    @GeneratedValue
+    private Integer id;
+
+    @Field
+    private String name;
+
+    @Field
+    private String location;
+
+    @Field <emphasis role="bold">@Analyzer(definition = "customanalyzer")</emphasis>
+    private String description;
+}</programlisting>
+
+        <para>Analyzer instances declared by
+        <classname>@AnalyzerDef</classname> are available by their name in the
+        <classname>SearchFactory</classname>.</para>
+
+        <programlisting>Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer("customanalyzer");</programlisting>
+
+        <para>This is quite useful when building queries. Fields in queries
+        should be analyzed with the same analyzer used to index the field so
+        that they speak a common "language": the same tokens are reused
+        between the query and the indexing process. This rule has some
+        exceptions but is true most of the time; respect it unless you know
+        what you are doing.</para>
+      </section>
+
+      <section>
+        <title>Available analyzers</title>
+
+        <para>Solr and Lucene come with a lot of useful default tokenizers and
+        filters. You can find a complete list of tokenizer factories and
+        filter factories at <ulink
+        url="http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters">http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters</ulink>.
+        Let's check a few of them.</para>
+
+        <table>
+          <title>Some of the tokenizers available</title>
+
+          <tgroup cols="3">
+            <thead>
+              <row>
+                <entry align="center">Factory</entry>
+
+                <entry align="center">Description</entry>
+
+                <entry align="center">parameters</entry>
+              </row>
+            </thead>
+
+            <tbody>
+              <row>
+                <entry>StandardTokenizerFactory</entry>
+
+                <entry>Use the Lucene StandardTokenizer</entry>
+
+                <entry>none</entry>
+              </row>
+
+              <row>
+                <entry>HTMLStripStandardTokenizerFactory</entry>
+
+                <entry>Remove HTML tags, keep the text and pass it to a
+                StandardTokenizer</entry>
+
+                <entry>none</entry>
+              </row>
+            </tbody>
+          </tgroup>
+        </table>
+
+        <table>
+          <title>Some of the filters available</title>
+
+          <tgroup cols="3">
+            <thead>
+              <row>
+                <entry align="center">Factory</entry>
+
+                <entry align="center">Description</entry>
+
+                <entry align="center">parameters</entry>
+              </row>
+            </thead>
+
+            <tbody>
+              <row>
+                <entry>StandardFilterFactory</entry>
+
+                <entry>Remove dots from acronyms and 's from words</entry>
+
+                <entry>none</entry>
+              </row>
+
+              <row>
+                <entry>LowerCaseFilterFactory</entry>
+
+                <entry>Lowercase words</entry>
+
+                <entry>none</entry>
+              </row>
+
+              <row>
+                <entry>StopFilterFactory</entry>
+
+                <entry>remove words (tokens) matching a list of stop
+                words</entry>
+
+                <entry><para><literal>words</literal>: points to a resource
+                file containing the stop words</para><para><literal>ignoreCase</literal>:
+                <literal>true</literal> if case should be ignored when comparing stop
+                words, <literal>false</literal> otherwise</para></entry>
+              </row>
+
+              <row>
+                <entry>SnowballPorterFilterFactory</entry>
+
+                <entry>Reduces a word to its root in a given language (e.g.
+                protect, protects and protection share the same root). Using such
+                a filter allows searches to match related words.</entry>
+
+                <entry><para><literal>language</literal>: Danish, Dutch,
+                English, Finnish, French, German, Italian, Norwegian,
+                Portuguese, Russian, Spanish, Swedish</para>and a few
+                more</entry>
+              </row>
+
+              <row>
+                <entry>ISOLatin1AccentFilterFactory</entry>
+
+                <entry>remove accents for languages like French</entry>
+
+                <entry>none</entry>
+              </row>
+            </tbody>
+          </tgroup>
+        </table>
+
+        <para>Don't hesitate to check all the implementations of
+        <classname>org.apache.solr.analysis.TokenizerFactory</classname> and
+        <classname>org.apache.solr.analysis.TokenFilterFactory</classname> in
+        your IDE to see the implementations available.</para>
+      </section>
+
+      <section id="analyzer-retrievinganalyzer">
+        <title>Retrieving an analyzer</title>
+
+        <para>During indexing time, Hibernate Search is using analyzers under
+        the hood for you. In some situations, retrieving analyzers can be
+        handy. If your domain model makes use of multiple analyzers (maybe to
+        benefit from stemming, use phonetic approximation and so on), you need
+        to make sure to use the same analyzers when you build your
+        query.</para>
+
+        <note>
+          <para>This rule can be broken but you need a good reason for it. If
+          you are unsure, use the same analyzers.</para>
+        </note>
+
+        <para>You can retrieve the scoped analyzer for a given entity used at
+        indexing time by Hibernate Search. A scoped analyzer applies the
+        right analyzer depending on the field indexed: multiple analyzers can
+        be defined on a given entity, each one working on an individual
+        field; a scoped analyzer unifies all these analyzers into a
+        context-aware analyzer. While the theory seems a bit complex, using
+        the right analyzer in a query is very easy.</para>
+
+        <example>
+          <title>Using the scoped analyzer when building a full-text
+          query</title>
+
+          <programlisting>org.apache.lucene.queryParser.QueryParser parser = new QueryParser(
+    "title", 
+    fullTextSession.getSearchFactory().getAnalyzer( Song.class )
+);
+
+org.apache.lucene.search.Query luceneQuery = parser.parse( "title:sky OR title_stemmed:diamond" );
+
+org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Song.class );
+
+List result = fullTextQuery.list(); //return a list of managed objects    </programlisting>
+        </example>
+
+        <para>In the example above, the song title is indexed in two fields:
+        the standard analyzer is used in the field <literal>title</literal>
+        and a stemming analyzer is used in the field
+        <literal>title_stemmed</literal>. By using the analyzer provided by
+        the search factory, the query uses the appropriate analyzer depending
+        on the field targeted.</para>
+
+        <note>
+          <para>This only holds if you use a query parser, which takes the
+          analyzer into consideration. If you build the Lucene query
+          programmatically, make sure to use the scoped analyzer and tokenize
+          the query terms accordingly; a rough sketch is shown below.</para>
+        </note>
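+
+        <para>The following is a rough sketch (assuming the Lucene 2.4
+        <classname>TokenStream</classname> API and the <literal>Song</literal>
+        mapping from the example above) of how the scoped analyzer can be
+        used to tokenize a term before building the query
+        programmatically:</para>
+
+        <programlisting>Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer( Song.class );
+
+//tokenize the user input with the same analyzer used at indexing time
+TokenStream stream = analyzer.tokenStream( "title_stemmed", new StringReader( "diamond" ) );
+BooleanQuery luceneQuery = new BooleanQuery();
+Token token;
+while ( ( token = stream.next() ) != null ) {
+    luceneQuery.add( new TermQuery( new Term( "title_stemmed", token.term() ) ), BooleanClause.Occur.SHOULD );
+}</programlisting>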
+
+        <para>If your query targets more than one entity and you wish to use
+        your standard analyzer, make sure to describe it using an analyzer
+        definition. You can retrieve analyzers by their definition name using
+        <code>searchFactory.getAnalyzer(String)</code>.</para>
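+
+        <para>For example, assuming an analyzer definition named
+        <literal>customanalyzer</literal> has been declared on one of the
+        entities:</para>
+
+        <programlisting>Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer( "customanalyzer" );</programlisting>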
+      </section>
+    </section>
+  </section>
+
+  <section id="search-mapping-bridge">
+    <title>Property/Field Bridge</title>
+
+    <para>In Lucene all index fields have to be represented as Strings. For
+    this reason all entity properties annotated with <literal>@Field</literal>
+    have to be indexed in a String form. For most of your properties,
+    Hibernate Search does the translation job for you thanks to a built-in set
+    of bridges. In some cases, though, you need a more fine-grained control
+    over the translation process.</para>
+
+    <section>
+      <title>Built-in bridges</title>
+
+      <para><literal>Hibernate Search</literal> comes bundled with a set of
+      built-in bridges between a Java property type and its full text
+      representation.</para>
+
+      <variablelist>
+        <varlistentry>
+          <term>null</term>
+
+          <listitem>
+            <para>Null elements are not indexed. Lucene does not support null
+            elements and indexing them would not make much sense either.</para>
+          </listitem>
+        </varlistentry>
+
+        <varlistentry>
+          <term>java.lang.String</term>
+
+          <listitem>
+            <para>Strings are indexed as is</para>
+          </listitem>
+        </varlistentry>
+
+        <varlistentry>
+          <term>short, Short, int, Integer, long, Long, float, Float,
+          double, Double, BigInteger, BigDecimal</term>
+
+          <listitem>
+            <para>Numbers are converted into their String representation.
+            Note that numbers cannot be compared by Lucene (i.e. used in
+            range queries) out of the box: they have to be padded <footnote>
+                <para>Using a Range query is debatable and has drawbacks; an
+                alternative approach is to use a Filter which will restrict
+                the query results to the appropriate range.</para>
+
+                <para>Hibernate Search will support a padding
+                mechanism.</para>
+              </footnote></para>
+          </listitem>
+        </varlistentry>
+
+        <varlistentry>
+          <term>java.util.Date</term>
+
+          <listitem>
+            <para>Dates are stored as yyyyMMddHHmmssSSS in GMT time
+            (20061107210300012 for Nov 7th of 2006, 4:03PM and 12ms EST). You
+            shouldn't really bother with the internal format. What is
+            important is that when using a Range Query on dates, the dates
+            have to be expressed in GMT time.</para>
+
+            <para>Usually, storing the date up to the millisecond is not
+            necessary. <literal>@DateBridge</literal> defines the appropriate
+            resolution you are willing to store in the index
+            (<literal>@DateBridge(resolution=Resolution.DAY)</literal>). The
+            date pattern will then be truncated accordingly.</para>
+
+            <programlisting>@Entity 
+@Indexed
+public class Meeting {
+    @Field(index=Index.UN_TOKENIZED)
+    <emphasis role="bold">@DateBridge(resolution=Resolution.MINUTE)</emphasis>
+    private Date date;
+    ...                 </programlisting>
+
+            <warning>
+              <para>A Date whose resolution is lower than
+              <literal>MILLISECOND</literal> cannot be a
+              <literal>@DocumentId</literal></para>
+            </warning>
+          </listitem>
+        </varlistentry>
+
+        <varlistentry>
+          <term>java.net.URI, java.net.URL</term>
+
+          <listitem>
+            <para>URI and URL are converted to their string
+            representation</para>
+          </listitem>
+        </varlistentry>
+
+        <varlistentry>
+          <term>java.lang.Class</term>
+
+          <listitem>
+            <para>Classes are converted to their fully qualified class name.
+            The thread context classloader is used when the class is
+            rehydrated</para>
+          </listitem>
+        </varlistentry>
+      </variablelist>
+    </section>
+
+    <section>
+      <title>Custom Bridge</title>
+
+      <para>Sometimes, the built-in bridges of Hibernate Search do not cover
+      some of your property types, or the String representation used by the
+      bridge does not meet your requirements. The following paragraphs
+      describe several solutions to this problem.</para>
+
+      <section>
+        <title>StringBridge</title>
+
+        <para>The simplest custom solution is to give Hibernate Search an
+        implementation of your expected <emphasis>object to String</emphasis>
+        bridge. To do so you need to implement the
+        <literal>org.hibernate.search.bridge.StringBridge</literal>
+        interface.</para>
+
+        <programlisting>/**
+ * Padding Integer bridge.
+ * All numbers will be padded with 0 to match 5 digits
+ *
+ * @author Emmanuel Bernard
+ */
+public class PaddedIntegerBridge implements <emphasis role="bold">StringBridge</emphasis> {
+
+    private int PADDING = 5;
+
+    <emphasis role="bold">public String objectToString(Object object)</emphasis> {
+        String rawInteger = ( (Integer) object ).toString();
+        if (rawInteger.length() &gt; PADDING) 
+            throw new IllegalArgumentException( "Try to pad on a number too big" );
+        StringBuilder paddedInteger = new StringBuilder( );
+        for ( int padIndex = rawInteger.length() ; padIndex &lt; PADDING ; padIndex++ ) {
+            paddedInteger.append('0');
+        }
+        return paddedInteger.append( rawInteger ).toString();
+    }
+}                </programlisting>
+
+        <para>Then any property or field can use this bridge thanks to the
+        <literal>@FieldBridge</literal> annotation</para>
+
+        <programlisting><emphasis role="bold">@FieldBridge(impl = PaddedIntegerBridge.class)</emphasis>
+private Integer length;                </programlisting>
+
+        <para>Parameters can be passed to the bridge implementation, making
+        it more flexible. In this case the bridge implementation also
+        implements the <classname>ParameterizedBridge</classname> interface,
+        and the parameters are passed through the
+        <literal>@FieldBridge</literal> annotation.</para>
+
+        <programlisting>public class PaddedIntegerBridge implements StringBridge, <emphasis
+            role="bold">ParameterizedBridge</emphasis> {
+
+    public static String PADDING_PROPERTY = "padding";
+    private int padding = 5; //default
+
+    <emphasis role="bold">public void setParameterValues(Map parameters)</emphasis> {
+        Object padding = parameters.get( PADDING_PROPERTY );
+        if (padding != null) this.padding = (Integer) padding;
+    }
+
+    public String objectToString(Object object) {
+        String rawInteger = ( (Integer) object ).toString();
+        if (rawInteger.length() &gt; padding) 
+            throw new IllegalArgumentException( "Try to pad on a number too big" );
+        StringBuilder paddedInteger = new StringBuilder( );
+        for ( int padIndex = rawInteger.length() ; padIndex &lt; padding ; padIndex++ ) {
+            paddedInteger.append('0');
+        }
+        return paddedInteger.append( rawInteger ).toString();
+    }
+}
+
+
+//property
+@FieldBridge(impl = PaddedIntegerBridge.class,
+             <emphasis role="bold">params = @Parameter(name="padding", value="10")</emphasis>
+            )
+private Integer length;                </programlisting>
+
+        <para>The <classname>ParameterizedBridge</classname> interface can be
+        implemented by <classname>StringBridge</classname>,
+        <classname>TwoWayStringBridge</classname> and
+        <classname>FieldBridge</classname> implementations (see
+        below).</para>
+
+        <para>If you expect to use your bridge implementation for an id
+        property (i.e. annotated with <literal>@DocumentId</literal>), you
+        need to use a slightly extended version of
+        <literal>StringBridge</literal> named
+        <classname>TwoWayStringBridge</classname>. <literal>Hibernate
+        Search</literal> needs to read the string representation of the
+        identifier and generate the object out of it. There is no difference
+        in the way the <literal>@FieldBridge</literal> annotation is
+        used.</para>
+
+        <programlisting>public class PaddedIntegerBridge implements TwoWayStringBridge, ParameterizedBridge {
+
+    public static String PADDING_PROPERTY = "padding";
+    private int padding = 5; //default
+
+    public void setParameterValues(Map parameters) {
+        Object padding = parameters.get( PADDING_PROPERTY );
+        if (padding != null) this.padding = (Integer) padding;
+    }
+
+    public String objectToString(Object object) {
+        String rawInteger = ( (Integer) object ).toString();
+        if (rawInteger.length() &gt; padding) 
+            throw new IllegalArgumentException( "Try to pad on a number too big" );
+        StringBuilder paddedInteger = new StringBuilder( );
+        for ( int padIndex = rawInteger.length() ; padIndex &lt; padding ; padIndex++ ) {
+            paddedInteger.append('0');
+        }
+        return paddedInteger.append( rawInteger ).toString();
+    }
+
+    <emphasis role="bold">public Object stringToObject(String stringValue)</emphasis> {
+        return new Integer(stringValue);
+    }
+}
+
+
+//id property
+@DocumentId
+@FieldBridge(impl = PaddedIntegerBridge.class,
+             params = @Parameter(name="padding", value="10") )
+private Integer id;
+                </programlisting>
+
+        <para>It is critically important for the two-way process to be
+        idempotent (i.e. object = stringToObject( objectToString( object ) )
+        ).</para>
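+
+        <para>A quick way to check this property for the bridge above (a
+        sketch, not part of the reference example):</para>
+
+        <programlisting>PaddedIntegerBridge bridge = new PaddedIntegerBridge();
+Integer length = Integer.valueOf( 25 );
+//objectToString() yields "00025", stringToObject() turns it back into 25
+assert length.equals( bridge.stringToObject( bridge.objectToString( length ) ) );</programlisting>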
+      </section>
+
+      <section>
+        <title>FieldBridge</title>
+
+        <para>Some use cases require more than a simple object to string
+        translation when mapping a property to a Lucene index. To give you
+        most of the flexibility you can also implement a bridge as a
+        <classname>FieldBridge</classname>. This interface gives you a
+        property value and lets you map it the way you want in your Lucene
+        <classname>Document</classname>. This interface is very similar in
+        its concept to the <productname>Hibernate</productname>
+        <classname>UserType</classname>.</para>
+
+        <para>You can, for example, store a given property in two different
+        document fields:</para>
+
+        <programlisting>/**
+ * Store the date in 3 different fields - year, month, day - to ease Range Query per
+ * year, month or day (eg get all the elements of December for the last 5 years).
+ * 
+ * @author Emmanuel Bernard
+ */
+public class DateSplitBridge implements FieldBridge {
+    private final static TimeZone GMT = TimeZone.getTimeZone("GMT");
+
+    <emphasis role="bold">public void set(String name, Object value, Document document, LuceneOptions luceneOptions)</emphasis> {
+        Date date = (Date) value;
+        Calendar cal = GregorianCalendar.getInstance(GMT);
+        cal.setTime(date);
+        int year = cal.get(Calendar.YEAR);
+        int month = cal.get(Calendar.MONTH) + 1;
+        int day = cal.get(Calendar.DAY_OF_MONTH);
+  
+        // set year
+        Field field = new Field(name + ".year", String.valueOf(year),
+            luceneOptions.getStore(), luceneOptions.getIndex(),
+            luceneOptions.getTermVector());
+        field.setBoost(luceneOptions.getBoost());
+        document.add(field);
+  
+        // set month and pad it if needed
+        field = new Field(name + ".month", (month &lt; 10 ? "0" : "")
+            + String.valueOf(month), luceneOptions.getStore(),
+            luceneOptions.getIndex(), luceneOptions.getTermVector());
+        field.setBoost(luceneOptions.getBoost());
+        document.add(field);
+  
+        // set day and pad it if needed
+        field = new Field(name + ".day", (day &lt; 10 ? "0" : "")
+            + String.valueOf(day), luceneOptions.getStore(),
+            luceneOptions.getIndex(), luceneOptions.getTermVector());
+        field.setBoost(luceneOptions.getBoost());
+        document.add(field);
+    }
+}
+
+//property
+<emphasis role="bold">@FieldBridge(impl = DateSplitBridge.class)</emphasis>
+private Date date;                </programlisting>
+      </section>
+
+      <section>
+        <title>@ClassBridge</title>
+
+        <para>It is sometimes useful to combine more than one property of a
+        given entity and index this combination in a specific way into the
+        Lucene index. The <classname>@ClassBridge</classname> and
+        <classname>@ClassBridges</classname> annotations can be defined at the
+        class level (as opposed to the property level). In this case the
+        custom field bridge implementation receives the entity instance as the
+        value parameter instead of a particular property. Though not shown in
+        this example, <classname>@ClassBridge</classname> supports the
+        <methodname>termVector</methodname> attribute discussed
+        previously.</para>
+
+        <programlisting>@Entity
+@Indexed
+<emphasis role="bold">@ClassBridge</emphasis>(name="branchnetwork",
+             index=Index.TOKENIZED,
+             store=Store.YES,
+             impl = <emphasis role="bold">CatFieldsClassBridge.class</emphasis>,
+             params = @Parameter( name="sepChar", value=" " ) )
+public class Department {
+    private int id;
+    private String network;
+    private String branchHead;
+    private String branch;
+    private Integer maxEmployees;
+    ...
+}
+
+
+public class CatFieldsClassBridge implements FieldBridge, ParameterizedBridge {
+    private String sepChar;
+
+    public void setParameterValues(Map parameters) {
+        this.sepChar = (String) parameters.get( "sepChar" );
+    }
+
+    <emphasis role="bold">public void set(String name, Object value, Document document, LuceneOptions luceneOptions)</emphasis> {
+        // In this particular class the name of the new field was passed
+        // from the name field of the ClassBridge Annotation. This is not
+        // a requirement. It just works that way in this instance. The
+        // actual name could be supplied by hard coding it below.
+        Department dep = (Department) value;
+        String fieldValue1 = dep.getBranch();
+        if ( fieldValue1 == null ) {
+            fieldValue1 = "";
+        }
+        String fieldValue2 = dep.getNetwork();
+        if ( fieldValue2 == null ) {
+            fieldValue2 = "";
+        }
+        String fieldValue = fieldValue1 + sepChar + fieldValue2;
+        Field field = new Field( name, fieldValue, luceneOptions.getStore(), luceneOptions.getIndex(), luceneOptions.getTermVector() );
+        field.setBoost( luceneOptions.getBoost() );
+        document.add( field );
+   }
+}</programlisting>
+
+        <para>In this example, the particular
+        <classname>CatFieldsClassBridge</classname> is applied to the
+        <literal>department</literal> instance; the field bridge then
+        concatenates both branch and network and indexes the
+        concatenation.</para>
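+
+        <para>Once indexed, the combined <literal>branchnetwork</literal>
+        field can be targeted like any other field. A sketch of such a query
+        (the searched value is made up for the example):</para>
+
+        <programlisting>QueryParser parser = new QueryParser( "branchnetwork", new StandardAnalyzer() );
+org.apache.lucene.search.Query luceneQuery = parser.parse( "branchnetwork:Lyon" );
+org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Department.class );
+List result = fullTextQuery.list();</programlisting>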
+      </section>
+    </section>
+  </section>
+
+  <section id="provided-id">
+    <title>Providing your own id</title>
+
+    <warning>
+      <para>This part of the documentation is a work in progress.</para>
+    </warning>
+
+    <para>You can provide your own id to Hibernate Search if you are
+    extending the internals. You will have to generate a unique value so it
+    can be given to Lucene to be indexed. This value has to be given to
+    Hibernate Search when you create an org.hibernate.search.Work object; the
+    document id is required in the constructor.</para>
+
+    <section id="ProvidedId">
+      <title>The @ProvidedId annotation</title>
+
+      <para>Unlike the conventional Hibernate Search API and @DocumentId,
+      this annotation is used on the class and not on a field. You can also
+      provide your own bridge implementation when using this annotation, via
+      the bridge() attribute of @ProvidedId. Also, if you annotate a class
+      with @ProvidedId, your subclasses will also get the annotation - but it
+      is not done by using the standard
+      <classname>java.lang.annotation.Inherited</classname> meta-annotation.
+      Be sure, however, <emphasis>not</emphasis> to use this annotation with
+      @DocumentId as your system will break.</para>
+
+      <programlisting>@ProvidedId(bridge = org.my.own.package.MyCustomBridge)
+@Indexed
+public class MyClass {
+
+    @Field
+    String MyString;
+
+    ...
+}</programlisting>
+    </section>
+  </section>
+</chapter>

Deleted: search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml
===================================================================
--- search/trunk/doc/reference/en/modules/query.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,740 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Hibernate, Relational Persistence for Idiomatic Java
-  ~
-  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
-  ~ indicated by the @author tags or express copyright attribution
-  ~ statements applied by the authors.  All third-party contributions are
-  ~ distributed under license by Red Hat Middleware LLC.
-  ~
-  ~ This copyrighted material is made available to anyone wishing to use, modify,
-  ~ copy, or redistribute it subject to the terms and conditions of the GNU
-  ~ Lesser General Public License, as published by the Free Software Foundation.
-  ~
-  ~ This program is distributed in the hope that it will be useful,
-  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
-  ~ for more details.
-  ~
-  ~ You should have received a copy of the GNU Lesser General Public License
-  ~ along with this distribution; if not, write to:
-  ~ Free Software Foundation, Inc.
-  ~ 51 Franklin Street, Fifth Floor
-  ~ Boston, MA  02110-1301  USA
-  -->
-<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
-"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
-<chapter id="search-query" xreflabel="Querying">
-  <!--  $Id$ -->
-
-  <title>Querying</title>
-
-  <para>The second most important capability of Hibernate Search is the
-  ability to execute a Lucene query and retrieve entities managed by an
-  Hibernate session, providing the power of Lucene without living the
-  Hibernate paradigm, and giving another dimension to the Hibernate classic
-  search mechanisms (HQL, Criteria query, native SQL query).</para>
-
-  <para>To access the <productname>Hibernate Search</productname> querying
-  facilities, you have to use an Hibernate
-  <classname>FullTextSession</classname> . A Search Session wraps a regular
-  <classname>org.hibernate.Session</classname> to provide query and indexing
-  capabilities.</para>
-
-  <programlisting>Session session = sessionFactory.openSession();
-...
-FullTextSession fullTextSession = Search.getFullTextSession(session);    </programlisting>
-
-  <para>The search facility is built on native Lucene queries.</para>
-
-  <programlisting>org.apache.lucene.queryParser.QueryParser parser = new QueryParser("title", new StopAnalyzer() );
-
-org.apache.lucene.search.Query luceneQuery = parser.parse( "summary:Festina Or brand:Seiko" );
-<emphasis role="bold">org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery );
-        </emphasis>
-
-List result = fullTextQuery.list(); //return a list of managed objects    </programlisting>
-
-  <para>The Hibernate query built on top of the Lucene query is a regular
-  <literal>org.hibernate.Query</literal> , you are in the same paradigm as the
-  other Hibernate query facilities (HQL, Native or Criteria). The regular
-  <literal>list()</literal> , <literal>uniqueResult()</literal> ,
-  <literal>iterate()</literal> and <literal>scroll()</literal> can be
-  used.</para>
-
-  <para>For people using Java Persistence (aka EJB 3.0 Persistence) APIs of
-  Hibernate, the same extensions exist:</para>
-
-  <programlisting>EntityManager em = entityManagerFactory.createEntityManager();
-
-FullTextEntityManager fullTextEntityManager = 
-    org.hibernate.hibernate.search.jpa.Search.getFullTextEntityManager(em);
-
-...
-org.apache.lucene.queryParser.QueryParser parser = new QueryParser("title", new StopAnalyzer() );
-
-org.apache.lucene.search.Query luceneQuery = parser.parse( "summary:Festina Or brand:Seiko" );
-<emphasis role="bold">javax.persistence.Query fullTextQuery = fullTextEntityManager.createFullTextQuery( luceneQuery );</emphasis>
-
-List result = fullTextQuery.getResultList(); //return a list of managed objects  </programlisting>
-
-  <para>The following examples show the Hibernate APIs but the same example
-  can be easily rewritten with the Java Persistence API by just adjusting the
-  way the FullTextQuery is retrieved.</para>
-
-  <section>
-    <title>Building queries</title>
-
-    <para>Hibernate Search queries are built on top of Lucene queries. It
-    gives you a total freedom on the kind of Lucene queries you are willing to
-    execute. However, once built, Hibernate Search abstract the query
-    processing from your application using org.hibernate.Query as your primary
-    query manipulation API.</para>
-
-    <section>
-      <title>Building a Lucene query</title>
-
-      <para>This subject is generally speaking out of the scope of this
-      documentation. Please refer to the Lucene documentation Lucene In Action
-      or Hibernate Search in Action from Manning.</para>
-
-      <para>It is essential to use the same analyzer when indexing a field and
-      when querying that field. Hibernate Search gives you access to the
-      analyzers used during indexing time (see <xref
-      linkend="analyzer-retrievinganalyzer" /> for more information).</para>
-
-      <programlisting>//retrieve an analyzer by name
-Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer("phonetic-analyzer");
-
-//or the scoped analyzer for a given entity
-Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer(Song.class);</programlisting>
-
-      <para>Using the same analyzer at indexing and querying time is
-      important. See <xref linkend="analyzer" /> for more information.</para>
-    </section>
-
-    <section>
-      <title>Building a Hibernate Search query</title>
-
-      <section>
-        <title>Generality</title>
-
-        <para>Once the Lucene query is built, it needs to be wrapped into an
-        Hibernate Query.</para>
-
-        <programlisting>FullTextSession fullTextSession = Search.getFullTextSession( session );
-org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery );</programlisting>
-
-        <para>If not specified otherwise, the query will be executed against
-        all indexed entities, potentially returning all types of indexed
-        classes. It is advised, from a performance point of view, to restrict
-        the returned types:</para>
-
-        <programlisting>org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Customer.class );
-//or
-fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Item.class, Actor.class );</programlisting>
-
-        <para>The first example returns only matching customers, the second
-        returns matching actors and items.</para>
-      </section>
-
-      <section>
-        <title>Pagination</title>
-
-        <para>It is recommended to restrict the number of returned objects per
-        query. It is a very common use case as well, the user usually navigate
-        from one page to an other. The way to define pagination is exactly the
-        way you would define pagination in a plain HQL or Criteria
-        query.</para>
-
-        <programlisting>org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Customer.class );
-fullTextQuery.setFirstResult(15); //start from the 15th element
-fullTextQuery.setMaxResults(10); //return 10 elements</programlisting>
-
-        <note>
-          <para>It is still possible to get the total number of matching
-          elements regardless of the pagination. See
-          <methodname>getResultSize()</methodname> below</para>
-        </note>
-      </section>
-
-      <section>
-        <title>Sorting</title>
-
-        <para>Apache Lucene provides a very flexible and powerful way to sort
-        results. While the default sorting (by relevance) is appropriate most
-        of the time, it can interesting to sort by one or several
-        properties.</para>
-
-        <para>Inject the Lucene Sort object to apply a Lucene sorting strategy
-        to an Hibernate Search.</para>
-
-        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( query, Book.class );
-org.apache.lucene.search.Sort sort = new Sort(new SortField("title"));
-<emphasis role="bold">query.setSort(sort);</emphasis>
-List results = query.list();</programlisting>
-
-        <para>One can notice the <classname>FullTextQuery</classname>
-        interface which is a sub interface of
-        <classname>org.hibernate.Query</classname>.</para>
-
-        <para>Fields used for sorting must not be tokenized.</para>
-      </section>
-
-      <section>
-        <title>Fetching strategy</title>
-
-        <para>When you restrict the return types to one class, Hibernate
-        Search loads the objects using a single query. It also respects the
-        static fetching strategy defined in your domain model.</para>
-
-        <para>It is often useful, however, to refine the fetching strategy for
-        a specific use case.</para>
-
-        <programlisting>Criteria criteria = s.createCriteria( Book.class ).setFetchMode( "authors", FetchMode.JOIN );
-s.createFullTextQuery( luceneQuery ).setCriteriaQuery( criteria );</programlisting>
-
-        <para>In this example, the query will return all Books matching the
-        luceneQuery. The authors collection will be loaded from the same query
-        using an SQL outer join.</para>
-
-        <para>When defining a criteria query, it is not needed to restrict the
-        entity types returned while creating the Hibernate Search query from
-        the full text session: the type is guessed from the criteria query
-        itself. Only fetch mode can be adjusted, refrain from applying any
-        other restriction.</para>
-
-        <para>One cannot use <methodname>setCriteriaQuery</methodname> if more
-        than one entity type is expected to be returned.</para>
-      </section>
-
-      <section id="projections">
-        <title>Projection</title>
-
-        <para>For some use cases, returning the domain object (graph) is
-        overkill. Only a small subset of the properties is necessary.
-        Hibernate Search allows you to return a subset of properties:</para>
-
-        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
-query.<emphasis role="bold">setProjection( "id", "summary", "body", "mainAuthor.name" )</emphasis>;
-List results = query.list();
-Object[] firstResult = (Object[]) results.get(0);
-Integer id = firstResult[0];
-String summary = firstResult[1];
-String body = firstResult[2];
-String authorName = firstResult[3];</programlisting>
-
-        <para>Hibernate Search extracts the properties from the Lucene index
-        and convert them back to their object representation, returning a list
-        of <classname>Object[]</classname>. Projections avoid a potential
-        database round trip (useful if the query response time is critical),
-        but has some constraints:</para>
-
-        <itemizedlist>
-          <listitem>
-            <para>the properties projected must be stored in the index
-            (<literal>@Field(store=Store.YES)</literal>), which increase the
-            index size</para>
-          </listitem>
-
-          <listitem>
-            <para>the properties projected must use a
-            <literal>FieldBridge</literal> implementing
-            <classname>org.hibernate.search.bridge.TwoWayFieldBridge</classname>
-            or
-            <literal>org.hibernate.search.bridge.TwoWayStringBridge</literal>,
-            the latter being the simpler version. All Hibernate Search
-            built-in types are two-way.</para>
-          </listitem>
-        </itemizedlist>
-
-        <para>Projection is useful for another kind of usecases. Lucene
-        provides some metadata informations to the user about the results. By
-        using some special placeholders, the projection mechanism can retrieve
-        them:</para>
-
-        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
-query.<emphasis role="bold">setProjection( FullTextQuery.SCORE, FullTextQuery.THIS, "mainAuthor.name" )</emphasis>;
-List results = query.list();
-Object[] firstResult = (Object[]) results.get(0);
-float score = firstResult[0];
-Book book = firstResult[1];
-String authorName = firstResult[2];</programlisting>
-
-        <para>You can mix and match regular fields and special placeholders.
-        Here is the list of available placeholders:</para>
-
-        <itemizedlist>
-          <listitem>
-            <para>FullTextQuery.THIS: returns the intialized and managed
-            entity (as a non projected query would have done)</para>
-          </listitem>
-
-          <listitem>
-            <para>FullTextQuery.DOCUMENT: returns the Lucene Document related
-            to the object projected</para>
-          </listitem>
-
-          <listitem>
-            <para>FullTextQuery.SCORE: returns the document score in the
-            query. The score is guatanteed to be between 0 and 1 but the
-            highest score is not necessarily equals to 1. Scores are handy to
-            compare one result against an other for a given query but are
-            useless when comparing the result of different queries.</para>
-          </listitem>
-
-          <listitem>
-            <para>FullTextQuery.ID: the id property value of the projected
-            object</para>
-          </listitem>
-
-          <listitem>
-            <para>FullTextQuery.DOCUMENT_ID: the Lucene document id. Careful,
-            Lucene document id can change overtime between two different
-            IndexReader opening (this feature is experimental)</para>
-          </listitem>
-
-          <listitem>
-            <para>FullTextQuery.EXPLANATION: returns the Lucene Explanation
-            object for the matching object/document in the given query. Do not
-            use if you retrieve a lot of data. Running explanation typically
-            is as costly as running the whole Lucene query per matching
-            element. Make sure you use projection!</para>
-          </listitem>
-        </itemizedlist>
-      </section>
-    </section>
-  </section>
-
-  <section>
-    <title>Retrieving the results</title>
-
-    <para>Once the Hibernate Search query is built, executing it is in no way
-    different than executing a HQL or Criteria query. The same paradigm and
-    object semantic apply. All the common operations are available:
-    <methodname>list()</methodname>, <methodname>uniqueResult()</methodname>,
-    <methodname>iterate()</methodname>,
-    <methodname>scroll()</methodname>.</para>
-
-    <section>
-      <title>Performance considerations</title>
-
-      <para>If you expect a reasonable number of results (for example using
-      pagination) and expect to work on all of them,
-      <methodname>list()</methodname> or
-      <methodname>uniqueResult()</methodname> are recommended.
-      <methodname>list()</methodname> work best if the entity
-      <literal>batch-size</literal> is set up properly. Note that Hibernate
-      Search has to process all Lucene Hits elements (within the pagination)
-      when using <methodname>list()</methodname> ,
-      <methodname>uniqueResult()</methodname> and
-      <methodname>iterate()</methodname>.</para>
-
-      <para>If you wish to minimize Lucene document loading,
-      <methodname>scroll()</methodname> is more appropriate. Don't forget to
-      close the <classname>ScrollableResults</classname> object when you're
-      done, since it keeps Lucene resources. If you expect to use
-      <methodname>scroll</methodname> but wish to load objects in batch, you
-      can use <methodname>query.setFetchSize()</methodname>: When an object is
-      accessed, and if not already loaded, Hibernate Search will load the next
-      <literal>fetchSize</literal> objects in one pass.</para>
-
-      <para>Pagination is a preferred method over scrolling though.</para>
-    </section>
-
-    <section>
-      <title>Result size</title>
-
-      <para>It is sometime useful to know the total number of matching
-      documents:</para>
-
-      <itemizedlist>
-        <listitem>
-          <para>for the Google-like feature 1-10 of about 888,000,000</para>
-        </listitem>
-
-        <listitem>
-          <para>to implement a fast pagination navigation</para>
-        </listitem>
-
-        <listitem>
-          <para>to implement a multi step search engine (adding approximation
-          if the restricted query return no or not enough results)</para>
-        </listitem>
-      </itemizedlist>
-
-      <para>But it would be costly to retrieve all the matching
-      documents.</para>
-
-      <para>Hibernate Search allows you to retrieve the total number of
-      matching documents regardless of the pagination parameters. Even more
-      interesting, you can retrieve the number of matching elements without
-      triggering a single object load.</para>
-
-      <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
-assert 3245 == <emphasis role="bold">query.getResultSize()</emphasis>; //return the number of matching books without loading a single one
-
-org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
-query.setMaxResult(10);
-List results = query.list();
-assert 3245 == <emphasis role="bold">query.getResultSize()</emphasis>; //return the total number of matching books regardless of pagination</programlisting>
-
-      <note>
-        <para>Like Google, the number of results is approximative if the index
-        is not fully up-to-date with the database (asynchronous cluster for
-        example).</para>
-      </note>
-    </section>
-
-    <section>
-      <title>ResultTransformer</title>
-
-      <para>Especially when using projection, the data structure returned by a
-      query (an object array in this case), is not always matching the
-      application needs. It is possible to apply a
-      <classname>ResultTransformer</classname> operation post query to match
-      the targeted data structure:</para>
-
-      <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
-query.setProjection( "title", "mainAuthor.name" );
-
-<emphasis role="bold">query.setResultTransformer( 
-    new StaticAliasToBeanResultTransformer( BookView.class, "title", "author" ) 
-);</emphasis>
-List&lt;BookView&gt; results = (List&lt;BookView&gt;) query.list();
-for(BookView view : results) {
-    log.info( "Book: " + view.getTitle() + ", " + view.getAuthor() );
-}</programlisting>
-
-      <para>Examples of <classname>ResultTransformer</classname>
-      implementations can be found in the Hibernate Core codebase.</para>
-    </section>
-
-    <section>
-      <title>Understanding results</title>
-
-      <para>You will find yourself sometimes puzzled by a result showing up in
-      a query or a result not showing up in a query. Luke is a great tool to
-      understand those mysteries. But Hibernate Search also let's you access
-      to the Lucene <classname>Explanation</classname> object for a given
-      result (in a given query). This class is considered fairly advanced to
-      Lucene users but can provide a good understanding of the scoring of an
-      object. You have two ways to access the Explanation object for a given
-      result:</para>
-
-      <itemizedlist>
-        <listitem>
-          <para>Use the <methodname>fullTextQuery.explain(int)</methodname>
-          method</para>
-        </listitem>
-
-        <listitem>
-          <para>Use projection</para>
-        </listitem>
-      </itemizedlist>
-
-      <para>The first approach takes a document id as a parameter and return
-      the Explanation object. The document id can be retrieved using
-      projection and the <literal>FullTextQuery.DOCUMENT_ID</literal>
-      constant.</para>
-
-      <warning>
-        <para>The Document id has nothing to do with the entity id. do not
-        mess up the two notions.</para>
-      </warning>
-
-      <para>The second approach let's you project the
-      <classname>Explanation</classname> object using the
-      <literal>FullTextQuery.EXPLANATION</literal> constant.</para>
-
-      <programlisting>FullTextQuery ftQuery = s.createFullTextQuery( luceneQuery, Dvd.class )
-        .setProjection( FullTextQuery.DOCUMENT_ID, <emphasis role="bold">FullTextQuery.EXPLANATION</emphasis>, FullTextQuery.THIS );
-@SuppressWarnings("unchecked") List&lt;Object[]&gt; results = ftQuery.list();
-for (Object[] result : results) {
-    Explanation e = (Explanation) result[1];
-    display( e.toString() );
-}</programlisting>
-
-      <para>Be careful, building the explanation object is quite expensive, it
-      is roughly as expensive as running the Lucene query again. Don't do it
-      if you don't need the object</para>
-    </section>
-  </section>
-
-  <section>
-    <title>Filters</title>
-
-    <para>Apache Lucene has a powerful feature that allows to filter query
-    results according to a custom filtering process. This is a very powerful
-    way to apply additional data restrictions, especially since filters can be
-    cached and reused. Some interesting usecases are:</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>security</para>
-      </listitem>
-
-      <listitem>
-        <para>temporal data (eg. view only last month's data)</para>
-      </listitem>
-
-      <listitem>
-        <para>population filter (eg. search limited to a given
-        category)</para>
-      </listitem>
-
-      <listitem>
-        <para>and many more</para>
-      </listitem>
-    </itemizedlist>
-
-    <para>Hibernate Search pushes the concept further by introducing the
-    notion of parameterizable named filters which are transparently cached.
-    For people familiar with the notion of Hibernate Core filters, the API is
-    very similar:</para>
-
-    <programlisting>fullTextQuery = s.createFullTextQuery( query, Driver.class );
-fullTextQuery.enableFullTextFilter("bestDriver");
-fullTextQuery.enableFullTextFilter("security").setParameter( "login", "andre" );
-fullTextQuery.list(); //returns only best drivers where andre has credentials</programlisting>
-
-    <para>In this example we enabled two filters on top of the query. You can
-    enable (or disable) as many filters as you want.</para>
-
-    <para>Declaring filters is done through the
-    <classname>@FullTextFilterDef</classname> annotation. This annotation can
-    be on any <literal>@Indexed</literal> entity regardless of the query the
-    filter is later applied to. This means filter definitions are global and
-    their names must be unique. A <classname>SearchException</classname> is
-    thrown in case two different <classname>@FullTextFilterDef</classname>
-    annotations with the same name are defined. Each named filter has to point
-    to an actual filter implementation.</para>
-
-    <programlisting>@Entity
-@Indexed
-@FullTextFilterDefs( {
-    <emphasis role="bold">@FullTextFilterDef(name = "bestDriver", impl = BestDriversFilter.class, cache=false)</emphasis>, //actual Filter implementation
-    <emphasis role="bold">@FullTextFilterDef(name = "security", impl = SecurityFilterFactory.class)</emphasis> //Filter factory with parameters
-})
-public class Driver { ... }</programlisting>
-
-    <programlisting>public class BestDriversFilter extends <emphasis
-        role="bold">org.apache.lucene.search.Filter</emphasis> {
-
-    public BitSet bits(IndexReader reader) throws IOException {
-        BitSet bitSet = new BitSet( reader.maxDoc() );
-        TermDocs termDocs = reader.termDocs( new Term("score", "5") );
-        while ( termDocs.next() ) {
-            bitSet.set( termDocs.doc() );
-        }
-        return bitSet;
-    }
-}</programlisting>
-
-    <para><classname>BestDriversFilter</classname> is an example of a simple
-    Lucene filter that will filter all results returning only drivers whose
-    score is 5. In this example the specified filter implements the
-    <literal>org.apache.lucene.search.Filter</literal> directly and contains a
-    no-arg constructor. The <literal>cache</literal> flag, defaulted to
-    <literal>true</literal>, tells Hibernate Search to search the filter in
-    its internal cache and reuse it if found.</para>
-
-    <para>If your Filter creation requires additional steps or if the filter
-    you want to use does not have a no-arg constructor, you can use the
-    factory pattern:</para>
-
-    <programlisting>@Entity
-@Indexed
-@FullTextFilterDef(name = "bestDriver", impl = BestDriversFilterFactory.class) //Filter factory
-public class Driver { ... }
-
-public class BestDriversFilterFactory {
-
-    <emphasis role="bold">@Factory</emphasis>
-    public Filter getFilter() {
-        //some additional steps to cache the filter results per IndexReader
-        Filter bestDriversFilter = new BestDriversFilter();
-        return new CachingWrapperFilter(bestDriversFilter);
-    }
-}</programlisting>
-
-    <para>Hibernate Search will look for a <literal>@Factory</literal>
-    annotated method and use it to build the filter instance. The factory must
-    have a no-arg constructor. For people familiar with JBoss Seam, this is
-    similar to the component factory pattern, but the annotation is
-    different!</para>
-
-    <para>Named filters come in handy where parameters have to be passed to
-    the filter. For example a security filter might want to know which
-    security level you want to apply:</para>
-
-    <programlisting>fullTextQuery = s.createFullTextQuery( query, Driver.class );
-fullTextQuery.enableFullTextFilter("security")<emphasis role="bold">.setParameter( "level", 5 )</emphasis>;</programlisting>
-
-    <para>Each parameter name should have an associated setter on either the
-    filter or filter factory of the targeted named filter definition.</para>
-
-    <programlisting>public class SecurityFilterFactory {
-    private Integer level;
-
-    /**
-     * injected parameter
-     */
-    <emphasis role="bold">public void setLevel(Integer level)</emphasis> {
-        this.level = level;
-    }
-
-    <emphasis role="bold">@Key
-    public FilterKey getKey()</emphasis> {
-        StandardFilterKey key = new StandardFilterKey();
-        key.addParameter( level );
-        return key;
-    }
-
-    @Factory
-    public Filter getFilter() {
-        Query query = new TermQuery( new Term("level", level.toString() ) );
-        return new CachingWrapperFilter( new QueryWrapperFilter(query) );
-    }
-}</programlisting>
-
-    <para>Note the method annotated <classname>@Key</classname> returning a
-    <classname>FilterKey</classname> object. The returned object has a special
-    contract: the key object must implement equals / hashcode so that 2 keys
-    are equal if and only if the given <classname>Filter</classname> types are
-    the same and the set of parameters are the same. In other words, 2 filter
-    keys are equal if and only if the filters from which the keys are
-    generated can be interchanged. The key object is used as a key in the
-    cache mechanism.</para>
-
-    <para><classname>@Key</classname> methods are needed only if:</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>you enabled the filter caching system (enabled by
-        default)</para>
-      </listitem>
-
-      <listitem>
-        <para>your filter has parameters</para>
-      </listitem>
-    </itemizedlist>
-
-    <para>In most cases, using the <literal>StandardFilterKey</literal>
-    implementation will be good enough. It delegates the equals / hashcode
-    implementation to each of the parameters equals and hashcode
-    methods.</para>
-
-    <para>The filter cache is enabled by default and uses a combination of
-    hard and soft references to allow disposal of memory when needed. The hard
-    reference cache keeps track of the most recently used filters and
-    transforms the ones least used to <classname>SoftReferences</classname>
-    when needed. Once the limit of the hard reference cache is reached
-    addtional filters are cached as <classname>SoftReferences</classname>. To
-    adjust the size of the hard reference cache, use
-    <literal>hibernate.search.filter.cache_strategy.size</literal> (defaults
-    to 128). For advance use of filter caching, you can implement your own
-    <classname>FilterCachingStrategy</classname>. The classname is defined by
-    <literal>hibernate.search.filter.cache_strategy</literal>.</para>
-
-    <para>The described filter cache mechanism should not be confused with
-    caching the actual filter results. In Lucene it is common practice to wrap
-    filters using the <classname>IndexReader</classname> around a
-    <classname>CachingWrapperFilter.</classname> The wrapper will cache the
-    <classname>BitSet</classname> returned from the
-    <methodname>bits(IndexReader reader)</methodname>method to avoid expensive
-    recomputation.</para>
-
-    <para>Hibernate Search also takes care of this aspect of caching. If the
-    <literal>cache</literal> flag of <classname>@FullTextFilterDef
-    </classname>is set to <literal>true</literal>, it will automatically wrap
-    the specified filter around a Hibernate specific implementation of
-    CachingWrapperFilter
-    (<classname>org.hibernate.search.filter.CachingWrapperFilter</classname>).
-    In contrast to Lucene's version of this class SoftReferences are used
-    together with a hard reference count (see dicussion about filter cache).
-    The hard reference count can be adjusted using
-    <literal>hibernate.search.filter.cache_bit_results.size</literal>
-    (defaults to 5). The wrapping behaviour can be controlled by
-    <literal>@FullTextFilterDef.cacheBitResult</literal>. There are two
-    differerent values for this parameter:</para>
-
-    <para><informaltable align="left" width="">
-        <tgroup cols="2">
-          <colspec align="center" />
-
-          <thead>
-            <row>
-              <entry align="center">Value</entry>
-
-              <entry align="center">Definition</entry>
-            </row>
-          </thead>
-
-          <tbody>
-            <row>
-              <entry align="left">CacheBitResults.AUTOMATIC</entry>
-
-              <entry>The use of <classname>CachingWrapperFilter</classname>
-              depends on the <literal>cache</literal> paramter of the filter
-              defintion. If <literal>cache</literal> is set to
-              <literal>true</literal> a wrapper will be used, otherwise not.
-              <literal>CacheBitResults.AUTOMATIC</literal> is the default
-              value.</entry>
-            </row>
-
-            <row>
-              <entry align="left">CacheBitResults.NO</entry>
-
-              <entry>No wrapper will be used.</entry>
-            </row>
-          </tbody>
-        </tgroup>
-      </informaltable>Last but not least - why should filters be cached? There
-    are two areas where filter caching shines:</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>the system does not update the targeted entity index often (in
-        other words, the IndexReader is reused a lot)</para>
-      </listitem>
-
-      <listitem>
-        <para>the Filter BitSet is expensive to compute (compared to the time
-        spent to execute the query)</para>
-      </listitem>
-    </itemizedlist>
-  </section>
-
-  <section>
-    <title>Optimizing the query process</title>
-
-    <para>Query performance depends on several criteria:</para>
-
-    <itemizedlist>
-      <listitem>
-        <para>the Lucene query itself: read the literature on this
-        subject</para>
-      </listitem>
-
-      <listitem>
-        <para>the number of object loaded: use pagination (always ;-) ) or
-        index projection (if needed)</para>
-      </listitem>
-
-      <listitem>
-        <para>the way Hibernate Search interacts with the Lucene readers:
-        defines the appropriate <xref
-        linkend="search-architecture-readerstrategy" />.</para>
-      </listitem>
-    </itemizedlist>
-  </section>
-
-  <section>
-    <title>Native Lucene Queries</title>
-
-    <para>If you wish to use some specific features of Lucene, you can always
-    run Lucene specific queries. Check <xref linkend="search-lucene-native" />
-    for more informations.</para>
-  </section>
-</chapter>
\ No newline at end of file

Copied: search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml (from rev 15395, search/trunk/doc/reference/en/modules/query.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/doc/reference/en/modules/query.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,770 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Hibernate, Relational Persistence for Idiomatic Java
+  ~
+  ~ Copyright (c) 2008, Red Hat Middleware LLC or third-party contributors as
+  ~ indicated by the @author tags or express copyright attribution
+  ~ statements applied by the authors.  All third-party contributions are
+  ~ distributed under license by Red Hat Middleware LLC.
+  ~
+  ~ This copyrighted material is made available to anyone wishing to use, modify,
+  ~ copy, or redistribute it subject to the terms and conditions of the GNU
+  ~ Lesser General Public License, as published by the Free Software Foundation.
+  ~
+  ~ This program is distributed in the hope that it will be useful,
+  ~ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+  ~ or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
+  ~ for more details.
+  ~
+  ~ You should have received a copy of the GNU Lesser General Public License
+  ~ along with this distribution; if not, write to:
+  ~ Free Software Foundation, Inc.
+  ~ 51 Franklin Street, Fifth Floor
+  ~ Boston, MA  02110-1301  USA
+  -->
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
+"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
+<chapter id="search-query" xreflabel="Querying">
+  <!--  $Id$ -->
+
+  <title>Querying</title>
+
+  <para>The second most important capability of Hibernate Search is the
+  ability to execute a Lucene query and retrieve entities managed by a
+  Hibernate session, providing the power of Lucene without leaving the
+  Hibernate paradigm, and giving another dimension to the classic Hibernate
+  search mechanisms (HQL, Criteria query, native SQL query).</para>
+
+  <para>To access the <productname>Hibernate Search</productname> querying
+  facilities, you have to use an Hibernate
+  <classname>FullTextSession</classname> . A Search Session wraps a regular
+  <classname>org.hibernate.Session</classname> to provide query and indexing
+  capabilities.</para>
+
+  <programlisting>Session session = sessionFactory.openSession();
+...
+FullTextSession fullTextSession = Search.getFullTextSession(session);    </programlisting>
+
+  <para>The search facility is built on native Lucene queries.</para>
+
+  <programlisting>org.apache.lucene.queryParser.QueryParser parser = new QueryParser("title", new StopAnalyzer() );
+
+org.apache.lucene.search.Query luceneQuery = parser.parse( "summary:Festina OR brand:Seiko" );
+<emphasis role="bold">org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery );
+        </emphasis>
+
+List result = fullTextQuery.list(); //return a list of managed objects    </programlisting>
+
+  <para>The Hibernate query built on top of the Lucene query is a regular
+  <literal>org.hibernate.Query</literal>; you are in the same paradigm as the
+  other Hibernate query facilities (HQL, Native or Criteria). The regular
+  <literal>list()</literal>, <literal>uniqueResult()</literal>,
+  <literal>iterate()</literal> and <literal>scroll()</literal> methods can be
+  used.</para>
+
+  <para>For people using Java Persistence (aka EJB 3.0 Persistence) APIs of
+  Hibernate, the same extensions exist:</para>
+
+  <programlisting>EntityManager em = entityManagerFactory.createEntityManager();
+
+FullTextEntityManager fullTextEntityManager = 
+    org.hibernate.search.jpa.Search.getFullTextEntityManager(em);
+
+...
+org.apache.lucene.queryParser.QueryParser parser = new QueryParser("title", new StopAnalyzer() );
+
+org.apache.lucene.search.Query luceneQuery = parser.parse( "summary:Festina OR brand:Seiko" );
+<emphasis role="bold">javax.persistence.Query fullTextQuery = fullTextEntityManager.createFullTextQuery( luceneQuery );</emphasis>
+
+List result = fullTextQuery.getResultList(); //return a list of managed objects  </programlisting>
+
+  <para>The following examples show the Hibernate APIs but the same example
+  can be easily rewritten with the Java Persistence API by just adjusting the
+  way the FullTextQuery is retrieved.</para>
+
+  <section>
+    <title>Building queries</title>
+
+    <para>Hibernate Search queries are built on top of Lucene queries. This
+    gives you total freedom on the kind of Lucene queries you are willing to
+    execute. However, once built, Hibernate Search abstracts the query
+    processing from your application, using org.hibernate.Query as your
+    primary query manipulation API.</para>
+
+    <section>
+      <title>Building a Lucene query</title>
+
+      <para>This subject is generally speaking out of the scope of this
+      documentation. Please refer to the Lucene documentation, or to Lucene
+      in Action and Hibernate Search in Action from Manning.</para>
+
+      <para>It is essential to use the same analyzer when indexing a field and
+      when querying that field. Hibernate Search gives you access to the
+      analyzers used during indexing time (see <xref
+      linkend="analyzer-retrievinganalyzer" /> for more information).</para>
+
+      <programlisting>//retrieve an analyzer by name
+Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer("phonetic-analyzer");
+
+//or the scoped analyzer for a given entity
+Analyzer analyzer = fullTextSession.getSearchFactory().getAnalyzer(Song.class);</programlisting>
+
+      <para>Using the same analyzer at indexing and querying time is
+      important. See <xref linkend="analyzer" /> for more information.</para>
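+
+      <para>As an illustration (a minimal sketch, assuming the Song entity
+      above has a <literal>title</literal> field; the query string is purely
+      an example), the retrieved analyzer can be handed to the Lucene
+      <classname>QueryParser</classname> so that query terms are processed
+      the same way as the indexed text:</para>
+
+      <programlisting>//reuse the scoped analyzer retrieved above
+QueryParser parser = new QueryParser( "title", analyzer );
+org.apache.lucene.search.Query luceneQuery = parser.parse( "title:saturday" );</programlisting>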
+    </section>
+
+    <section>
+      <title>Building a Hibernate Search query</title>
+
+      <section>
+        <title>Generality</title>
+
+        <para>Once the Lucene query is built, it needs to be wrapped into a
+        Hibernate Query.</para>
+
+        <programlisting>FullTextSession fullTextSession = Search.getFullTextSession( session );
+org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery );</programlisting>
+
+        <para>If not specified otherwise, the query will be executed against
+        all indexed entities, potentially returning all types of indexed
+        classes. It is advised, from a performance point of view, to restrict
+        the returned types:</para>
+
+        <programlisting>org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Customer.class );
+//or
+fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Item.class, Actor.class );</programlisting>
+
+        <para>The first example returns only matching customers, while the
+        second returns matching items and actors.</para>
+      </section>
+
+      <section>
+        <title>Pagination</title>
+
+        <para>It is recommended to restrict the number of returned objects
+        per query. It is a very common use case as well: the user usually
+        navigates from one page to another. Pagination is defined exactly the
+        way you would define it in a plain HQL or Criteria query.</para>
+
+        <programlisting>org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Customer.class );
+fullTextQuery.setFirstResult(15); //start from the 15th element
+fullTextQuery.setMaxResults(10); //return 10 elements</programlisting>
+
+        <note>
+          <para>It is still possible to get the total number of matching
+          elements regardless of the pagination. See
+          <methodname>getResultSize()</methodname> below.</para>
+        </note>
+      </section>
+
+      <section>
+        <title>Sorting</title>
+
+        <para>Apache Lucene provides a very flexible and powerful way to sort
+        results. While the default sorting (by relevance) is appropriate most
+        of the time, it can be interesting to sort by one or several
+        properties.</para>
+
+        <para>Inject the Lucene Sort object to apply a Lucene sorting
+        strategy to a Hibernate Search query.</para>
+
+        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+org.apache.lucene.search.Sort sort = new Sort(new SortField("title"));
+<emphasis role="bold">query.setSort(sort);</emphasis>
+List results = query.list();</programlisting>
+
+        <para>Note the <classname>FullTextQuery</classname> interface, which
+        is a sub-interface of
+        <classname>org.hibernate.Query</classname>.</para>
+
+        <para>Fields used for sorting must not be tokenized.</para>
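+
+        <para>As a minimal mapping sketch (reusing the Book entity from the
+        sorting example above; not taken from the reference examples), the
+        field used for sorting would be indexed untokenized:</para>
+
+        <programlisting>@Entity
+@Indexed
+public class Book {
+    ...
+    //the "title" field used by the Sort example above: indexed but not tokenized
+    @Field(index = Index.UN_TOKENIZED)
+    public String getTitle() { return title; }
+    ...
+}</programlisting>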
+      </section>
+
+      <section>
+        <title>Fetching strategy</title>
+
+        <para>When you restrict the return types to one class, Hibernate
+        Search loads the objects using a single query. It also respects the
+        static fetching strategy defined in your domain model.</para>
+
+        <para>It is often useful, however, to refine the fetching strategy for
+        a specific use case.</para>
+
+        <programlisting>Criteria criteria = s.createCriteria( Book.class ).setFetchMode( "authors", FetchMode.JOIN );
+s.createFullTextQuery( luceneQuery ).setCriteriaQuery( criteria );</programlisting>
+
+        <para>In this example, the query will return all Books matching the
+        luceneQuery. The authors collection will be loaded from the same query
+        using an SQL outer join.</para>
+
+        <para>When defining a criteria query, there is no need to restrict
+        the entity types returned while creating the Hibernate Search query
+        from the full text session: the type is guessed from the criteria
+        query itself. Only the fetch mode can be adjusted; refrain from
+        applying any other restriction.</para>
+
+        <para>One cannot use <methodname>setCriteriaQuery</methodname> if more
+        than one entity type is expected to be returned.</para>
+      </section>
+
+      <section id="projections">
+        <title>Projection</title>
+
+        <para>For some use cases, returning the domain object (graph) is
+        overkill. Only a small subset of the properties is necessary.
+        Hibernate Search allows you to return a subset of properties:</para>
+
+        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+query.<emphasis role="bold">setProjection( "id", "summary", "body", "mainAuthor.name" )</emphasis>;
+List results = query.list();
+Object[] firstResult = (Object[]) results.get(0);
+Integer id = (Integer) firstResult[0];
+String summary = (String) firstResult[1];
+String body = (String) firstResult[2];
+String authorName = (String) firstResult[3];</programlisting>
+
+        <para>Hibernate Search extracts the properties from the Lucene index
+        and converts them back to their object representation, returning a
+        list of <classname>Object[]</classname>. Projections avoid a
+        potential database round trip (useful if the query response time is
+        critical), but have some constraints:</para>
+
+        <itemizedlist>
+          <listitem>
+            <para>the properties projected must be stored in the index
+            (<literal>@Field(store=Store.YES)</literal>), which increases the
+            index size (see the mapping sketch after this list)</para>
+          </listitem>
+
+          <listitem>
+            <para>the properties projected must use a
+            <literal>FieldBridge</literal> implementing
+            <classname>org.hibernate.search.bridge.TwoWayFieldBridge</classname>
+            or
+            <literal>org.hibernate.search.bridge.TwoWayStringBridge</literal>,
+            the latter being the simpler version. All Hibernate Search
+            built-in types are two-way.</para>
+          </listitem>
+        </itemizedlist>
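+
+        <para>As a minimal sketch (assuming the Book entity used in the
+        examples; not taken from the reference examples), a property targeted
+        by a projection would be mapped like this:</para>
+
+        <programlisting>@Entity
+@Indexed
+public class Book {
+    ...
+    //stored in the index so that it can be projected
+    @Field(store = Store.YES)
+    public String getSummary() { return summary; }
+    ...
+}</programlisting>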
+
+        <para>Projection is also useful for another kind of use case. Lucene
+        provides some metadata information about the results. By using some
+        special placeholders, the projection mechanism can retrieve
+        it:</para>
+
+        <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+query.<emphasis role="bold">setProjection( FullTextQuery.SCORE, FullTextQuery.THIS, "mainAuthor.name" )</emphasis>;
+List results = query.list();
+Object[] firstResult = (Object[]) results.get(0);
+float score = (Float) firstResult[0];
+Book book = (Book) firstResult[1];
+String authorName = (String) firstResult[2];</programlisting>
+
+        <para>You can mix and match regular fields and special placeholders.
+        Here is the list of available placeholders:</para>
+
+        <itemizedlist>
+          <listitem>
+            <para>FullTextQuery.THIS: returns the initialized and managed
+            entity (as a non-projected query would have done)</para>
+          </listitem>
+
+          <listitem>
+            <para>FullTextQuery.DOCUMENT: returns the Lucene Document related
+            to the object projected</para>
+          </listitem>
+
+          <listitem>
+            <para>FullTextQuery.SCORE: returns the document score in the
+            query. The score is guaranteed to be between 0 and 1, but the
+            highest score is not necessarily equal to 1. Scores are handy to
+            compare one result against another for a given query but are
+            useless when comparing the results of different queries.</para>
+          </listitem>
+
+          <listitem>
+            <para>FullTextQuery.ID: the id property value of the projected
+            object</para>
+          </listitem>
+
+          <listitem>
+            <para>FullTextQuery.DOCUMENT_ID: the Lucene document id. Be
+            careful, the Lucene document id can change over time between two
+            different IndexReader openings (this feature is
+            experimental)</para>
+          </listitem>
+
+          <listitem>
+            <para>FullTextQuery.EXPLANATION: returns the Lucene Explanation
+            object for the matching object/document in the given query. Do
+            not use it if you retrieve a lot of data: running an explanation
+            is typically as costly as running the whole Lucene query once per
+            matching element. Make sure you use projection!</para>
+          </listitem>
+        </itemizedlist>
+      </section>
+    </section>
+  </section>
+
+  <section>
+    <title>Retrieving the results</title>
+
+    <para>Once the Hibernate Search query is built, executing it is in no way
+    different from executing an HQL or Criteria query. The same paradigm and
+    object semantics apply. All the common operations are available:
+    <methodname>list()</methodname>, <methodname>uniqueResult()</methodname>,
+    <methodname>iterate()</methodname>,
+    <methodname>scroll()</methodname>.</para>
+
+    <section>
+      <title>Performance considerations</title>
+
+      <para>If you expect a reasonable number of results (for example using
+      pagination) and expect to work on all of them,
+      <methodname>list()</methodname> or
+      <methodname>uniqueResult()</methodname> are recommended.
+      <methodname>list()</methodname> works best if the entity
+      <literal>batch-size</literal> is set up properly. Note that Hibernate
+      Search has to process all Lucene Hits elements (within the pagination)
+      when using <methodname>list()</methodname>,
+      <methodname>uniqueResult()</methodname> and
+      <methodname>iterate()</methodname>.</para>
+
+      <para>If you wish to minimize Lucene document loading,
+      <methodname>scroll()</methodname> is more appropriate. Don't forget to
+      close the <classname>ScrollableResults</classname> object when you're
+      done, since it holds Lucene resources. If you expect to use
+      <methodname>scroll</methodname> but wish to load objects in batches,
+      you can use <methodname>query.setFetchSize()</methodname>: when an
+      object is accessed, and if it is not already loaded, Hibernate Search
+      will load the next <literal>fetchSize</literal> objects in one
+      pass.</para>
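+
+      <para>A minimal sketch (assuming the Book entity used in the previous
+      examples) could look like this:</para>
+
+      <programlisting>org.hibernate.Query fullTextQuery = fullTextSession.createFullTextQuery( luceneQuery, Book.class );
+fullTextQuery.setFetchSize( 20 ); //load the managed objects 20 at a time
+ScrollableResults scroll = fullTextQuery.scroll();
+try {
+    while ( scroll.next() ) {
+        Book book = (Book) scroll.get( 0 );
+        //process the book
+    }
+}
+finally {
+    scroll.close(); //release the Lucene resources
+}</programlisting>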
+
+      <para>Pagination is preferred over scrolling, though.</para>
+    </section>
+
+    <section>
+      <title>Result size</title>
+
+      <para>It is sometimes useful to know the total number of matching
+      documents:</para>
+
+      <itemizedlist>
+        <listitem>
+          <para>for the Google-like feature 1-10 of about 888,000,000</para>
+        </listitem>
+
+        <listitem>
+          <para>to implement a fast pagination navigation</para>
+        </listitem>
+
+        <listitem>
+          <para>to implement a multi-step search engine (adding approximation
+          if the restricted query returns no or too few results)</para>
+        </listitem>
+      </itemizedlist>
+
+      <para>Retrieving all the matching documents just to count them would,
+      however, be costly.</para>
+
+      <para>Hibernate Search allows you to retrieve the total number of
+      matching documents regardless of the pagination parameters. Even more
+      interesting, you can retrieve the number of matching elements without
+      triggering a single object load.</para>
+
+      <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+assert 3245 == <emphasis role="bold">query.getResultSize()</emphasis>; //returns the number of matching books without loading a single one
+
+org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+query.setMaxResults(10);
+List results = query.list();
+assert 3245 == <emphasis role="bold">query.getResultSize()</emphasis>; //returns the total number of matching books regardless of pagination</programlisting>
+
+      <note>
+        <para>Like Google, the number of results is approximate if the index
+        is not fully up-to-date with the database (asynchronous cluster for
+        example).</para>
+      </note>
+    </section>
+
+    <section>
+      <title>ResultTransformer</title>
+
+      <para>Especially when using projection, the data structure returned by
+      a query (an object array in this case) does not always match the
+      application's needs. It is possible to apply a
+      <classname>ResultTransformer</classname> operation post query to build
+      the targeted data structure:</para>
+
+      <programlisting>org.hibernate.search.FullTextQuery query = s.createFullTextQuery( luceneQuery, Book.class );
+query.setProjection( "title", "mainAuthor.name" );
+
+<emphasis role="bold">query.setResultTransformer( 
+    new StaticAliasToBeanResultTransformer( BookView.class, "title", "author" ) 
+);</emphasis>
+List&lt;BookView&gt; results = (List&lt;BookView&gt;) query.list();
+for(BookView view : results) {
+    log.info( "Book: " + view.getTitle() + ", " + view.getAuthor() );
+}</programlisting>
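+
+      <para><classname>BookView</classname> here stands for a simple value
+      object of your own; a minimal, hypothetical sketch could look like
+      this:</para>
+
+      <programlisting>public class BookView {
+    private String title;
+    private String author;
+
+    public String getTitle() { return title; }
+    public void setTitle(String title) { this.title = title; }
+
+    public String getAuthor() { return author; }
+    public void setAuthor(String author) { this.author = author; }
+}</programlisting>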
+
+      <para>Examples of <classname>ResultTransformer</classname>
+      implementations can be found in the Hibernate Core codebase.</para>
+    </section>
+
+    <section>
+      <title>Understanding results</title>
+
+      <para>You will sometimes find yourself puzzled by a result showing up
+      in a query, or by a result not showing up. Luke is a great tool to
+      understand those mysteries, but Hibernate Search also lets you access
+      the Lucene <classname>Explanation</classname> object for a given result
+      (in a given query). This class is considered fairly advanced even for
+      Lucene users but can provide a good understanding of the scoring of an
+      object. You have two ways to access the Explanation object for a given
+      result:</para>
+
+      <itemizedlist>
+        <listitem>
+          <para>Use the <methodname>fullTextQuery.explain(int)</methodname>
+          method</para>
+        </listitem>
+
+        <listitem>
+          <para>Use projection</para>
+        </listitem>
+      </itemizedlist>
+
+      <para>The first approach takes a document id as a parameter and returns
+      the Explanation object. The document id can be retrieved using
+      projection and the <literal>FullTextQuery.DOCUMENT_ID</literal>
+      constant, as in the following sketch.</para>
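+
+      <para>A minimal sketch (reusing the hypothetical Dvd entity from the
+      projection example below) could look like this:</para>
+
+      <programlisting>FullTextQuery ftQuery = s.createFullTextQuery( luceneQuery, Dvd.class )
+        .setProjection( FullTextQuery.DOCUMENT_ID );
+List&lt;Object[]&gt; results = ftQuery.list();
+for (Object[] result : results) {
+    int documentId = (Integer) result[0];
+    Explanation explanation = ftQuery.explain( documentId );
+    display( explanation.toString() );
+}</programlisting>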
+
+      <warning>
+        <para>The document id has nothing to do with the entity id. Do not
+        confuse the two notions.</para>
+      </warning>
+
+      <para>The second approach lets you project the
+      <classname>Explanation</classname> object using the
+      <literal>FullTextQuery.EXPLANATION</literal> constant.</para>
+
+      <programlisting>FullTextQuery ftQuery = s.createFullTextQuery( luceneQuery, Dvd.class )
+        .setProjection( FullTextQuery.DOCUMENT_ID, <emphasis role="bold">FullTextQuery.EXPLANATION</emphasis>, FullTextQuery.THIS );
+@SuppressWarnings("unchecked") List&lt;Object[]&gt; results = ftQuery.list();
+for (Object[] result : results) {
+    Explanation e = (Explanation) result[1];
+    display( e.toString() );
+}</programlisting>
+
+      <para>Be careful: building the explanation object is quite expensive,
+      roughly as expensive as running the Lucene query again. Don't do it if
+      you don't need the object.</para>
+    </section>
+  </section>
+
+  <section>
+    <title>Filters</title>
+
+    <para>Apache Lucene has a powerful feature that allows you to filter
+    query results according to a custom filtering process. This is a very
+    powerful way to apply additional data restrictions, especially since
+    filters can be cached and reused. Some interesting use cases are:</para>
+
+    <itemizedlist>
+      <listitem>
+        <para>security</para>
+      </listitem>
+
+      <listitem>
+        <para>temporal data (e.g. view only last month's data)</para>
+      </listitem>
+
+      <listitem>
+        <para>population filter (e.g. search limited to a given
+        category)</para>
+      </listitem>
+
+      <listitem>
+        <para>and many more</para>
+      </listitem>
+    </itemizedlist>
+
+    <para>Hibernate Search pushes the concept further by introducing the
+    notion of parameterizable named filters which are transparently cached.
+    For people familiar with the notion of Hibernate Core filters, the API is
+    very similar:</para>
+
+    <programlisting>fullTextQuery = s.createFullTextQuery( query, Driver.class );
+fullTextQuery.enableFullTextFilter("bestDriver");
+fullTextQuery.enableFullTextFilter("security").setParameter( "login", "andre" );
+fullTextQuery.list(); //returns only best drivers where andre has credentials</programlisting>
+
+    <para>In this example we enabled two filters on top of the query. You can
+    enable (or disable) as many filters as you like.</para>
+
+    <para>Declaring filters is done through the
+    <classname>@FullTextFilterDef</classname> annotation. This annotation can
+    be on any <literal>@Indexed</literal> entity regardless of the query the
+    filter is later applied to. This implies that filter definitions are
+    global and their names must be unique. A
+    <classname>SearchException</classname> is thrown in case two different
+    <classname>@FullTextFilterDef</classname> annotations with the same name
+    are defined. Each named filter has to specify its actual filter
+    implementation.</para>
+
+    <programlisting>@Entity
+@Indexed
+@FullTextFilterDefs( {
+    <emphasis role="bold">@FullTextFilterDef(name = "bestDriver", impl = BestDriversFilter.class, cache=false)</emphasis>, 
+    <emphasis role="bold">@FullTextFilterDef(name = "security", impl = SecurityFilterFactory.class)</emphasis> 
+})
+public class Driver { ... }</programlisting>
+
+    <programlisting>public class BestDriversFilter extends <emphasis
+        role="bold">org.apache.lucene.search.Filter</emphasis> {
+
+    public BitSet bits(IndexReader reader) throws IOException {
+        BitSet bitSet = new BitSet( reader.maxDoc() );
+        TermDocs termDocs = reader.termDocs( new Term("score", "5") );
+        while ( termDocs.next() ) {
+            bitSet.set( termDocs.doc() );
+        }
+        return bitSet;
+    }
+}</programlisting>
+
+    <para><classname>BestDriversFilter</classname> is an example of a simple
+    Lucene filter which reduces the result set to drivers whose score is 5.
+    In this example the specified filter implements
+    <literal>org.apache.lucene.search.Filter</literal> directly and has a
+    no-arg constructor. The <literal>cache</literal> flag, defaulted to
+    <literal>true</literal>, tells Hibernate Search to look the filter up in
+    its internal cache and reuse it if found.</para>
+
+    <para>If your Filter creation requires additional steps or if the filter
+    you want to use does not have a no-arg constructor, you can use the
+    factory pattern:</para>
+
+    <programlisting>@Entity
+@Indexed
+@FullTextFilterDef(name = "bestDriver", impl = BestDriversFilterFactory.class)
+public class Driver { ... }
+
+public class BestDriversFilterFactory {
+
+    <emphasis role="bold">@Factory</emphasis>
+    public Filter getFilter() {
+        //some additional steps to cache the filter results per IndexReader
+        Filter bestDriversFilter = new BestDriversFilter();
+        return new CachingWrapperFilter(bestDriversFilter);
+    }
+}</programlisting>
+
+    <para>Hibernate Search will look for a <literal>@Factory</literal>
+    annotated method and use it to build the filter instance. The factory must
+    have a no-arg constructor. For people familiar with JBoss Seam, this is
+    similar to the component factory pattern, but the annotation is
+    different!</para>
+
+    <para>Named filters come in handy where parameters have to be passed to
+    the filter. For example a security filter might want to know which
+    security level you want to apply:</para>
+
+    <programlisting>fullTextQuery = s.createFullTextQuery( query, Driver.class );
+fullTextQuery.enableFullTextFilter("security")<emphasis role="bold">.setParameter( "level", 5 )</emphasis>;</programlisting>
+
+    <para>Each parameter name should have an associated setter on either the
+    filter or filter factory of the targeted named filter definition.</para>
+
+    <programlisting>public class SecurityFilterFactory {
+    private Integer level;
+
+    /**
+     * injected parameter
+     */
+    <emphasis role="bold">public void setLevel(Integer level)</emphasis> {
+        this.level = level;
+    }
+
+    <emphasis role="bold">@Key
+    public FilterKey getKey()</emphasis> {
+        StandardFilterKey key = new StandardFilterKey();
+        key.addParameter( level );
+        return key;
+    }
+
+    @Factory
+    public Filter getFilter() {
+        Query query = new TermQuery( new Term("level", level.toString() ) );
+        return new CachingWrapperFilter( new QueryWrapperFilter(query) );
+    }
+}</programlisting>
+
+    <para>Note the method annotated <classname>@Key</classname> returning a
+    <classname>FilterKey</classname> object. The returned object has a
+    special contract: the key object must implement
+    <methodname>equals()</methodname> / <methodname>hashCode()</methodname>
+    so that two keys are equal if and only if the given
+    <classname>Filter</classname> types are the same and the set of
+    parameters is the same. In other words, two filter keys are equal if and
+    only if the filters from which the keys are generated can be
+    interchanged. The key object is used as a key in the cache
+    mechanism.</para>
+
+    <para><classname>@Key</classname> methods are needed only if:</para>
+
+    <itemizedlist>
+      <listitem>
+        <para>you enabled the filter caching system (enabled by
+        default)</para>
+      </listitem>
+
+      <listitem>
+        <para>your filter has parameters</para>
+      </listitem>
+    </itemizedlist>
+
+    <para>In most cases, using the <literal>StandardFilterKey</literal>
+    implementation will be good enough. It delegates the
+    <methodname>equals()</methodname> / <methodname>hashCode()</methodname>
+    implementation to each of the parameters' equals and hashCode
+    methods.</para>
+
+    <para>As mentioned before, the defined filters are cached by default and
+    the cache uses a combination of hard and soft references to allow
+    disposal of memory when needed. The hard reference cache keeps track of
+    the most recently used filters and transforms the ones least used to
+    <classname>SoftReferences</classname> when needed. Once the limit of the
+    hard reference cache is reached, additional filters are cached as
+    <classname>SoftReferences</classname>. To adjust the size of the hard
+    reference cache, use
+    <literal>hibernate.search.filter.cache_strategy.size</literal> (defaults
+    to 128). For advanced use of filter caching, you can implement your own
+    <classname>FilterCachingStrategy</classname>. The classname is defined by
+    <literal>hibernate.search.filter.cache_strategy</literal>.</para>
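+
+    <para>As an illustration, these settings go into the usual Hibernate
+    configuration; the custom strategy class name below is purely
+    hypothetical:</para>
+
+    <programlisting>hibernate.search.filter.cache_strategy.size = 256
+# or plug in a custom caching strategy
+hibernate.search.filter.cache_strategy = com.acme.search.MyFilterCachingStrategy</programlisting>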
+
+    <para>This filter caching mechanism should not be confused with caching
+    the actual filter results. In Lucene it is common practice to wrap
+    filters in a <classname>CachingWrapperFilter</classname>. The wrapper
+    caches the <classname>BitSet</classname> returned from the
+    <methodname>bits(IndexReader reader)</methodname> method per
+    <classname>IndexReader</classname> to avoid expensive recomputation. The
+    <classname>BitSet</classname> uses one bit per document. If, for example,
+    your index contains ten million documents, the
+    <classname>BitSet</classname> structure will take roughly 1.2 MB of
+    memory. It is important to mention that the computed
+    <classname>BitSet</classname> is only cacheable for the same
+    <classname>IndexReader</classname> instance, because the reader
+    effectively represents the state of the index at the moment it was
+    opened. The document list cannot change within an opened
+    <classname>IndexReader</classname>. A different/new
+    <classname>IndexReader</classname> instance, however, potentially works
+    on a different set of <classname>Document</classname>s (either from a
+    different index or simply because the index has changed), hence the
+    cached <classname>BitSet</classname> has to be recomputed.</para>
+
+    <para>Hibernate Search also helps with this aspect of caching. By default
+    the <literal>cache</literal> flag of
+    <classname>@FullTextFilterDef</classname> is set to
+    <literal>FilterCacheModeType.INSTANCE_AND_BITSETRESULTS</literal>, which
+    will automatically cache the filter instance as well as wrap the
+    specified filter in a Hibernate specific implementation of
+    <classname>CachingWrapperFilter</classname>
+    (<classname>org.hibernate.search.filter.CachingWrapperFilter</classname>).
+    In contrast to Lucene's version of this class,
+    <classname>SoftReference</classname>s are used together with a hard
+    reference count (see the discussion about the filter cache). The hard
+    reference count can be adjusted using
+    <literal>hibernate.search.filter.cache_bit_results.size</literal>
+    (defaults to 5). The wrapping behaviour can be controlled using the
+    <literal>@FullTextFilterDef.cache</literal> parameter. There are three
+    different values for this parameter:</para>
+
+    <para><informaltable align="left" width="">
+        <tgroup cols="2">
+          <colspec align="center" />
+
+          <thead>
+            <row>
+              <entry align="center">Value</entry>
+
+              <entry align="center">Definition</entry>
+            </row>
+          </thead>
+
+          <tbody>
+            <row>
+              <entry align="left">FilterCacheModeType.NONE</entry>
+
+              <entry>No filter instance and no result is cached by Hibernate
+              Search. For every filter call, a new filter instance is created.
+              This setting might be useful for rapidly changing data sets or
+              heavily memory constrained environments. </entry>
+            </row>
+
+            <row>
+              <entry align="left">FilterCacheModeType.INSTANCE_ONLY</entry>
+
+              <entry>The filter instance is cached and reused across
+              concurrent <methodname>Filter.bits()</methodname> calls.
+              <classname>BitSet</classname> results are not cached. This
+              setting is useful when a filter uses its own specific caching
+              mechanism or the filter results change dynamically due to
+              application specific events making <classname>BitSet</classname>
+              caching in both cases unnecessary. </entry>
+            </row>
+
+            <row>
+              <entry
+              align="left">FilterCacheModeType.INSTANCE_AND_BITSETRESULTS</entry>
+
+              <entry>Both the filter instance and the
+              <classname>BitSet</classname> results are cached. This is the
+              default value.</entry>
+            </row>
+          </tbody>
+        </tgroup>
+      </informaltable>Last but not least - why should filters be cached? There
+    are two areas where filter caching shines:</para>
+
+    <itemizedlist>
+      <listitem>
+        <para>the system does not update the targeted entity index often (in
+        other words, the IndexReader is reused a lot)</para>
+      </listitem>
+
+      <listitem>
+        <para>the Filter BitSet is expensive to compute (compared to the time
+        spent to execute the query)</para>
+      </listitem>
+    </itemizedlist>
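+
+    <para>As an illustration (a sketch reusing the
+    <classname>BestDriversFilter</classname> shown earlier, following the
+    table above), the cache mode is selected per filter definition via the
+    <literal>cache</literal> parameter:</para>
+
+    <programlisting>@Entity
+@Indexed
+@FullTextFilterDef(name = "bestDriver", impl = BestDriversFilter.class,
+                   cache = FilterCacheModeType.NONE)
+public class Driver { ... }</programlisting>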
+  </section>
+
+  <section>
+    <title>Optimizing the query process</title>
+
+    <para>Query performance depends on several criteria:</para>
+
+    <itemizedlist>
+      <listitem>
+        <para>the Lucene query itself: read the literature on this
+        subject</para>
+      </listitem>
+
+      <listitem>
+        <para>the number of objects loaded: use pagination (always ;-) ) or
+        index projection (if needed)</para>
+      </listitem>
+
+      <listitem>
+        <para>the way Hibernate Search interacts with the Lucene readers:
+        define the appropriate <xref
+        linkend="search-architecture-readerstrategy" />.</para>
+      </listitem>
+    </itemizedlist>
+  </section>
+
+  <section>
+    <title>Native Lucene Queries</title>
+
+    <para>If you wish to use some specific features of Lucene, you can always
+    run Lucene specific queries. Check <xref linkend="search-lucene-native" />
+    for more information.</para>
+  </section>
+</chapter>

Deleted: search/tags/v3_1_0_Beta2/ivy.xml
===================================================================
--- search/trunk/ivy.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/ivy.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<ivy-module version="1.3"
-            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-            xsi:noNamespaceSchemaLocation=
-                  "http://www.jayasoft.org/misc/ivy/ivy.xsd">
-    <info organisation="org.hibernate" module="search"/>
-    <configurations>
-        <conf name="default" description="Core module dependencies"/>
-        <conf name="test" visibility="private" description="Dependencies needed for testing purposes"/>
-    </configurations>
-    <publications>
-        <artifact name="hibernate-search" conf="default"/>
-    </publications>
-    <dependencies>
-		
-        <!-- compile time dependencies -->
-        <dependency name="ejb3-persistence" rev="1.0.2.GA" conf="default->default"/>
-        <dependency name="commons-annotations" rev="3.1.0.GA" conf="default->default"/>
-        <dependency org="org.slf4j" name="slf4j-api" rev="1.4.2" conf="default->default"/>        
-        <dependency org="org.hibernate" name="hibernate-core" rev="3.3.1.GA" conf="default->default"/>
-        <dependency org="javax.transaction" name="jta" rev="1.1" conf="default->default"/>
-        <dependency org="org.apache.lucene" name="lucene-core" rev="2.4.0" conf="default->default"/>
-        <dependency org="javax.jms" name="jms" rev="1.1" conf="default->default"/> <!-- optional -->
-        <dependency org="javax.annotation" name="jsr250-api" rev="1.0" conf="default->default"/> <!-- optional -->
-        <dependency org="org.apache.solr" name="solr-core" rev="1.3.0" conf="default->default"/>
-        <dependency org="org.apache.solr" name="solr-common" rev="1.3.0" conf="default->default"/>
-        <dependency org="org.apache.solr" name="solr-lucene-snowball" rev="1.3.0" conf="default->default"/>
-        <!--dependency org="org.hibernate.apache.lucene.solr" name="apache-solr-analyzer" rev="1.3.0" conf="default->default"/-->
-
-        <!-- transitive dependencies -->
-        <dependency org="antlr" name="antlr" rev="2.7.6" conf="test->default"/>
-        <dependency org="commons-collections" name="commons-collections" rev="3.1" conf="test->default"/>
-        <dependency org="dom4j" name="dom4j" rev="1.6.1" conf="test->default"/>
-
-        <!-- test deps -->
-        <dependency name="annotations" rev="3.4.0.GA" conf="test->default"/>
-        <dependency name="entitymanager" rev="3.4.0.GA" conf="test->default"/>
-        <dependency org="cglib" name="cglib" rev="2.1_3" conf="test->default"/>
-        <dependency org="asm" name="asm" rev="1.5.3" conf="test->default"/>
-        <dependency org="asm" name="asm-attrs" rev="1.5.3" conf="test->default"/>
-        <dependency org="org.slf4j" name="slf4j-log4j12" rev="1.4.2" conf="test->default"/>
-        <dependency org="log4j" name="log4j" rev="1.2.14" conf="test->default"/>
-        <dependency org="junit" name="junit" rev="3.8.1" conf="test->default"/>
-        <dependency org="hsqldb" name="hsqldb" rev="1.8.0.2" conf="test->default"/>
-        <dependency org="postgresql" name="postgresql" rev="8.3-603.jdbc3" conf="test->default"/>
-        <dependency org="mysql" name="mysql-connector-java" rev="5.1.6" conf="test->default"/>
-        <dependency org="org.apache.derby" name="derby" rev="10.2.1.6" conf="test->default"/>
-
-    </dependencies>
-</ivy-module>
\ No newline at end of file

Copied: search/tags/v3_1_0_Beta2/ivy.xml (from rev 15398, search/trunk/ivy.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/ivy.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/ivy.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ivy-module version="1.3"
+            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+            xsi:noNamespaceSchemaLocation=
+                  "http://www.jayasoft.org/misc/ivy/ivy.xsd">
+    <info organisation="org.hibernate" module="search"/>
+    <configurations>
+        <conf name="default" description="Core module dependencies"/>
+        <conf name="test" visibility="private" description="Dependencies needed for testing purposes"/>
+    </configurations>
+    <publications>
+        <artifact name="hibernate-search" conf="default"/>
+    </publications>
+    <dependencies>
+		
+        <!-- compile time dependencies -->
+        <dependency name="ejb3-persistence" rev="1.0.2.GA" conf="default->default"/>
+        <dependency name="commons-annotations" rev="3.1.0.GA" conf="default->default"/>
+        <dependency org="org.slf4j" name="slf4j-api" rev="1.4.2" conf="default->default"/>        
+        <dependency org="org.hibernate" name="hibernate-core" rev="3.3.1.GA" conf="default->default"/>
+        <dependency org="javax.transaction" name="jta" rev="1.1" conf="default->default"/>
+        <dependency org="org.apache.lucene" name="lucene-core" rev="2.4.0" conf="default->default"/>
+        <dependency org="org.apache.lucene" name="lucene-snowball" rev="2.4.0" conf="default->default"/>
+        <dependency org="javax.jms" name="jms" rev="1.1" conf="default->default"/> <!-- optional -->
+        <dependency org="javax.annotation" name="jsr250-api" rev="1.0" conf="default->default"/> <!-- optional -->
+        <dependency org="org.apache.solr" name="solr-core" rev="1.3.0" conf="default->default"/>
+        <dependency org="org.apache.solr" name="solr-common" rev="1.3.0" conf="default->default"/>
+
+        <!-- transitive dependencies -->
+        <dependency org="antlr" name="antlr" rev="2.7.6" conf="test->default"/>
+        <dependency org="commons-collections" name="commons-collections" rev="3.1" conf="test->default"/>
+        <dependency org="dom4j" name="dom4j" rev="1.6.1" conf="test->default"/>
+
+        <!-- test deps -->
+        <dependency name="annotations" rev="3.4.0.GA" conf="test->default"/>
+        <dependency name="entitymanager" rev="3.4.0.GA" conf="test->default"/>
+        <dependency org="cglib" name="cglib" rev="2.1_3" conf="test->default"/>
+        <dependency org="asm" name="asm" rev="1.5.3" conf="test->default"/>
+        <dependency org="asm" name="asm-attrs" rev="1.5.3" conf="test->default"/>
+        <dependency org="org.slf4j" name="slf4j-log4j12" rev="1.4.2" conf="test->default"/>
+        <dependency org="log4j" name="log4j" rev="1.2.14" conf="test->default"/>
+        <dependency org="junit" name="junit" rev="3.8.1" conf="test->default"/>
+        <dependency org="hsqldb" name="hsqldb" rev="1.8.0.2" conf="test->default"/>
+        <dependency org="postgresql" name="postgresql" rev="8.3-603.jdbc3" conf="test->default"/>
+        <dependency org="mysql" name="mysql-connector-java" rev="5.1.6" conf="test->default"/>
+        <dependency org="org.apache.derby" name="derby" rev="10.2.1.6" conf="test->default"/>
+
+    </dependencies>
+</ivy-module>
\ No newline at end of file

Deleted: search/tags/v3_1_0_Beta2/lib/README.txt
===================================================================
--- search/trunk/lib/README.txt	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/lib/README.txt	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,20 +0,0 @@
-Hibernate Search dependencies
-=============================
-
-Core
-====
-hibernate-commons-annotations.jar: required
-hibernate-core.jar: required + hibernate core dependencies - see Hibernate Core for more information  
-lucene-core.jar: required (used version 2.4.0)
-jta.jar: required 
-slf4j-api: required together with a slf4j-[impl].jar eg slf4j-log4j12.jar  
-
-jms.jar: optional, needed for JMS based clustering strategy, usually available with your application server
-jsr-250-api.jar: optional, needed for JMS based clustering strategy, usually available with your application server
-solr-core.jar, solr-common.jar: optional (used version 1.3.0), needed if @AnalyzerDef is used
-solr-lucenen-snowball.jar: optional, needed if snowball stemmer is used
-
-Test
-====
-hibernate-annotations.jar: required
-hibernate-entitymanager.jar: required

Copied: search/tags/v3_1_0_Beta2/lib/README.txt (from rev 15398, search/trunk/lib/README.txt)
===================================================================
--- search/tags/v3_1_0_Beta2/lib/README.txt	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/lib/README.txt	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,20 @@
+Hibernate Search dependencies
+=============================
+
+Core
+====
+hibernate-commons-annotations.jar: required
+hibernate-core.jar: required + hibernate core dependencies - see Hibernate Core for more information  
+lucene-core.jar: required (used version 2.4.0)
+jta.jar: required 
+slf4j-api: required together with a slf4j-[impl].jar eg slf4j-log4j12.jar  
+
+jms.jar: optional, needed for JMS based clustering strategy, usually available with your application server
+jsr-250-api.jar: optional, needed for JMS based clustering strategy, usually available with your application server
+solr-core.jar, solr-common.jar: optional (used version 1.3.0), needed if @AnalyzerDef is used
+lucene-snowball.jar: optional, needed if snowball stemmer is used
+
+Test
+====
+hibernate-annotations.jar: required
+hibernate-entitymanager.jar: required

Deleted: search/tags/v3_1_0_Beta2/pom.xml
===================================================================
--- search/trunk/pom.xml	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/pom.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,89 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.hibernate</groupId>
-    <artifactId>hibernate-search</artifactId>
-    <version>3.1.0.Beta2</version>
-    <description>Hibernate Search</description>
-    <dependencies>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-core</artifactId>
-            <version>3.3.1.GA</version>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-commons-annotations</artifactId>
-            <version>3.1.0.GA</version>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>ejb3-persistence</artifactId>
-            <version>1.0.2.GA</version>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.lucene</groupId>
-            <artifactId>lucene-core</artifactId>
-            <version>2.4.0</version>
-        </dependency>
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
-            <version>1.4.2</version>
-        </dependency>
-        <dependency>
-			<groupId>javax.transaction</groupId>
-			<artifactId>jta</artifactId>
-			<version>1.1</version>
-		</dependency>
-
-
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-annotations</artifactId>
-            <version>3.4.0.GA</version>
-            <optional>true</optional>
-        </dependency>
-        <dependency>
-            <groupId>org.hibernate</groupId>
-            <artifactId>hibernate-entitymanager</artifactId>
-            <version>3.4.0.GA</version>
-            <optional>true</optional>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-common</artifactId>
-            <version>1.3.0</version>
-            <optional>true</optional>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-core</artifactId>
-            <version>1.3.0</version>
-            <optional>true</optional>
-        </dependency>
-       <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-lucene-snowball</artifactId>
-            <version>1.3.0</version>
-            <optional>true</optional>
-        </dependency>
-
-
-        <dependency>
-			<groupId>javax.jms</groupId>
-			<artifactId>jms</artifactId>
-			<version>1.1</version>
-            <scope>runtime</scope>
-            <optional>true</optional>
-        </dependency>
-        <dependency>
-			<groupId>javax.annotation</groupId>
-			<artifactId>jsr250-api</artifactId>
-			<version>1.0</version>
-            <scope>runtime</scope>
-            <optional>true</optional>
-        </dependency>
-    </dependencies>
-</project>

Copied: search/tags/v3_1_0_Beta2/pom.xml (from rev 15398, search/trunk/pom.xml)
===================================================================
--- search/tags/v3_1_0_Beta2/pom.xml	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/pom.xml	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,89 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.hibernate</groupId>
+    <artifactId>hibernate-search</artifactId>
+    <version>3.1.0.Beta2</version>
+    <description>Hibernate Search</description>
+    <dependencies>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-core</artifactId>
+            <version>3.3.1.GA</version>
+        </dependency>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-commons-annotations</artifactId>
+            <version>3.1.0.GA</version>
+        </dependency>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>ejb3-persistence</artifactId>
+            <version>1.0.2.GA</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-core</artifactId>
+            <version>2.4.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.4.2</version>
+        </dependency>
+        <dependency>
+			<groupId>javax.transaction</groupId>
+			<artifactId>jta</artifactId>
+			<version>1.1</version>
+		</dependency>
+
+
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-annotations</artifactId>
+            <version>3.4.0.GA</version>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.hibernate</groupId>
+            <artifactId>hibernate-entitymanager</artifactId>
+            <version>3.4.0.GA</version>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.solr</groupId>
+            <artifactId>solr-common</artifactId>
+            <version>1.3.0</version>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.solr</groupId>
+            <artifactId>solr-core</artifactId>
+            <version>1.3.0</version>
+            <optional>true</optional>
+        </dependency>
+       <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-snowball</artifactId>
+            <version>2.4.0</version>
+            <optional>true</optional>
+        </dependency>
+
+
+        <dependency>
+			<groupId>javax.jms</groupId>
+			<artifactId>jms</artifactId>
+			<version>1.1</version>
+            <scope>runtime</scope>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+			<groupId>javax.annotation</groupId>
+			<artifactId>jsr250-api</artifactId>
+			<version>1.0</version>
+            <scope>runtime</scope>
+            <optional>true</optional>
+        </dependency>
+    </dependencies>
+</project>

Deleted: search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/annotations/FilterCacheModeType.java	2008-10-25 22:00:53 UTC (rev 15392)
+++ search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java	2008-10-27 11:50:45 UTC (rev 15400)
@@ -1,30 +0,0 @@
-package org.hibernate.search.annotations;
-
-/**
- * Cache mode strategy for Full Text filters
- *
- * @author Emmanuel Bernard
- */
-public enum FilterCacheModeType {
-	/**
-	 * No filter instance and no result is cached by Hibernate Search
-	 * For every filter call, a new filter instance is created
-	 */
-	NONE,
-
-	/**
-	 * The filter instance is cached by Hibernate Search and reused across
-	 * concurrent filter.bits() calls
-	 * Results are not cache by Hibernate Search
-	 */
-	INSTANCE_ONLY,
-
-	/**
-	 * Both the filter instance and the BitSet results are cached.
-	 * The filter instance is cached by Hibernate Search and reused across
-	 * concurrent filter.bits() calls
-	 * BitSet Results are cached per IndexReader 
-	 */
-	INSTANCE_AND_BITSETRESULTS
-
-}

Copied: search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java (from rev 15395, search/trunk/src/java/org/hibernate/search/annotations/FilterCacheModeType.java)
===================================================================
--- search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java	                        (rev 0)
+++ search/tags/v3_1_0_Beta2/src/java/org/hibernate/search/annotations/FilterCacheModeType.java	2008-10-27 11:50:45 UTC (rev 15400)
@@ -0,0 +1,36 @@
+package org.hibernate.search.annotations;
+
+/**
+ * Cache mode strategy for <code>FullTextFilterDef</code>s.
+ *
+ * @see FullTextFilterDef
+ * @author Emmanuel Bernard
+ */
+public enum FilterCacheModeType {
+	/**
+	 * No filter instance and no result is cached by Hibernate Search.
+	 * For every filter call, a new filter instance is created.
+	 */
+	NONE,
+
+	/**
+	 * The filter instance is cached by Hibernate Search and reused across
+	 * concurrent <code>Filter.bits()</code> calls.
+	 * Results are not cached by Hibernate Search.
+	 *
+	 * @see org.apache.lucene.search.Filter#bits(org.apache.lucene.index.IndexReader)
+
+	 */
+	INSTANCE_ONLY,
+
+	/**
+	 * Both the filter instance and the <code>BitSet</code> results are cached.
+	 * The filter instance is cached by Hibernate Search and reused across
+	 * concurrent <code>Filter.bits()</code> calls.
+	 * <code>BitSet</code> results are cached per <code>IndexReader</code>.
+	 *
+	 * @see org.apache.lucene.search.Filter#bits(org.apache.lucene.index.IndexReader) 
+	 */
+	INSTANCE_AND_BITSETRESULTS
+
+}



