Hibernate SVN: r15628 - in search/trunk/src/java/org/hibernate/search: filter and 1 other directories.
by hibernate-commits@lists.jboss.org
Author: sannegrinovero
Date: 2008-11-29 09:06:12 -0500 (Sat, 29 Nov 2008)
New Revision: 15628
Modified:
search/trunk/src/java/org/hibernate/search/event/ContextHolder.java
search/trunk/src/java/org/hibernate/search/filter/AndDocIdSet.java
search/trunk/src/java/org/hibernate/search/filter/EmptyDocIdBitSet.java
search/trunk/src/java/org/hibernate/search/query/ScrollableResultsImpl.java
Log:
javadoc and really minor code improvements
Modified: search/trunk/src/java/org/hibernate/search/event/ContextHolder.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/event/ContextHolder.java 2008-11-29 13:10:14 UTC (rev 15627)
+++ search/trunk/src/java/org/hibernate/search/event/ContextHolder.java 2008-11-29 14:06:12 UTC (rev 15628)
@@ -18,7 +18,7 @@
new ThreadLocal<WeakHashMap<Configuration, SearchFactoryImpl>>();
//code doesn't have to be multithreaded because SF creation is not.
- //this is not a public API, should really only be used during the SessionFActory building
+ //this is not a public API, should really only be used during the SessionFactory building
public static SearchFactoryImpl getOrBuildSearchFactory(Configuration cfg) {
WeakHashMap<Configuration, SearchFactoryImpl> contextMap = contexts.get();
if ( contextMap == null ) {
Modified: search/trunk/src/java/org/hibernate/search/filter/AndDocIdSet.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/filter/AndDocIdSet.java 2008-11-29 13:10:14 UTC (rev 15627)
+++ search/trunk/src/java/org/hibernate/search/filter/AndDocIdSet.java 2008-11-29 14:06:12 UTC (rev 15628)
@@ -1,3 +1,4 @@
+// $Id$
package org.hibernate.search.filter;
import java.io.IOException;
@@ -20,7 +21,6 @@
private DocIdSet docIdBitSet;
private final List<DocIdSet> andedDocIdSets;
-
private final int maxDocNumber;
public AndDocIdSet(List<DocIdSet> andedDocIdSets, int maxDocs) {
Property changes on: search/trunk/src/java/org/hibernate/search/filter/AndDocIdSet.java
___________________________________________________________________
Name: svn:keywords
+ Id
Modified: search/trunk/src/java/org/hibernate/search/filter/EmptyDocIdBitSet.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/filter/EmptyDocIdBitSet.java 2008-11-29 13:10:14 UTC (rev 15627)
+++ search/trunk/src/java/org/hibernate/search/filter/EmptyDocIdBitSet.java 2008-11-29 14:06:12 UTC (rev 15628)
@@ -1,6 +1,6 @@
+// $Id$
package org.hibernate.search.filter;
-import java.io.IOException;
import java.io.Serializable;
import org.apache.lucene.search.DocIdSet;
@@ -9,6 +9,7 @@
/**
* A DocIdSet which is always empty.
* Stateless and ThreadSafe.
+ *
* @author Sanne Grinovero
*/
public final class EmptyDocIdBitSet extends DocIdSet implements Serializable {
@@ -17,7 +18,7 @@
public static final DocIdSet instance = new EmptyDocIdBitSet();
- private final DocIdSetIterator iterator = new EmptyDocIdSetIterator();
+ private static final DocIdSetIterator iterator = new EmptyDocIdSetIterator();
private EmptyDocIdBitSet(){
// is singleton
@@ -40,12 +41,12 @@
}
@Override
- public final boolean next() throws IOException {
+ public final boolean next() {
return false;
}
@Override
- public final boolean skipTo(int target) throws IOException {
+ public final boolean skipTo(int target) {
return false;
}
Property changes on: search/trunk/src/java/org/hibernate/search/filter/EmptyDocIdBitSet.java
___________________________________________________________________
Name: svn:keywords
+ Id
Modified: search/trunk/src/java/org/hibernate/search/query/ScrollableResultsImpl.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/query/ScrollableResultsImpl.java 2008-11-29 13:10:14 UTC (rev 15627)
+++ search/trunk/src/java/org/hibernate/search/query/ScrollableResultsImpl.java 2008-11-29 14:06:12 UTC (rev 15628)
@@ -29,22 +29,24 @@
import org.hibernate.type.Type;
/**
- * Implements scollable and paginated resultsets.
- * Contrary to query#iterate() or query#list(), this implementation is
+ * Implements scrollable and paginated resultsets.
+ * Contrary to Query#iterate() or Query#list(), this implementation is
* exposed to returned null objects (if the index is out of date).
* <p/>
* <p/>
- * + * The following methods that change the value of 'current' will check
- * + * and set its value to either 'afterLast' or 'beforeFirst' depending
- * + * on direction. This is to prevent rogue values from setting it outside
- * + * the boundaries of the results.
- * + * <ul>
- * + * <li>next()</li>
- * + * <li>previous()</li>
- * + * <li>scroll(i)</li>
- * + * <li>last()</li>
- * + * <li>first()</li>
- * + * </ul>
+ * The following methods that change the value of 'current' will check
+ * and set its value to either 'afterLast' or 'beforeFirst' depending
+ * on direction. This is to prevent rogue values from setting it outside
+ * the boundaries of the results.
+ * <ul>
+ * <li>next()</li>
+ * <li>previous()</li>
+ * <li>scroll(i)</li>
+ * <li>last()</li>
+ * <li>first()</li>
+ * </ul>
+ *
+ * @see org.hibernate.Query
*
* @author Emmanuel Bernard
* @author John Griffin
@@ -125,14 +127,12 @@
}
/**
- * Increases cursor pointer by one. If this places it >
- * max + 1 (afterLast) then set it to afterLast and return
- * false.
- *
- * @return booolean
- * @throws HibernateException
+ * {@inheritDoc}
*/
- public boolean next() throws HibernateException {
+ public boolean next() {
+ // Increases cursor pointer by one. If this places it >
+ // max + 1 (afterLast) then set it to afterLast and return
+ // false.
if ( ++current > max ) {
afterLast();
return false;
@@ -140,15 +140,10 @@
return true;
}
- /**
- * Decreases cursor pointer by one. If this places it <
- * first - 1 (beforeFirst) then set it to beforeFirst and
- * return false.
- *
- * @return boolean
- * @throws HibernateException
- */
- public boolean previous() throws HibernateException {
+ public boolean previous() {
+ // Decreases cursor pointer by one. If this places it <
+ // first - 1 (beforeFirst) then set it to beforeFirst and
+ // return false.
if ( --current < first ) {
beforeFirst();
return false;
@@ -156,16 +151,10 @@
return true;
}
- /**
- * Since we have to take into account that we can scroll any
- * amount positive or negative, we perform the same tests that
- * we performed in next() and previous().
- *
- * @param i the scroll distance.
- * @return boolean
- * @throws HibernateException
- */
- public boolean scroll(int i) throws HibernateException {
+ public boolean scroll(int i) {
+ // Since we have to take into account that we can scroll any
+ // amount positive or negative, we perform the same tests that
+ // we performed in next() and previous().
current = current + i;
if ( current > max ) {
afterLast();
@@ -180,7 +169,7 @@
}
}
- public boolean last() throws HibernateException {
+ public boolean last() {
current = max;
if ( current < first ) {
beforeFirst();
@@ -189,7 +178,7 @@
return max >= first;
}
- public boolean first() throws HibernateException {
+ public boolean first() {
current = first;
if ( current > max ) {
afterLast();
@@ -198,23 +187,23 @@
return max >= first;
}
- public void beforeFirst() throws HibernateException {
+ public void beforeFirst() {
current = first - 1;
}
- public void afterLast() throws HibernateException {
+ public void afterLast() {
current = max + 1;
}
- public boolean isFirst() throws HibernateException {
+ public boolean isFirst() {
return current == first;
}
- public boolean isLast() throws HibernateException {
+ public boolean isLast() {
return current == max;
}
- public void close() throws HibernateException {
+ public void close() {
try {
searchFactory.getReaderProvider().closeReader( searcher.getIndexReader() );
}
@@ -235,96 +224,180 @@
return resultContext.get( entityInfos[current - first] );
}
- public Object get(int i) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Object get(int i) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
public Type getType(int i) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Integer getInteger(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Integer getInteger(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Long getLong(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Long getLong(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Float getFloat(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Float getFloat(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Boolean getBoolean(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Boolean getBoolean(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Double getDouble(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Double getDouble(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Short getShort(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Short getShort(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Byte getByte(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Byte getByte(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Character getCharacter(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Character getCharacter(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public byte[] getBinary(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public byte[] getBinary(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public String getText(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public String getText(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Blob getBlob(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Blob getBlob(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Clob getClob(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Clob getClob(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public String getString(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public String getString(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public BigDecimal getBigDecimal(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public BigDecimal getBigDecimal(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public BigInteger getBigInteger(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public BigInteger getBigInteger(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Date getDate(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Date getDate(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Locale getLocale(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Locale getLocale(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public Calendar getCalendar(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public Calendar getCalendar(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public TimeZone getTimeZone(int col) throws HibernateException {
+ /**
+ * This method is not supported on Lucene based queries
+ * @throws UnsupportedOperationException always thrown
+ */
+ public TimeZone getTimeZone(int col) {
throw new UnsupportedOperationException( "Lucene does not work on columns" );
}
- public int getRowNumber() throws HibernateException {
+ public int getRowNumber() {
if ( max < first ) return -1;
return current - first;
}
- public boolean setRowNumber(int rowNumber) throws HibernateException {
+ public boolean setRowNumber(int rowNumber) {
if ( rowNumber >= 0 ) {
current = first + rowNumber;
}
16 years
Hibernate SVN: r15627 - in search/trunk/src: test/org/hibernate/search/test/filter and 1 other directory.
by hibernate-commits@lists.jboss.org
Author: sannegrinovero
Date: 2008-11-29 08:10:14 -0500 (Sat, 29 Nov 2008)
New Revision: 15627
Added:
search/trunk/src/java/org/hibernate/search/filter/FilterOptimizationHelper.java
search/trunk/src/test/org/hibernate/search/test/filter/FiltersOptimizationTest.java
Modified:
search/trunk/src/java/org/hibernate/search/filter/ChainedFilter.java
Log:
HSEARCH-299 use of bit operations when possible to chain Filters
Modified: search/trunk/src/java/org/hibernate/search/filter/ChainedFilter.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/filter/ChainedFilter.java 2008-11-27 17:26:25 UTC (rev 15626)
+++ search/trunk/src/java/org/hibernate/search/filter/ChainedFilter.java 2008-11-29 13:10:14 UTC (rev 15627)
@@ -12,7 +12,13 @@
import org.hibernate.annotations.common.AssertionFailure;
/**
+ * <p>A Filter capable of chaining other filters, so that it's
+ * possible to apply several filters on a Query.</p>
+ * <p>The resulting filter will only enable result Documents
+ * if no filter removed it.</p>
+ *
* @author Emmanuel Bernard
+ * @author Sanne Grinovero
*/
public class ChainedFilter extends Filter {
@@ -26,16 +32,6 @@
public BitSet bits(IndexReader reader) throws IOException {
throw new UnsupportedOperationException();
- /*
- if (chainedFilters.size() == 0) throw new AssertionFailure("Chainedfilter has no filters to chain for");
- //we need to copy the first BitSet because BitSet is modified by .logicalOp
- Filter filter = chainedFilters.get( 0 );
- BitSet result = (BitSet) filter.bits( reader ).clone();
- for (int index = 1 ; index < chainedFilters.size() ; index++) {
- result.and( chainedFilters.get( index ).bits( reader ) );
- }
- return result;
- */
}
@Override
@@ -45,21 +41,25 @@
throw new AssertionFailure( "Chainedfilter has no filters to chain for" );
}
else if ( size == 1 ) {
- return chainedFilters.get(0).getDocIdSet(reader);
+ return chainedFilters.get( 0 ).getDocIdSet( reader );
}
else {
List<DocIdSet> subSets = new ArrayList<DocIdSet>( size );
for ( Filter f : chainedFilters ) {
subSets.add( f.getDocIdSet( reader ) );
}
+ subSets = FilterOptimizationHelper.mergeByBitAnds( subSets );
+ if ( subSets.size() == 1 ) {
+ return subSets.get( 0 );
+ }
return new AndDocIdSet( subSets, reader.maxDoc() );
}
}
public String toString() {
- StringBuilder sb = new StringBuilder("ChainedFilter [");
+ StringBuilder sb = new StringBuilder( "ChainedFilter [" );
for (Filter filter : chainedFilters) {
- sb.append( "\n ").append( filter.toString() );
+ sb.append( "\n " ).append( filter.toString() );
}
return sb.append("\n]" ).toString();
}
Added: search/trunk/src/java/org/hibernate/search/filter/FilterOptimizationHelper.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/filter/FilterOptimizationHelper.java (rev 0)
+++ search/trunk/src/java/org/hibernate/search/filter/FilterOptimizationHelper.java 2008-11-29 13:10:14 UTC (rev 15627)
@@ -0,0 +1,99 @@
+// $Id$
+package org.hibernate.search.filter;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.OpenBitSet;
+
+/**
+ * Helper class to apply some common optimizations when
+ * several Filters are applied.
+ *
+ * @author Sanne Grinovero
+ */
+public class FilterOptimizationHelper {
+
+ /**
+ * Returns a new list of DocIdSet, applying binary AND
+ * on all DocIdSet implemented by using BitSet or OpenBitSet.
+ * @param docIdSets
+ * @return the same list if no change was done
+ */
+ public static List<DocIdSet> mergeByBitAnds(List<DocIdSet> docIdSets) {
+ int size = docIdSets.size();
+ List<OpenBitSet> openBitSets = new ArrayList<OpenBitSet>( size );
+ List<DocIdBitSet> docIdBitSets = new ArrayList<DocIdBitSet>( size );
+ List<DocIdSet> nonMergeAble = new ArrayList<DocIdSet>( size );
+ for (DocIdSet set : docIdSets) {
+ if (set instanceof OpenBitSet) {
+ openBitSets.add( (OpenBitSet) set );
+ }
+ else if (set instanceof DocIdBitSet) {
+ docIdBitSets.add( (DocIdBitSet) set );
+ }
+ else {
+ nonMergeAble.add( set );
+ }
+ }
+ if ( openBitSets.size() <= 1 && docIdBitSets.size() <= 1 ) {
+ //skip all work as no optimization is possible
+ return docIdSets;
+ }
+ if ( openBitSets.size() > 0 ) {
+ nonMergeAble.add( mergeByBitAnds( openBitSets ) );
+ }
+ if ( docIdBitSets.size() > 0 ) {
+ nonMergeAble.add( mergeByBitAnds( docIdBitSets ) );
+ }
+ return nonMergeAble;
+ }
+
+ /**
+ * Merges all DocIdBitSet in a new DocIdBitSet using
+ * binary AND operations, which is usually more efficient
+ * than using an iterator.
+ * @param docIdBitSets
+ * @return a new DocIdBitSet, or the first element if only
+ * one element was found in the list.
+ */
+ public static DocIdBitSet mergeByBitAnds(List<DocIdBitSet> docIdBitSets) {
+ int listSize = docIdBitSets.size();
+ if ( listSize == 1 ) {
+ return docIdBitSets.get( 0 );
+ }
+ //we need to copy the first BitSet because BitSet is modified by .logicalOp
+ BitSet result = (BitSet) docIdBitSets.get( 0 ).getBitSet().clone();
+ for ( int i=1; i<listSize; i++ ) {
+ BitSet bitSet = docIdBitSets.get( i ).getBitSet();
+ result.and( bitSet );
+ }
+ return new DocIdBitSet( result );
+ }
+
+ /**
+ * Merges all OpenBitSet in a new OpenBitSet using
+ * binary AND operations, which is usually more efficient
+ * than using an iterator.
+ * @param openBitSets
+ * @return a new OpenBitSet, or the first element if only
+ * one element was found in the list.
+ */
+ public static OpenBitSet mergeByBitAnds(List<OpenBitSet> openBitSets) {
+ int listSize = openBitSets.size();
+ if ( listSize == 1 ) {
+ return openBitSets.get( 0 );
+ }
+ //we need to copy the first OpenBitSet because BitSet is modified by .logicalOp
+ OpenBitSet result = (OpenBitSet) openBitSets.get( 0 ).clone();
+ for ( int i=1; i<listSize; i++ ) {
+ OpenBitSet openSet = openBitSets.get( i );
+ result.intersect( openSet );
+ }
+ return result;
+ }
+
+}
Property changes on: search/trunk/src/java/org/hibernate/search/filter/FilterOptimizationHelper.java
___________________________________________________________________
Name: svn:executable
+ *
Name: svn:keywords
+ Id
Added: search/trunk/src/test/org/hibernate/search/test/filter/FiltersOptimizationTest.java
===================================================================
--- search/trunk/src/test/org/hibernate/search/test/filter/FiltersOptimizationTest.java (rev 0)
+++ search/trunk/src/test/org/hibernate/search/test/filter/FiltersOptimizationTest.java 2008-11-29 13:10:14 UTC (rev 15627)
@@ -0,0 +1,180 @@
+package org.hibernate.search.test.filter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.DocIdBitSet;
+import org.apache.lucene.util.OpenBitSet;
+import org.hibernate.search.filter.FilterOptimizationHelper;
+
+import junit.framework.TestCase;
+
+/**
+ * Used to test org.hibernate.search.filter.FiltersOptimizationHelper
+ * @see org.hibernate.search.filter.FilterOptimizationHelper
+ * @author Sanne Grinovero
+ */
+public class FiltersOptimizationTest extends TestCase {
+
+ /**
+ * in some cases optimizations are not possible,
+ * test that mergeByBitAnds returns the same instance
+ * in that case.
+ */
+ public void testSkipMerging() {
+ List<DocIdSet> dataIn = new ArrayList<DocIdSet>( 3 );
+ dataIn.add( makeOpenBitSetTestSet( 1,2,3,5,8,9,10,11 ) );
+ dataIn.add( makeBitSetTestSet( 1,2,3,5,8,9,10,11,20 ) );
+ dataIn.add( makeAnonymousTestSet( 1,2,3,5,8,9,10,11 ) );
+ dataIn.add( makeAnonymousTestSet( 1,2,3,5,8,9,10,11,12 ) );
+ List<DocIdSet> merge = FilterOptimizationHelper.mergeByBitAnds( dataIn );
+ assertSame( dataIn, merge );
+ }
+
+ /**
+ * In case two filters are of OpenBitSet implementation,
+ * they should be AND-ed by using bit operations
+ * (rather than build the iterator).
+ * @throws IOException should not be thrown
+ */
+ public void testDoMergingOnOpenBitSet() throws IOException {
+ List<DocIdSet> dataIn = new ArrayList<DocIdSet>( 3 );
+ dataIn.add( makeOpenBitSetTestSet( 1,2,5,8,9,10,11 ) );
+ dataIn.add( makeOpenBitSetTestSet( 1,2,3,5,8,11 ) );
+ DocIdSet unmergedSet = makeAnonymousTestSet( 1,2,3,5,8,9,10,11 );
+ dataIn.add( unmergedSet );
+ List<DocIdSet> merge = FilterOptimizationHelper.mergeByBitAnds( dataIn );
+ assertNotSame( dataIn, merge );
+
+ assertEquals( 2, merge.size() );
+ assertSame( unmergedSet, merge.get( 0 ) );
+ assertTrue( isIdSetSequenceSameTo( merge.get( 1 ), 1,2,5,8,11 ) );
+ }
+
+ /**
+ * In case two filters are of DocIdBitSet implementation,
+ * they should be AND-ed by using bit operations
+ * (rather than build the iterator).
+ * @throws IOException should not be thrown
+ */
+ public void testDoMergingOnJavaBitSet() throws IOException {
+ List<DocIdSet> dataIn = new ArrayList<DocIdSet>( 3 );
+ dataIn.add( makeBitSetTestSet( 1,2,5,8,9,10,11 ) );
+ dataIn.add( makeBitSetTestSet( 1,2,3,5,8,11 ) );
+ DocIdSet unmergedSet = makeAnonymousTestSet( 1,2,3,5,8,9,10,11 );
+ dataIn.add( unmergedSet );
+ List<DocIdSet> merge = FilterOptimizationHelper.mergeByBitAnds( dataIn );
+ assertNotSame( dataIn, merge );
+
+ assertEquals( 2, merge.size() );
+ assertSame( unmergedSet, merge.get( 0 ) );
+ assertTrue( isIdSetSequenceSameTo( merge.get( 1 ), 1,2,5,8,11 ) );
+ }
+
+ /**
+ * Used to test the testcase's helper method isIdSetSequenceSameTo
+ * @throws IOException
+ */
+ public void testSelfIdSequenceTester() throws IOException {
+ assertTrue( isIdSetSequenceSameTo(
+ makeOpenBitSetTestSet( 1,2,3,5,8,11 ),
+ 1,2,3,5,8,11 ) );
+ assertFalse( isIdSetSequenceSameTo(
+ makeOpenBitSetTestSet( 1,2,3,5,8 ),
+ 1,2,3,5,8,11 ) );
+ assertFalse( isIdSetSequenceSameTo(
+ makeOpenBitSetTestSet( 1,2,3,5,8,11 ),
+ 1,2,3,5,8 ) );
+ }
+
+ /**
+ * Verifies if the docIdSet is representing a specific
+ * sequence of docIds.
+ * @param docIdSet the docIdSet to test
+ * @param expectedIds an array of document ids
+ * @return true if iterating on docIdSet returns the expectedIds
+ * @throws IOException should not happen
+ */
+ private boolean isIdSetSequenceSameTo(DocIdSet docIdSet, int...expectedIds) throws IOException {
+ DocIdSetIterator idSetIterator = docIdSet.iterator();
+ for ( int setBit : expectedIds ) {
+ if ( ! idSetIterator.next() ) {
+ return false;
+ }
+ if ( idSetIterator.doc() != setBit ) {
+ return false;
+ }
+ }
+ if ( idSetIterator.next() ){
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * test helper, makes an implementation of a DocIdSet
+ * @param docIds the ids it should contain
+ * @return
+ */
+ private DocIdSet makeAnonymousTestSet(int... docIds) {
+ DocIdSet idSet = makeOpenBitSetTestSet( docIds );
+ return new DocIdSetHiddenType( idSet );
+ }
+
+ /**
+ * test helper, makes a prefilled OpenBitSet
+ * @param enabledBits the ids it should contain
+ * @return a new OpenBitSet
+ */
+ private OpenBitSet makeOpenBitSetTestSet(int... enabledBits) {
+ OpenBitSet set = new OpenBitSet();
+ for (int position : enabledBits ) {
+ // a minimal check for input duplicates:
+ assertFalse( set.get( position ) );
+ set.set( position );
+ }
+ return set;
+ }
+
+ /**
+ * test helper, makes a prefilled DocIdBitSet
+ * using the java.lang.BitSet
+ * @see java.lang.BitSet
+ * @param enabledBits the ids it should contain
+ * @return a new DocIdBitSet
+ */
+ private DocIdBitSet makeBitSetTestSet(int... enabledBits) {
+ BitSet set = new BitSet();
+ for (int position : enabledBits ) {
+ // a minimal check for input duplicates:
+ assertFalse( set.get( position ) );
+ set.set( position );
+ }
+ return new DocIdBitSet( set );
+ }
+
+ /**
+ * Implementation for testing: wraps a DocIdSet with a new type
+ * to make it not possible to cast/detect to the original type.
+ */
+ private class DocIdSetHiddenType extends DocIdSet {
+
+ private final DocIdSet bitSet;
+
+ DocIdSetHiddenType(DocIdSet wrapped) {
+ this.bitSet = wrapped;
+ }
+
+ @Override
+ public DocIdSetIterator iterator() {
+ return bitSet.iterator();
+ }
+
+ }
+
+}
Property changes on: search/trunk/src/test/org/hibernate/search/test/filter/FiltersOptimizationTest.java
___________________________________________________________________
Name: svn:executable
+ *
16 years
Hibernate SVN: r15626 - search/trunk/doc/reference/en/modules.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-27 12:26:25 -0500 (Thu, 27 Nov 2008)
New Revision: 15626
Modified:
search/trunk/doc/reference/en/modules/architecture.xml
Log:
HSEARCH-303
Modified: search/trunk/doc/reference/en/modules/architecture.xml
===================================================================
--- search/trunk/doc/reference/en/modules/architecture.xml 2008-11-27 13:32:12 UTC (rev 15625)
+++ search/trunk/doc/reference/en/modules/architecture.xml 2008-11-27 17:26:25 UTC (rev 15626)
@@ -22,8 +22,8 @@
~ 51 Franklin Street, Fifth Floor
~ Boston, MA 02110-1301 USA
-->
-
-<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
+<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN"
+"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">
<chapter id="search-architecture">
<!-- $Id$ -->
@@ -32,14 +32,14 @@
<section>
<title>Overview</title>
- <para>Hibernate Search consists of an indexing and an index search engine.
- Both are backed by Apache Lucene.</para>
+ <para>Hibernate Search consists of an indexing component and an index
+ search component. Both are backed by Apache Lucene.</para>
- <para>When an entity is inserted, updated or removed in/from the database,
- Hibernate Search keeps track of this event (through the Hibernate event
- system) and schedules an index update. All the index updates are handled
- for you without you having to use the Apache Lucene APIs (see <xref
- linkend="search-configuration-event" />).</para>
+ <para>Each time an entity is inserted, updated or removed in/from the
+ database, Hibernate Search keeps track of this event (through the
+ Hibernate event system) and schedules an index update. All the index
+ updates are handled without you having to use the Apache Lucene APIs (see
+ <xref linkend="search-configuration-event" />).</para>
<para>To interact with Apache Lucene indexes, Hibernate Search has the
notion of <classname>DirectoryProvider</classname>s. A directory provider
@@ -67,8 +67,8 @@
<para>It is however recommended, for both your database and Hibernate
Search, to execute your operation in a transaction be it JDBC or JTA. When
in a transaction, the index update operation is scheduled for the
- transaction commit and discarded in case of transaction rollback. The
- batching scope is the transaction. There are two immediate
+ transaction commit phase and discarded in case of transaction rollback.
+ The batching scope is the transaction. There are two immediate
benefits:</para>
<itemizedlist>
@@ -113,101 +113,114 @@
box for two different scenarios.</para>
<section>
- <title>Lucene</title>
+ <title>Back end types</title>
- <para>In this mode, all index update operations applied on a given node
- (JVM) will be executed to the Lucene directories (through the directory
- providers) by the same node. This mode is typically used in non
- clustered environment or in clustered environments where the directory
- store is shared.</para>
+ <section>
+ <title>Lucene</title>
- <mediaobject>
- <imageobject role="html">
- <imagedata align="center"
- fileref="../shared/images/lucene-backend.png"
- format="PNG" />
- </imageobject>
+ <para>In this mode, all index update operations applied on a given
+ node (JVM) will be executed to the Lucene directories (through the
+ directory providers) by the same node. This mode is typically used in
+ non clustered environment or in clustered environments where the
+ directory store is shared.</para>
- <imageobject role="fo">
- <imagedata align="center" fileref="images/lucene-backend.png"
- format="PNG" />
- </imageobject>
- </mediaobject>
+ <mediaobject>
+ <imageobject role="html">
+ <imagedata align="center"
+ fileref="../shared/images/lucene-backend.png"
+ format="PNG" />
+ </imageobject>
- <para>This mode targets non clustered applications, or clustered
- applications where the Directory is taking care of the locking
- strategy.</para>
+ <imageobject role="fo">
+ <imagedata align="center" fileref="images/lucene-backend.png"
+ format="PNG" />
+ </imageobject>
- <para>The main advantage is simplicity and immediate visibility of the
- changes in Lucene queries (a requirement is some applications).</para>
- </section>
+ <caption>
+ <para>Lucene back end configuration.</para>
+ </caption>
+ </mediaobject>
- <section>
- <title>JMS</title>
+ <para>This mode targets non clustered applications, or clustered
+ applications where the Directory is taking care of the locking
+ strategy.</para>
- <para>All index update operations applied on a given node are sent to a
- JMS queue. A unique reader will then process the queue and update the
- master Lucene index. The master index is then replicated on a regular
- basis to the slave copies. This is known as the master / slaves pattern.
- The master is the sole responsible for updating the Lucene index. The
- slaves can accept read as well as write operations. However, they only
- process the read operation on their local index copy and delegate the
- update operations to the master.</para>
+ <para>The main advantage is simplicity and immediate visibility of the
+ changes in Lucene queries (a requirement in some applications).</para>
+ </section>
- <mediaobject>
- <imageobject role="html">
- <imagedata align="center" fileref="../shared/images/jms-backend.png"
- format="PNG" />
- </imageobject>
+ <section>
+ <title>JMS</title>
- <imageobject role="fo">
- <imagedata align="center" fileref="images/jms-backend.png"
- format="PNG" />
- </imageobject>
- </mediaobject>
+ <para>All index update operations applied on a given node are sent to
+ a JMS queue. A unique reader will then process the queue and update
+ the master Lucene index. The master index is then replicated on a
+ regular basis to the slave copies. This is known as the master /
+ slaves pattern. The master is the sole responsible for updating the
+ Lucene index. The slaves can accept read as well as write operations.
+ However, they only process the read operation on their local index
+ copy and delegate the update operations to the master.</para>
- <para>This mode targets clustered environments where throughput is
- critical, and index update delays are affordable. Reliability is ensured
- by the JMS provider and by having the slaves working on a local copy of
- the index.</para>
- </section>
+ <mediaobject>
+ <imageobject role="html">
+ <imagedata align="center"
+ fileref="../shared/images/jms-backend.png" format="PNG" />
+ </imageobject>
- <note>Hibernate Search is an extensible architecture. While not yet part
- of the public API, plugging a third party back end is possible. Feel free
- to drop ideas to <literal>hibernate-dev(a)lists.jboss.org</literal>.</note>
- </section>
+ <imageobject role="fo">
+ <imagedata align="center" fileref="images/jms-backend.png"
+ format="PNG" />
+ </imageobject>
- <section>
- <title>Work execution</title>
+ <caption>
+ <para>JMS back end configuration.</para>
+ </caption>
+ </mediaobject>
- <para>The indexing work (done by the back end) can be executed
- synchronously with the transaction commit (or update operation if out of
- transaction), or asynchronously.</para>
+ <para>This mode targets clustered environments where throughput is
+ critical, and index update delays are affordable. Reliability is
+ ensured by the JMS provider and by having the slaves working on a
+ local copy of the index.</para>
+ </section>
- <section>
- <title>Synchronous</title>
-
- <para>This is the safe mode where the back end work is executed in
- concert with the transaction commit. Under highly concurrent
- environment, this can lead to throughput limitations (due to the Apache
- Lucene lock mechanism) and it can increase the system response time if
- the backend is significantly slower than the transactional process and
- if a lot of IO operations are involved.</para>
+ <note>Hibernate Search is an extensible architecture. Feel free to drop
+ ideas for other third party back ends to
+ <literal>hibernate-dev(a)lists.jboss.org</literal>.</note>
</section>
<section>
- <title>Asynchronous</title>
+ <title>Work execution</title>
- <para>This mode delegates the work done by the back end to a different
- thread. That way, throughput and response time are (to a certain extend)
- decorrelated from the back end performance. The drawback is that a small
- delay appears between the transaction commit and the index update and a
- small overhead is introduced to deal with thread management.</para>
+ <para>The indexing work (done by the back end) can be executed
+ synchronously with the transaction commit (or update operation if out of
+ transaction), or asynchronously.</para>
- <para>It is recommended to use synchronous execution first and evaluate
- asynchronous execution if performance problems occur and after having
- set up a proper benchmark (ie not a lonely cowboy hitting the system in
- a completely unrealistic way).</para>
+ <section>
+ <title>Synchronous</title>
+
+ <para>This is the safe mode where the back end work is executed in
+ concert with the transaction commit. Under highly concurrent
+ environment, this can lead to throughput limitations (due to the
+ Apache Lucene lock mechanism) and it can increase the system response
+ time if the backend is significantly slower than the transactional
+ process and if a lot of IO operations are involved.</para>
+ </section>
+
+ <section>
+ <title>Asynchronous</title>
+
+ <para>This mode delegates the work done by the back end to a different
+ thread. That way, throughput and response time are (to a certain
+ extent) decorrelated from the back end performance. The drawback is
+ that a small delay appears between the transaction commit and the
+ index update and a small overhead is introduced to deal with thread
+ management.</para>
+
+ <para>It is recommended to use synchronous execution first and
+ evaluate asynchronous execution if performance problems occur and
+ after having set up a proper benchmark (ie not a lonely cowboy hitting
+ the system in a completely unrealistic way).</para>
+ </section>
</section>
</section>
@@ -228,13 +241,12 @@
multiple queries and threads provided that the
<classname>IndexReader</classname> is still up-to-date. If the
<classname>IndexReader</classname> is not up-to-date, a new one is
- opened and provided. Each
- <classname>IndexReader</classname> is made of several
- <classname>SegmentReader</classname>s. This strategy only reopens
- segments that have been modified or created after last opening and
- shares the already loaded segments from the previous instance.
- This strategy is the default.</para>
-
+ opened and provided. Each <classname>IndexReader</classname> is made of
+ several <classname>SegmentReader</classname>s. This strategy only
+ reopens segments that have been modified or created after last opening
+ and shares the already loaded segments from the previous instance. This
+ strategy is the default.</para>
+
<para>The name of this strategy is <literal>shared</literal>.</para>
</section>
@@ -259,4 +271,4 @@
implementation must be thread safe.</para>
</section>
</section>
-</chapter>
\ No newline at end of file
+</chapter>
16 years
Hibernate SVN: r15625 - search/trunk/src/java/org/hibernate/search/annotations.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-27 08:32:12 -0500 (Thu, 27 Nov 2008)
New Revision: 15625
Modified:
search/trunk/src/java/org/hibernate/search/annotations/FullTextFilterDef.java
Log:
Javadoc
Modified: search/trunk/src/java/org/hibernate/search/annotations/FullTextFilterDef.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/annotations/FullTextFilterDef.java 2008-11-27 13:12:23 UTC (rev 15624)
+++ search/trunk/src/java/org/hibernate/search/annotations/FullTextFilterDef.java 2008-11-27 13:32:12 UTC (rev 15625)
@@ -19,22 +19,25 @@
@Documented
public @interface FullTextFilterDef {
/**
- * Filter name. Must be unique across all mappings for a given persistence unit
+ * @return the filter name. Must be unique across all mappings for a given persistence unit
*/
String name();
/**
- * Either implements org.apache.lucene.search.Filter
- * or contains a @Factory method returning one.
- * The Filter generated must be thread-safe
+ * Either implements {@link org.apache.lucene.search.Filter}
+ * or contains a <code>@Factory</code> method returning one.
+ * The generated <code>Filter</code> must be thread-safe.
*
- * If the filter accept parameters, an @Key method must be present as well
+ * If the filter accepts parameters, an <code>@Key</code> method must be present as well.
*
+ * @return a class which either implements <code>Filter</code> directly or contains a method annotated with
+ * <code>@Factory</code>.
+ *
*/
Class<?> impl();
/**
- * Cache mode for the filter. Default to instance and results caching
+ * @return The cache mode for the filter. Defaults to instance and results caching
*/
FilterCacheModeType cache() default FilterCacheModeType.INSTANCE_AND_DOCIDSETRESULTS;
}
16 years
Hibernate SVN: r15624 - search/trunk/src/java/org/hibernate/search/annotations.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-27 08:12:23 -0500 (Thu, 27 Nov 2008)
New Revision: 15624
Modified:
search/trunk/src/java/org/hibernate/search/annotations/Resolution.java
search/trunk/src/java/org/hibernate/search/annotations/Similarity.java
Log:
Javadoc
Modified: search/trunk/src/java/org/hibernate/search/annotations/Resolution.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/annotations/Resolution.java 2008-11-26 22:25:15 UTC (rev 15623)
+++ search/trunk/src/java/org/hibernate/search/annotations/Resolution.java 2008-11-27 13:12:23 UTC (rev 15624)
@@ -2,7 +2,7 @@
package org.hibernate.search.annotations;
/**
- * Date indexing resolution
+ * Date indexing resolution.
*
* @author Emmanuel Bernard
*/
Modified: search/trunk/src/java/org/hibernate/search/annotations/Similarity.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/annotations/Similarity.java 2008-11-26 22:25:15 UTC (rev 15623)
+++ search/trunk/src/java/org/hibernate/search/annotations/Similarity.java 2008-11-27 13:12:23 UTC (rev 15624)
@@ -1,4 +1,4 @@
-// $Id:$
+// $Id$
package org.hibernate.search.annotations;
import java.lang.annotation.Documented;
@@ -11,7 +11,7 @@
@Target( ElementType.TYPE )
@Documented
/**
- * Specifies a similarity implementation to use
+ * Specifies a similarity implementation to use.
*
* @author Nick Vincent
*/
16 years
Hibernate SVN: r15623 - in search/trunk: src/java/org/hibernate/search and 2 other directories.
by hibernate-commits@lists.jboss.org
Author: epbernard
Date: 2008-11-26 17:25:15 -0500 (Wed, 26 Nov 2008)
New Revision: 15623
Modified:
search/trunk/doc/reference/en/modules/query.xml
search/trunk/src/java/org/hibernate/search/Environment.java
search/trunk/src/java/org/hibernate/search/impl/SearchFactoryImpl.java
search/trunk/src/test/org/hibernate/search/test/filter/FilterTest.java
Log:
HSEARCH-312 rename property to hibernate.search.filter.cache_docidresults.size
Modified: search/trunk/doc/reference/en/modules/query.xml
===================================================================
--- search/trunk/doc/reference/en/modules/query.xml 2008-11-26 13:50:03 UTC (rev 15622)
+++ search/trunk/doc/reference/en/modules/query.xml 2008-11-26 22:25:15 UTC (rev 15623)
@@ -671,7 +671,7 @@
<classname>SoftReference</classname>s are used together with a hard
reference count (see discussion about filter cache). The hard reference
count can be adjusted using
- <literal>hibernate.search.filter.cache_bit_results.size</literal>
+ <literal>hibernate.search.filter.cache_docidresults.size</literal>
(defaults to 5). The wrapping behaviour can be controlled using the
<literal>@FullTextFilterDef.cache</literal> parameter. There are three
different values for this parameter:</para>
Modified: search/trunk/src/java/org/hibernate/search/Environment.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/Environment.java 2008-11-26 13:50:03 UTC (rev 15622)
+++ search/trunk/src/java/org/hibernate/search/Environment.java 2008-11-26 22:25:15 UTC (rev 15623)
@@ -69,7 +69,7 @@
public static final String FILTER_CACHING_STRATEGY = "hibernate.search.filter.cache_strategy";
/**
- * Property name for the hard ref count of our <code>CachingWrapperFilter</code>.
+ * Number of docidresults cached in hard reference.
*/
- public static final String CACHE_BIT_RESULT_SIZE = "hibernate.search.filter.cache_bit_results.size";
+ public static final String CACHE_DOCIDRESULTS_SIZE = "hibernate.search.filter.cache_docidresults.size";
}
Modified: search/trunk/src/java/org/hibernate/search/impl/SearchFactoryImpl.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/impl/SearchFactoryImpl.java 2008-11-26 13:50:03 UTC (rev 15622)
+++ search/trunk/src/java/org/hibernate/search/impl/SearchFactoryImpl.java 2008-11-26 22:25:15 UTC (rev 15623)
@@ -130,7 +130,7 @@
this.readerProvider = ReaderProviderFactory.createReaderProvider( cfg, this );
this.filterCachingStrategy = buildFilterCachingStrategy( cfg.getProperties() );
this.cacheBitResultsSize = ConfigurationParseHelper.getIntValue(
- cfg.getProperties(), Environment.CACHE_BIT_RESULT_SIZE, CachingWrapperFilter.DEFAULT_SIZE
+ cfg.getProperties(), Environment.CACHE_DOCIDRESULTS_SIZE, CachingWrapperFilter.DEFAULT_SIZE
);
this.barrier = 1; //write barrier
}
Modified: search/trunk/src/test/org/hibernate/search/test/filter/FilterTest.java
===================================================================
--- search/trunk/src/test/org/hibernate/search/test/filter/FilterTest.java 2008-11-26 13:50:03 UTC (rev 15622)
+++ search/trunk/src/test/org/hibernate/search/test/filter/FilterTest.java 2008-11-26 22:25:15 UTC (rev 15623)
@@ -185,6 +185,6 @@
protected void configure(org.hibernate.cfg.Configuration cfg) {
super.configure(cfg);
- cfg.setProperty( "hibernate.search.filter.cache_bit_results.size", "10" );
+ cfg.setProperty( "hibernate.search.filter.cache_docidresults.size", "10" );
}
}
16 years
Hibernate SVN: r15622 - search/trunk/src/java/org/hibernate/search/engine.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-26 08:50:03 -0500 (Wed, 26 Nov 2008)
New Revision: 15622
Modified:
search/trunk/src/java/org/hibernate/search/engine/DocumentBuilderContainedEntity.java
search/trunk/src/java/org/hibernate/search/engine/MultiClassesQueryLoader.java
Log:
HSEARCH-309
changed the semantics of DocumentBuildercontainedEntity.getMappedSubclasses() to only return mapped subclasses.
Modified: search/trunk/src/java/org/hibernate/search/engine/DocumentBuilderContainedEntity.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/engine/DocumentBuilderContainedEntity.java 2008-11-26 13:17:23 UTC (rev 15621)
+++ search/trunk/src/java/org/hibernate/search/engine/DocumentBuilderContainedEntity.java 2008-11-26 13:50:03 UTC (rev 15622)
@@ -610,7 +610,7 @@
Set<Class<?>> tempMappedSubclasses = new HashSet<Class<?>>();
//together with the caller this creates a o(2), but I think it's still faster than create the up hierarchy for each class
for ( Class currentClass : indexedClasses ) {
- if ( plainClass.isAssignableFrom( currentClass ) ) {
+ if ( plainClass != currentClass && plainClass.isAssignableFrom( currentClass ) ) {
tempMappedSubclasses.add( currentClass );
}
}
Modified: search/trunk/src/java/org/hibernate/search/engine/MultiClassesQueryLoader.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/engine/MultiClassesQueryLoader.java 2008-11-26 13:17:23 UTC (rev 15621)
+++ search/trunk/src/java/org/hibernate/search/engine/MultiClassesQueryLoader.java 2008-11-26 13:50:03 UTC (rev 15622)
@@ -75,7 +75,7 @@
for (EntityInfo entityInfo : entityInfos) {
boolean found = false;
for (RootEntityMetadata rootEntityInfo : entityMatadata) {
- if ( rootEntityInfo.mappedSubclasses.contains( entityInfo.clazz ) ) {
+ if ( rootEntityInfo.rootEntity == entityInfo.clazz || rootEntityInfo.mappedSubclasses.contains( entityInfo.clazz ) ) {
List<EntityInfo> bucket = entityinfoBuckets.get( rootEntityInfo );
if ( bucket == null ) {
bucket = new ArrayList<EntityInfo>();
16 years
Hibernate SVN: r15621 - search/trunk/src/java/org/hibernate/search/impl.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-26 08:17:23 -0500 (Wed, 26 Nov 2008)
New Revision: 15621
Modified:
search/trunk/src/java/org/hibernate/search/impl/FullTextSessionImpl.java
Log:
HSEARCH-309
Removed the code added for HSEARCH-262 since it is obsolete after implementing HSEARCH-160
Modified: search/trunk/src/java/org/hibernate/search/impl/FullTextSessionImpl.java
===================================================================
--- search/trunk/src/java/org/hibernate/search/impl/FullTextSessionImpl.java 2008-11-26 12:21:24 UTC (rev 15620)
+++ search/trunk/src/java/org/hibernate/search/impl/FullTextSessionImpl.java 2008-11-26 13:17:23 UTC (rev 15621)
@@ -48,7 +48,6 @@
import org.hibernate.search.backend.Work;
import org.hibernate.search.backend.WorkType;
import org.hibernate.search.backend.impl.EventSourceTransactionContext;
-import org.hibernate.search.engine.DocumentBuilderIndexedEntity;
import org.hibernate.search.engine.SearchFactoryImplementor;
import org.hibernate.search.query.FullTextQueryImpl;
import org.hibernate.search.util.ContextHelper;
@@ -102,6 +101,7 @@
/**
* {@inheritDoc}
*/
+ @SuppressWarnings( "unchecked" )
public <T> void purge(Class<T> entityType, Serializable id) {
if ( entityType == null ) {
return;
@@ -114,21 +114,11 @@
throw new IllegalArgumentException( msg );
}
+ Work<T> work;
for ( Class clazz : targetedClasses ) {
- DocumentBuilderIndexedEntity builder = searchFactoryImplementor.getDocumentBuilderIndexedEntity( clazz );
- Work<T> work;
if ( id == null ) {
- // purge the main entity
work = new Work<T>( clazz, id, WorkType.PURGE_ALL );
searchFactoryImplementor.getWorker().performWork( work, transactionContext );
-
- // purge the subclasses
- Set<Class<?>> subClasses = builder.getMappedSubclasses();
- for ( Class subClazz : subClasses ) {
- @SuppressWarnings( "unchecked" )
- Work subClassWork = new Work( subClazz, id, WorkType.PURGE_ALL );
- searchFactoryImplementor.getWorker().performWork( subClassWork, transactionContext );
- }
}
else {
work = new Work<T>( clazz, id, WorkType.PURGE );
16 years
Hibernate SVN: r15620 - branches/Branch_3_2/HibernateExt/tools/src/java/org/hibernate/tool/hbm2x.
by hibernate-commits@lists.jboss.org
Author: max.andersen(a)jboss.com
Date: 2008-11-26 07:21:24 -0500 (Wed, 26 Nov 2008)
New Revision: 15620
Modified:
branches/Branch_3_2/HibernateExt/tools/src/java/org/hibernate/tool/hbm2x/HibernateConfigurationExporter.java
Log:
JBIDE-3198 escape <> in properties of cfg.xml (by Dima)
Modified: branches/Branch_3_2/HibernateExt/tools/src/java/org/hibernate/tool/hbm2x/HibernateConfigurationExporter.java
===================================================================
--- branches/Branch_3_2/HibernateExt/tools/src/java/org/hibernate/tool/hbm2x/HibernateConfigurationExporter.java 2008-11-26 11:28:20 UTC (rev 15619)
+++ branches/Branch_3_2/HibernateExt/tools/src/java/org/hibernate/tool/hbm2x/HibernateConfigurationExporter.java 2008-11-26 12:21:24 UTC (rev 15620)
@@ -109,7 +109,7 @@
}
}
if(key.startsWith("hibernate.") ) { // if not starting with hibernate. not relevant for cfg.xml
- pw.println(" <property name=\"" + key + "\">" + element.getValue() + "</property>");
+ pw.println(" <property name=\"" + key + "\">" + forXML(element.getValue().toString()) + "</property>");
}
}
@@ -170,4 +170,27 @@
public String getName() {
return "cfg2cfgxml";
}
+
+ /**
+ *
+ * @param text
+ * @return String with escaped [<,>] special characters.
+ */
+ public static String forXML(String text) {
+ if (text == null) return null;
+ final StringBuilder result = new StringBuilder();
+ char[] chars = text.toCharArray();
+ for (int i = 0; i < chars.length; i++){
+ char character = chars[i];
+ if (character == '<') {
+ result.append("<");
+ } else if (character == '>'){
+ result.append(">");
+ } else {
+ result.append(character);
+ }
+ }
+ return result.toString();
+ }
+
}
16 years
Hibernate SVN: r15619 - search/trunk/doc/reference/en/modules.
by hibernate-commits@lists.jboss.org
Author: hardy.ferentschik
Date: 2008-11-26 06:28:20 -0500 (Wed, 26 Nov 2008)
New Revision: 15619
Modified:
search/trunk/doc/reference/en/modules/query.xml
Log:
Added OBJECT_CLASS documentation
Modified: search/trunk/doc/reference/en/modules/query.xml
===================================================================
--- search/trunk/doc/reference/en/modules/query.xml 2008-11-25 18:38:50 UTC (rev 15618)
+++ search/trunk/doc/reference/en/modules/query.xml 2008-11-26 11:28:20 UTC (rev 15619)
@@ -267,31 +267,35 @@
<itemizedlist>
<listitem>
<para>FullTextQuery.THIS: returns the initialized and managed
- entity (as a non projected query would have done)</para>
+ entity (as a non projected query would have done).</para>
</listitem>
<listitem>
<para>FullTextQuery.DOCUMENT: returns the Lucene Document related
- to the object projected</para>
+ to the object projected.</para>
</listitem>
<listitem>
+ <para>FullTextQuery.OBJECT_CLASS: returns the class of the
+ indexed entity.</para>
+ </listitem>
+
+ <listitem>
<para>FullTextQuery.SCORE: returns the document score in the
- query. The score is guatanteed to be between 0 and 1 but the
- highest score is not necessarily equals to 1. Scores are handy to
- compare one result against an other for a given query but are
- useless when comparing the result of different queries.</para>
+ query. Scores are handy to compare one result against an other for
+ a given query but are useless when comparing the result of
+ different queries.</para>
</listitem>
<listitem>
<para>FullTextQuery.ID: the id property value of the projected
- object</para>
+ object.</para>
</listitem>
<listitem>
<para>FullTextQuery.DOCUMENT_ID: the Lucene document id. Careful,
Lucene document id can change overtime between two different
- IndexReader opening (this feature is experimental)</para>
+ IndexReader opening (this feature is experimental).</para>
</listitem>
<listitem>
16 years