[exo-jcr-commits] exo-jcr SVN: r5691 - in jcr/branches/1.15.x: exo.jcr.component.core and 9 other directories.

do-not-reply at jboss.org do-not-reply at jboss.org
Wed Feb 22 09:18:14 EST 2012


Author: nzamosenchuk
Date: 2012-02-22 09:18:12 -0500 (Wed, 22 Feb 2012)
New Revision: 5691

Added:
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractFieldComparator.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldComparatorBase.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldComparatorSource.java
Removed:
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldSortComparator.java
Modified:
   jcr/branches/1.15.x/exo.jcr.component.core/developer-notes.txt
   jcr/branches/1.15.x/exo.jcr.component.core/pom.xml
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CaseTermQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChangesHolder.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChildAxisQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DerefQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DescendantSelfAxisQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrIndexSearcher.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrQueryParser.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrStandartAnalyzer.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryHits.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MatchAllScorer.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MoreLikeThis.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiScorer.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NotQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ParentAxisQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHitsQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryImpl.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RangeQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldCache.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SortedLuceneQueryHits.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WildcardQuery.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/directory/FSDirectoryManager.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/hits/ScorerHits.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/spell/LuceneSpellChecker.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/synonym/WordNetSynonyms.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/BaseStandaloneTest.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/BaseQueryTest.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestArabicSearch.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestDateSearch.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestExcelFileSearch.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestIndexingConfig.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestMultiValueSearch.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestRewriteNode.java
   jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestChangesHolder.java
   jcr/branches/1.15.x/pom.xml
Log:
EXOJCR-1766 : upgrading to Lucene 3.0

Modified: jcr/branches/1.15.x/exo.jcr.component.core/developer-notes.txt
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/developer-notes.txt	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/developer-notes.txt	2012-02-22 14:18:12 UTC (rev 5691)
@@ -14,3 +14,4 @@
 * Removed ValueStorageCleanHelper class
 * Removed BackupSchedulerException class
 * EXOJCR-1728: Remove ReadOnly support for WorkspacePersistentDataManager
+* Lucene 3.0
\ No newline at end of file

Modified: jcr/branches/1.15.x/exo.jcr.component.core/pom.xml
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/pom.xml	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/pom.xml	2012-02-22 14:18:12 UTC (rev 5691)
@@ -109,6 +109,10 @@
          <artifactId>lucene-memory</artifactId>
       </dependency>
       <dependency>
+        <groupId>org.apache.lucene</groupId>
+        <artifactId>lucene-wordnet</artifactId>
+      </dependency>  
+      <dependency>
          <groupId>com.sun.xml.stream</groupId>
          <artifactId>sjsxp</artifactId>
       </dependency>

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractExcerpt.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,8 +16,9 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Fieldable;
 import org.apache.lucene.index.IndexReader;
@@ -193,16 +194,16 @@
    /**
     * @return the extracted terms from the query.
     */
-   protected final Set getQueryTerms()
+   protected final Set<Term> getQueryTerms()
    {
-      Set extractedTerms = new HashSet();
-      Set relevantTerms = new HashSet();
+      Set<Term> extractedTerms = new HashSet<Term>();
+      Set<Term> relevantTerms = new HashSet<Term>();
       query.extractTerms(extractedTerms);
       // only keep terms for fulltext fields
-      Iterator it = extractedTerms.iterator();
+      Iterator<Term> it = extractedTerms.iterator();
       while (it.hasNext())
       {
-         Term t = (Term)it.next();
+         Term t = it.next();
          if (t.field().equals(FieldNames.FULLTEXT))
          {
             relevantTerms.add(t);
@@ -260,16 +261,17 @@
    private TermPositionVector createTermPositionVector(String text)
    {
       // term -> TermVectorOffsetInfo[]
-      final SortedMap termMap = new TreeMap();
+      final SortedMap<String, TermVectorOffsetInfo[]> termMap = new TreeMap<String, TermVectorOffsetInfo[]>();
       Reader r = new StringReader(text);
       TokenStream ts = index.getTextAnalyzer().tokenStream("", r);
-      Token t = new Token();
       try
       {
-         while ((t = ts.next(t)) != null)
+         while (ts.incrementToken())
          {
-            String termText = t.term();
-            TermVectorOffsetInfo[] info = (TermVectorOffsetInfo[])termMap.get(termText);
+            OffsetAttribute offset = (OffsetAttribute)ts.getAttribute(OffsetAttribute.class);
+            TermAttribute term = (TermAttribute)ts.getAttribute(TermAttribute.class);
+            String termText = term.term();
+            TermVectorOffsetInfo[] info = termMap.get(termText);
             if (info == null)
             {
                info = new TermVectorOffsetInfo[1];
@@ -280,22 +282,21 @@
                info = new TermVectorOffsetInfo[tmp.length + 1];
                System.arraycopy(tmp, 0, info, 0, tmp.length);
             }
-            info[info.length - 1] = new TermVectorOffsetInfo(t.startOffset(), t.endOffset());
+            info[info.length - 1] = new TermVectorOffsetInfo(offset.startOffset(), offset.endOffset());
             termMap.put(termText, info);
          }
+         ts.end();
+         ts.close();
       }
       catch (IOException e)
       {
-         if (LOG.isTraceEnabled())
-         {
-            LOG.trace("An exception occurred: " + e.getMessage());
-         }
+         // should never happen, we are reading from a string
       }
 
       return new TermPositionVector()
       {
 
-         private String[] terms = (String[])termMap.keySet().toArray(new String[termMap.size()]);
+         private String[] terms = termMap.keySet().toArray(new String[termMap.size()]);
 
          public int[] getTermPositions(int index)
          {
@@ -307,7 +308,7 @@
             TermVectorOffsetInfo[] info = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
             if (index >= 0 && index < terms.length)
             {
-               info = (TermVectorOffsetInfo[])termMap.get(terms[index]);
+               info = termMap.get(terms[index]);
             }
             return info;
          }
@@ -332,7 +333,7 @@
             int[] freqs = new int[terms.length];
             for (int i = 0; i < terms.length; i++)
             {
-               freqs[i] = ((TermVectorOffsetInfo[])termMap.get(terms[i])).length;
+               freqs[i] = termMap.get(terms[i]).length;
             }
             return freqs;
          }

Added: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractFieldComparator.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractFieldComparator.java	                        (rev 0)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/AbstractFieldComparator.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.index.IndexReader;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Abstract base class for <code>FieldComparator</code>s which keep their values
+ * (<code>Comparable</code>s) in an array.
+ */
+public abstract class AbstractFieldComparator extends FieldComparatorBase
+{
+
+   /**
+    * The values for comparing.
+    */
+   private final Comparable[] values;
+
+   /**
+    * The index readers.
+    */
+
+   protected final List<IndexReader> readers = new ArrayList<IndexReader>();
+
+   /**
+    * The document number starts for the {@link #readers}.
+    */
+   protected int[] starts;
+
+   /**
+    * Create a new instance with the given number of values.
+    *
+    * @param numHits  the number of values
+    */
+   protected AbstractFieldComparator(int numHits)
+   {
+      values = new Comparable[numHits];
+   }
+
+   /**
+    * Returns the reader index for document <code>n</code>.
+    *
+    * @param n document number.
+    * @return the reader index.
+    */
+   protected final int readerIndex(int n)
+   {
+      int lo = 0;
+      int hi = readers.size() - 1;
+
+      while (hi >= lo)
+      {
+         int mid = (lo + hi) >> 1;
+         int midValue = starts[mid];
+         if (n < midValue)
+         {
+            hi = mid - 1;
+         }
+         else if (n > midValue)
+         {
+            lo = mid + 1;
+         }
+         else
+         {
+            while (mid + 1 < readers.size() && starts[mid + 1] == midValue)
+            {
+               mid++;
+            }
+            return mid;
+         }
+      }
+      return hi;
+   }
+
+   /**
+    * Add the given value to the values array
+    *
+    * @param slot   index into values
+    * @param value  value for adding
+    */
+   @Override
+   public void setValue(int slot, Comparable value)
+   {
+      values[slot] = value;
+   }
+
+   /**
+    * Return a value from the values array
+    *
+    * @param slot  index to retrieve
+    * @return  the retrieved value
+    */
+   @Override
+   public Comparable getValue(int slot)
+   {
+      return values[slot];
+   }
+
+   @Override
+   public void setNextReader(IndexReader reader, int docBase) throws IOException
+   {
+      getIndexReaders(readers, reader);
+
+      int maxDoc = 0;
+      starts = new int[readers.size() + 1];
+
+      for (int i = 0; i < readers.size(); i++)
+      {
+         IndexReader r = readers.get(i);
+         starts[i] = maxDoc;
+         maxDoc += r.maxDoc();
+      }
+      starts[readers.size()] = maxDoc;
+   }
+
+   /**
+    * Checks if <code>reader</code> is of type {@link MultiIndexReader} and if
+    * so calls itself recursively for each reader within the
+    * <code>MultiIndexReader</code> or otherwise adds the reader to the list.
+    *
+    * @param readers  list of index readers.
+    * @param reader   reader to decompose
+    */
+   private static void getIndexReaders(List<IndexReader> readers, IndexReader reader)
+   {
+      if (reader instanceof MultiIndexReader)
+      {
+         for (IndexReader r : ((MultiIndexReader)reader).getIndexReaders())
+         {
+            getIndexReaders(readers, r);
+         }
+      }
+      else
+      {
+         readers.add(reader);
+      }
+   }
+}
\ No newline at end of file

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CaseTermQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CaseTermQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/CaseTermQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -21,6 +21,7 @@
 import org.apache.lucene.index.TermEnum;
 import org.apache.lucene.search.FilteredTermEnum;
 import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.util.ToStringUtils;
 import org.exoplatform.services.log.ExoLogger;
 import org.exoplatform.services.log.Log;
 
@@ -37,180 +38,228 @@
  */
 abstract class CaseTermQuery extends MultiTermQuery implements TransformConstants
 {
+
    private static final Log LOG = ExoLogger.getLogger("exo.jcr.component.core.CaseTermQuery");
 
-    /**
-     * Indicates whether terms from the index should be lower-cased or
-     * upper-cased.
-     */
-    protected final int transform;
+   /**
+    * Indicates whether terms from the index should be lower-cased or
+    * upper-cased.
+    */
+   protected final int transform;
 
-    CaseTermQuery(Term term, int transform) {
-        super(term);
-        this.transform = transform;
-    }
+   private final Term term;
 
-    /**
-     * {@inheritDoc}
-     */
-    protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
-        return new CaseTermEnum(reader);
-    }
+   CaseTermQuery(Term term, int transform)
+   {
+      this.term = term;
+      this.transform = transform;
+   }
 
-    static final class Upper extends CaseTermQuery {
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   protected FilteredTermEnum getEnum(IndexReader reader) throws IOException
+   {
+      return new CaseTermEnum(reader);
+   }
 
-        Upper(Term term) {
-            super(term, TRANSFORM_UPPER_CASE);
-        }
-    }
+   /** Prints a user-readable version of this query. */
+   @Override
+   public String toString(String field)
+   {
+      StringBuffer buffer = new StringBuffer();
+      if (!term.field().equals(field))
+      {
+         buffer.append(term.field());
+         buffer.append(':');
+      }
+      buffer.append(term.text());
+      buffer.append(ToStringUtils.boost(getBoost()));
+      return buffer.toString();
+   }
 
-    static final class Lower extends CaseTermQuery {
+   static final class Upper extends CaseTermQuery
+   {
 
-        Lower(Term term) {
-            super(term, TRANSFORM_LOWER_CASE);
-        }
+      Upper(Term term)
+      {
+         super(term, TRANSFORM_UPPER_CASE);
+      }
+   }
 
-    }
+   static final class Lower extends CaseTermQuery
+   {
 
+      Lower(Term term)
+      {
+         super(term, TRANSFORM_LOWER_CASE);
+      }
+   }
+
    private final class CaseTermEnum extends FilteredTermEnum
    {
 
-        private final int nameLength;
+      CaseTermEnum(IndexReader reader) throws IOException
+      {
+         // gather all terms that match
+         // keep them in order and remember the doc frequency as value
+         final Map<Term, Integer> orderedTerms = new LinkedHashMap<Term, Integer>();
 
-        private final OffsetCharSequence termText;
+         // there are always two range scans: one with an initial
+         // lower case character and another one with an initial upper case
+         // character
+         List<RangeScan> rangeScans = new ArrayList<RangeScan>(2);
+         int nameLength = FieldNames.getNameLength(term.text());
+         String propName = term.text().substring(0, nameLength);
+         OffsetCharSequence termText = new OffsetCharSequence(nameLength, term.text());
+         OffsetCharSequence currentTerm = new OffsetCharSequence(nameLength, term.text(), transform);
 
-        private final OffsetCharSequence currentTerm;
+         try
+         {
+            // start with a term using the lower case character for the first
+            // character of the value.
+            if (term.text().length() > nameLength)
+            {
+               // start with initial lower case
+               StringBuffer lowerLimit = new StringBuffer(propName);
+               String termStr = termText.toString();
+               String upperTermStr = termStr.toUpperCase();
+               String lowerTermStr = termStr.toLowerCase();
 
-        CaseTermEnum(IndexReader reader) throws IOException {
-            // gather all terms that match
-            // keep them in order and remember the doc frequency as value
-            final Map orderedTerms = new LinkedHashMap();
+               lowerLimit.append(upperTermStr);
+               lowerLimit.setCharAt(nameLength, Character.toLowerCase(lowerLimit.charAt(nameLength)));
+               StringBuffer upperLimit = new StringBuffer(propName);
+               upperLimit.append(lowerTermStr);
+               rangeScans.add(new RangeScan(reader, new Term(term.field(), lowerLimit.toString()), new Term(term
+                  .field(), upperLimit.toString())));
 
-            Term term = getTerm();
+               // second scan with upper case start
+               lowerLimit = new StringBuffer(propName);
+               lowerLimit.append(upperTermStr);
+               upperLimit = new StringBuffer(propName);
+               upperLimit.append(lowerTermStr);
+               upperLimit.setCharAt(nameLength, Character.toUpperCase(upperLimit.charAt(nameLength)));
+               rangeScans.add(new RangeScan(reader, new Term(term.field(), lowerLimit.toString()), new Term(term
+                  .field(), upperLimit.toString())));
 
-            // there are always two range scanse: one with an initial
-            // lower case character and another one with an initial upper case
-            // character
-            List rangeScans = new ArrayList(2);
-            nameLength = FieldNames.getNameLength(term.text());
-            String propName = term.text().substring(0, nameLength);
-            this.termText = new OffsetCharSequence(nameLength, term.text());
-            this.currentTerm = new OffsetCharSequence(nameLength, term.text(), transform);
+            }
+            else
+            {
+               // use term as is
+               rangeScans.add(new RangeScan(reader, term, term));
+            }
 
-            try {
-                // start with a term using the lower case character for the first
-                // character of the value.
-                if (term.text().length() > nameLength) {
-                    // start with initial lower case
-                    StringBuffer lowerLimit = new StringBuffer(propName);
-                    String termStr = termText.toString();
-                    String upperTermStr = termStr.toUpperCase();
-                    String lowerTermStr = termStr.toLowerCase();
-                    
-                    lowerLimit.append(upperTermStr);
-                    lowerLimit.setCharAt(nameLength, Character.toLowerCase(lowerLimit.charAt(nameLength)));
-                    StringBuffer upperLimit = new StringBuffer(propName);
-                    upperLimit.append(lowerTermStr);
-                    rangeScans.add(new RangeScan(reader,
-                            new Term(term.field(), lowerLimit.toString()),
-                            new Term(term.field(), upperLimit.toString())));
-
-                    // second scan with upper case start
-                    lowerLimit = new StringBuffer(propName);
-                    lowerLimit.append(upperTermStr);
-                    upperLimit = new StringBuffer(propName);
-                    upperLimit.append(lowerTermStr);
-                    upperLimit.setCharAt(nameLength, Character.toUpperCase(upperLimit.charAt(nameLength)));
-                    rangeScans.add(new RangeScan(reader,
-                            new Term(term.field(), lowerLimit.toString()),
-                            new Term(term.field(), upperLimit.toString())));
-
-                } else {
-                    // use term as is
-                    rangeScans.add(new RangeScan(reader, term, term));
-                }
-
-                Iterator it = rangeScans.iterator();
-                while (it.hasNext()) {
-                    TermEnum terms = (TermEnum) it.next();
-                    do {
-                        Term t = terms.term();
-                        if (t != null) {
-                            currentTerm.setBase(t.text());
-                            int compare = currentTerm.compareTo(termText);
-                            if (compare == 0) {
-                                orderedTerms.put(t, new Integer(terms.docFreq()));
-                            } else if (compare < 0) {
-                                // try next one
-                            } else {
-                                // compare > 0
-                            }
-                        } else {
-                            break;
-                        }
-                    } while (terms.next());
-                }
-            } finally {
-                Iterator it = rangeScans.iterator();
-                while (it.hasNext()) {
-                    TermEnum terms = (TermEnum) it.next();
-                    try {
-                        terms.close();
-                    } catch (IOException e) {
-                       if (LOG.isTraceEnabled())
-                       {
-                           LOG.trace("An exception occurred: " + e.getMessage());
-                       }
-                    }
-                }
+            for (TermEnum terms : rangeScans)
+            {
+               do
+               {
+                  Term t = terms.term();
+                  if (t != null)
+                  {
+                     currentTerm.setBase(t.text());
+                     int compare = currentTerm.compareTo(termText);
+                     if (compare == 0)
+                     {
+                        orderedTerms.put(t, terms.docFreq());
+                     }
+                     else if (compare < 0)
+                     {
+                        // try next one
+                     }
+                     else
+                     {
+                        // compare > 0
+                     }
+                  }
+                  else
+                  {
+                     break;
+                  }
+               }
+               while (terms.next());
             }
+         }
+         finally
+         {
+            for (TermEnum terms : rangeScans)
+            {
+               try
+               {
+                  terms.close();
+               }
+               catch (IOException e)
+               {
+                  if (LOG.isTraceEnabled())
+                  {
+                     LOG.trace("An exception occurred: " + e.getMessage());
+                  }
+               }
+            }
+         }
 
-            final Iterator it = orderedTerms.keySet().iterator();
+         final Iterator<Term> it = orderedTerms.keySet().iterator();
 
-            setEnum(new TermEnum() {
+         setEnum(new TermEnum()
+         {
 
-                private Term current;
+            private Term current;
 
-                {
-                    getNext();
-                }
+            {
+               getNext();
+            }
 
-                public boolean next() {
-                    getNext();
-                    return current != null;
-                }
+            @Override
+            public boolean next()
+            {
+               getNext();
+               return current != null;
+            }
 
-                public Term term() {
-                    return current;
-                }
+            @Override
+            public Term term()
+            {
+               return current;
+            }
 
-                public int docFreq() {
-                    Integer docFreq = (Integer) orderedTerms.get(current);
-                    return docFreq != null ? docFreq.intValue() : 0;
-                }
+            @Override
+            public int docFreq()
+            {
+               Integer docFreq = orderedTerms.get(current);
+               return docFreq != null ? docFreq : 0;
+            }
 
-                public void close() {
-                    // nothing to close
-                }
+            @Override
+            public void close()
+            {
+               // nothing to close
+            }
 
-                private void getNext() {
-                    current = it.hasNext() ? (Term) it.next() : null;
-                }
-            });
-        }
+            private void getNext()
+            {
+               current = it.hasNext() ? it.next() : null;
+            }
+         });
+      }
 
-        protected boolean termCompare(Term term) {
-            // they all match
-            return true;
-        }
+      @Override
+      protected boolean termCompare(Term term)
+      {
+         // they all match
+         return true;
+      }
 
-        public float difference() {
-            return 1.0f;
-        }
+      @Override
+      public float difference()
+      {
+         return 1.0f;
+      }
 
-        protected boolean endEnum() {
-            // todo correct?
-            return false;
-        }
-    }
+      @Override
+      protected boolean endEnum()
+      {
+         // todo correct?
+         return false;
+      }
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChangesHolder.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChangesHolder.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChangesHolder.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -19,6 +19,7 @@
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.AbstractField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
@@ -43,8 +44,6 @@
 
    private static final int STORED_FLAG = 1;
 
-   private static final int COMPRESSED_FLAG = 1 << 1;
-
    private static final int INDEXED_FLAG = 1 << 2;
 
    private static final int TOKENIZED_FLAG = 1 << 3;
@@ -120,7 +119,7 @@
       }
       return ids;
    }
-   
+
    /**
     * @return the id of the given lucene doc
     */
@@ -128,7 +127,7 @@
    {
       return doc.get(FieldNames.UUID);
    }
-   
+
    /**
     * {@inheritDoc}
     */
@@ -176,12 +175,12 @@
       {
          // The value is a String
          field =
-                  new Field(name, (String) value, getStoreParameter(flags), getIndexParameter(flags),
-                           getTermVectorParameter(flags));
+            new Field(name, (String)value, getStoreParameter(flags), getIndexParameter(flags),
+               getTermVectorParameter(flags));
       }
       field.setBoost(boost);
       field.setOmitNorms((flags & OMIT_NORMS_FLAG) > 0);
-      field.setOmitTf((flags & OMIT_TF_FLAG) > 0);
+      field.setOmitTermFreqAndPositions((flags & OMIT_TF_FLAG) > 0);
       return field;
    }
 
@@ -215,12 +214,8 @@
     */
    private static Field.Store getStoreParameter(int flags)
    {
-      if ((flags & COMPRESSED_FLAG) > 0)
+      if ((flags & STORED_FLAG) > 0)
       {
-         return Field.Store.COMPRESS;
-      }
-      else if ((flags & STORED_FLAG) > 0)
-      {
          return Field.Store.YES;
       }
       else
@@ -237,8 +232,7 @@
     */
    private static Field.TermVector getTermVectorParameter(int flags)
    {
-      if (((flags & STORE_POSITION_WITH_TERM_VECTOR_FLAG) > 0) 
-          && ((flags & STORE_OFFSET_WITH_TERM_VECTOR_FLAG) > 0))
+      if (((flags & STORE_POSITION_WITH_TERM_VECTOR_FLAG) > 0) && ((flags & STORE_OFFSET_WITH_TERM_VECTOR_FLAG) > 0))
       {
          return Field.TermVector.WITH_POSITIONS_OFFSETS;
       }
@@ -307,7 +301,7 @@
       if (field.getBoost() != 1.0f)
       {
          // Boost
-         out.writeFloat(field.getBoost());         
+         out.writeFloat(field.getBoost());
       }
       // Value
       writeValue(out, field);
@@ -352,10 +346,6 @@
       {
          flags |= STORED_FLAG;
       }
-      if (field.isCompressed())
-      {
-         flags |= COMPRESSED_FLAG;
-      }
       if (field.isIndexed())
       {
          flags |= INDEXED_FLAG;
@@ -388,7 +378,7 @@
       {
          flags |= LAZY_FLAG;
       }
-      if (field.getOmitTf())
+      if (field instanceof AbstractField && ((AbstractField)field).getOmitTermFreqAndPositions())
       {
          flags |= OMIT_TF_FLAG;
       }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChildAxisQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChildAxisQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ChildAxisQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -418,12 +418,14 @@
          this.hResolver = hResolver;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateChildren();
          do
          {
@@ -431,54 +433,46 @@
          }
          while (nextDoc > -1 && !indexIsValid(nextDoc));
 
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public int doc()
+      public int docID()
       {
          return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
       public float score() throws IOException
       {
          return 1.0f;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateChildren();
          nextDoc = hits.skipTo(target);
          while (nextDoc > -1 && !indexIsValid(nextDoc))
          {
-            next();
+            nextDoc();
          }
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       *
-       * @throws UnsupportedOperationException this implementation always
-       *                                       throws an <code>UnsupportedOperationException</code>.
-       */
-      @Override
-      public Explanation explain(int doc) throws IOException
-      {
-         throw new UnsupportedOperationException();
-      }
-
       private void calculateChildren() throws IOException
       {
          if (hits == null)
@@ -492,7 +486,7 @@
                contextScorer.score(new AbstractHitCollector()
                {
                   @Override
-                  public void collect(int doc, float score)
+                  protected void collect(int doc, float score)
                   {
                      calc[0].collectContextHit(doc);
                   }
@@ -505,22 +499,22 @@
                contextScorer.score(new AbstractHitCollector()
                {
 
-                  private List docIds = new ArrayList();
+                  private List<Integer> docIds = new ArrayList<Integer>();
 
                   @Override
-                  public void collect(int doc, float score)
+                  protected void collect(int doc, float score)
                   {
                      calc[0].collectContextHit(doc);
                      if (docIds != null)
                      {
-                        docIds.add(new Integer(doc));
+                        docIds.add(doc);
                         if (docIds.size() > CONTEXT_SIZE_THRESHOLD)
                         {
                            // switch
                            calc[0] = new HierarchyResolvingChildrenCalculator(reader, hResolver);
-                           for (Iterator it = docIds.iterator(); it.hasNext();)
+                           for (int docId : docIds)
                            {
-                              calc[0].collectContextHit(((Integer)it.next()).intValue());
+                              calc[0].collectContextHit(docId);
                            }
                            // indicate that we switched
                            docIds = null;

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DerefQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DerefQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DerefQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -241,7 +241,6 @@
    }
 
    //----------------------< DerefScorer >---------------------------------
-
    /**
     * Implements a <code>Scorer</code> for this <code>DerefQuery</code>.
     */
@@ -261,7 +260,7 @@
       /**
        * List of UUIDs of selected nodes
        */
-      private List uuids = null;
+      private List<String> uuids = null;
 
       /**
        * The next document id to return
@@ -281,67 +280,71 @@
          this.hits = new BitSet(reader.maxDoc());
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateChildren();
          nextDoc = hits.nextSetBit(nextDoc + 1);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public int doc()
+      public int docID()
       {
          return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
       public float score() throws IOException
       {
          return 1.0f;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateChildren();
          nextDoc = hits.nextSetBit(target);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
       /**
-       * {@inheritDoc}
-       *
-       * @throws UnsupportedOperationException this implementation always
-       *                                       throws an <code>UnsupportedOperationException</code>.
+       * 1. do context query
+       * 2. go through each document from the query
+       * 3. find reference property UUIDs
+       * 4. Use UUIDs to find document number
+       * 5. Use the name test to filter the documents
+       * 
+       * @throws IOException if an exception occurs while reading from the
+       *                     index.
        */
-      @Override
-      public Explanation explain(int doc) throws IOException
-      {
-         throw new UnsupportedOperationException();
-      }
-
       private void calculateChildren() throws IOException
       {
          if (uuids == null)
          {
-            uuids = new ArrayList();
+            uuids = new ArrayList<String>();
             contextScorer.score(new AbstractHitCollector()
             {
                @Override
-               public void collect(int doc, float score)
+               protected void collect(int doc, float score)
                {
                   hits.set(doc);
                }
@@ -354,7 +357,7 @@
                nameTestScorer.score(new AbstractHitCollector()
                {
                   @Override
-                  public void collect(int doc, float score)
+                  protected void collect(int doc, float score)
                   {
                      nameTestHits.set(doc);
                   }
@@ -371,11 +374,11 @@
                   // no reference properties at all on this node
                   continue;
                }
-               for (int v = 0; v < values.length; v++)
+               for (String value : values)
                {
-                  if (values[v].startsWith(prefix))
+                  if (value.startsWith(prefix))
                   {
-                     uuids.add(values[v].substring(prefix.length()));
+                     uuids.add(value.substring(prefix.length()));
                   }
                }
             }
@@ -383,9 +386,9 @@
             // collect the doc ids of all target nodes. we reuse the existing
             // bitset.
             hits.clear();
-            for (Iterator it = uuids.iterator(); it.hasNext();)
+            for (String uuid : uuids)
             {
-               TermDocs node = reader.termDocs(new Term(FieldNames.UUID, (String)it.next()));
+               TermDocs node = reader.termDocs(new Term(FieldNames.UUID, uuid));
                try
                {
                   while (node.next())

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DescendantSelfAxisQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DescendantSelfAxisQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/DescendantSelfAxisQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -511,6 +511,11 @@
       private final int[] singleDoc = new int[1];
 
       /**
+       * The next document id to be returned
+       */
+      private int currentDoc = -1;
+
+      /**
        * Creates a new <code>DescendantSelfAxisScorer</code>.
        *
        * @param similarity the <code>Similarity</code> instance to use.
@@ -529,35 +534,39 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (currentDoc == NO_MORE_DOCS)
+         {
+            return currentDoc;
+         }
+
          collectContextHits();
-         if (!subScorer.next() || contextHits.isEmpty())
+         currentDoc = subScorer.nextDoc();
+         if (contextHits.isEmpty())
          {
-            return false;
+            currentDoc = NO_MORE_DOCS;
          }
-         int nextDoc = subScorer.doc();
-         while (nextDoc > -1)
+         while (currentDoc != NO_MORE_DOCS)
          {
-
-            if (isValid(nextDoc))
+            if (isValid(currentDoc))
             {
-               return true;
+               return currentDoc;
             }
 
             // try next
-            nextDoc = subScorer.next() ? subScorer.doc() : -1;
+            currentDoc = subScorer.nextDoc();
          }
-         return false;
+         return currentDoc;
       }
 
       /**
        * {@inheritDoc}
        */
       @Override
-      public int doc()
+      public int docID()
       {
-         return subScorer.doc();
+         return currentDoc;
       }
 
       /**
@@ -573,17 +582,22 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
-         boolean match = subScorer.skipTo(target);
-         if (match)
+         if (currentDoc == NO_MORE_DOCS)
          {
-            collectContextHits();
-            return isValid(subScorer.doc()) || next();
+            return currentDoc;
          }
+
+         currentDoc = subScorer.nextDoc();
+         if (currentDoc == NO_MORE_DOCS)
+         {
+            return NO_MORE_DOCS;
+         }
          else
          {
-            return false;
+            collectContextHits();
+            return isValid(currentDoc) ? currentDoc : nextDoc();
          }
       }
 
@@ -591,40 +605,26 @@
       {
          if (!contextHitsCalculated)
          {
-            long time = 0;
-            if (log.isDebugEnabled())
-            {
-               time = System.currentTimeMillis();
-            }
+            long time = System.currentTimeMillis();
             contextScorer.score(new AbstractHitCollector()
             {
                @Override
-               public void collect(int doc, float score)
+               protected void collect(int doc, float score)
                {
                   contextHits.set(doc);
                }
             }); // find all
             contextHitsCalculated = true;
+            time = System.currentTimeMillis() - time;
             if (log.isDebugEnabled())
             {
-               time = System.currentTimeMillis() - time;
-               log.debug("Collected {} context hits in {} ms for {}", new Object[]{
-                  new Integer(contextHits.cardinality()), new Long(time), DescendantSelfAxisQuery.this});
+               log.debug("Collected {} context hits in {} ms for {}", new Object[]{contextHits.cardinality(), time,
+                  DescendantSelfAxisQuery.this});
             }
          }
       }
 
       /**
-       * @throws UnsupportedOperationException this implementation always
-       *                                       throws an <code>UnsupportedOperationException</code>.
-       */
-      @Override
-      public Explanation explain(int doc) throws IOException
-      {
-         throw new UnsupportedOperationException();
-      }
-
-      /**
        * Returns <code>true</code> if <code>doc</code> is a valid match from
        * the sub scorer against the context hits. The caller must ensure
        * that the context hits are calculated before this method is called!
@@ -644,7 +644,7 @@
          // check if doc is a descendant of one of the context nodes
          pDocs = hResolver.getParents(doc, pDocs);
 
-         if (pDocs.length == 0 || pDocs[0] < 0)
+         if (pDocs.length == 0)
          {
             return false;
          }
@@ -657,10 +657,9 @@
          while (pDocs.length != 0)
          {
             boolean valid = false;
-            for (int i = 0; i < pDocs.length; i++)
+            for (int pDoc : pDocs)
             {
-               int pDoci = pDocs[i];
-               if (pDoci >= 0 && pDoci <= contextHits.size() && ancestorCount >= minLevels && contextHits.get(pDoci))
+               if (ancestorCount >= minLevels && contextHits.get(pDoc))
                {
                   valid = true;
                   break;
@@ -722,9 +721,9 @@
          else
          {
             pDocs = new int[0];
-            for (int i = 0; i < docs.length; i++)
+            for (int doc : docs)
             {
-               int[] p = hResolver.getParents(docs[i], new int[0]);
+               int[] p = hResolver.getParents(doc, new int[0]);
                int[] tmp = new int[p.length + pDocs.length];
                System.arraycopy(pDocs, 0, tmp, 0, pDocs.length);
                System.arraycopy(p, 0, tmp, pDocs.length, p.length);

Added: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldComparatorBase.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldComparatorBase.java	                        (rev 0)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/FieldComparatorBase.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.search.FieldComparator;
+
+import java.io.IOException;
+
+/**
+ * Abstract base class for <code>FieldComparator</code> implementations
+ * which are based on values in the form of <code>Comparables</code>.
+ */
+abstract public class FieldComparatorBase extends FieldComparator
+{
+
+   /**
+    * The bottom value.
+    */
+   private Comparable bottom;
+
+   /**
+    * Value for a document
+    *
+    * @param doc  id of the document
+    * @return  the value for the given id
+    */
+   protected abstract Comparable sortValue(int doc);
+
+   /**
+    * Retrieves the value of a given slot
+    *
+    * @param slot  index of the value to retrieve
+    * @return  the value in the given slot
+    */
+   protected abstract Comparable getValue(int slot);
+
+   /**
+    * Puts a value into a given slot
+    *
+    * @param slot  index where to put the value
+    * @param value  the value to put into the given slot
+    */
+   protected abstract void setValue(int slot, Comparable value);
+
+   @Override
+   public int compare(int slot1, int slot2)
+   {
+      return compare(getValue(slot1), getValue(slot2));
+   }
+
+   @Override
+   public int compareBottom(int doc) throws IOException
+   {
+      return compare(bottom, sortValue(doc));
+   }
+
+   @Override
+   public void setBottom(int slot)
+   {
+      bottom = getValue(slot);
+   }
+
+   /**
+    * Compare two values
+    *
+    * @param val1  first value
+    * @param val2  second value
+    * @return  A negative integer if <code>val1</code> comes before <code>val2</code>,
+    *   a positive integer if <code>val1</code> comes after <code>val2</code> and
+    *   <code>0</code> if <code>val1</code> and <code>val2</code> are equal.
+    */
+   protected int compare(Comparable val1, Comparable val2)
+   {
+      if (val1 == null)
+      {
+         if (val2 == null)
+         {
+            return 0;
+         }
+         return -1;
+      }
+      else if (val2 == null)
+      {
+         return 1;
+      }
+      return Util.compare(val1, val2);
+   }
+
+   @Override
+   public void copy(int slot, int doc) throws IOException
+   {
+      setValue(slot, sortValue(doc));
+   }
+
+   @Override
+   public Comparable value(int slot)
+   {
+      return getValue(slot);
+   }
+}

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexInfos.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -20,6 +20,7 @@
 import org.exoplatform.commons.utils.SecurityHelper;
 import org.exoplatform.services.jcr.impl.core.query.lucene.directory.IndexInputStream;
 import org.exoplatform.services.jcr.impl.core.query.lucene.directory.IndexOutputStream;
+import org.exoplatform.services.jcr.impl.util.io.DirectoryHelper;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -119,7 +120,7 @@
          public Object run() throws Exception
          {
             // Known issue for NFS bases on ext3. Need to refresh directory to read actual data.
-            dir.list();
+            dir.listAll();
 
             names.clear();
             indexes.clear();
@@ -186,7 +187,7 @@
             {
                dir.deleteFile(name);
             }
-            dir.renameFile(name + ".new", name);
+            rename(name + ".new", name);
             dirty = false;
             return null;
          }
@@ -194,6 +195,43 @@
    }
 
    /**
+    * Renames file by copying.
+    * 
+    * @param from
+    * @param to
+    * @throws IOException
+    */
+   private void rename(String from, String to) throws IOException
+   {
+      IndexOutputStream out = null;
+      IndexInputStream in = null;
+      try
+      {
+         out = new IndexOutputStream(dir.createOutput(to));
+         in = new IndexInputStream(dir.openInput(from));
+         DirectoryHelper.transfer(in, out);
+         // delete old one
+         if (dir.fileExists(from))
+         {
+            dir.deleteFile(from);
+         }
+      }
+      finally
+      {
+         if (in != null)
+         {
+            in.close();
+         }
+
+         if (out != null)
+         {
+            out.flush();
+            out.close();
+         }
+      }
+   }
+
+   /**
     * Returns the index name at position <code>i</code>.
     * @param i the position.
     * @return the index name.

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrIndexSearcher.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrIndexSearcher.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrIndexSearcher.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -105,7 +105,7 @@
             }
             if (hits == null)
             {
-               if (sort == null)
+               if (sort == null || sort.getSort().length == 0)
                {
                   hits = new LuceneQueryHits(reader, JcrIndexSearcher.this, query);
                }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrQueryParser.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrQueryParser.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrQueryParser.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,190 +16,233 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
 
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * <code>JackrabbitQueryParser</code> extends the standard lucene query parser
  * and adds JCR specific customizations.
  */
-public class JcrQueryParser extends QueryParser {
+public class JcrQueryParser extends QueryParser
+{
 
-    /**
-     * The Jackrabbit synonym provider or <code>null</code> if there is none.
-     */
-    private final SynonymProvider synonymProvider;
+   /**
+    * The Jackrabbit synonym provider or <code>null</code> if there is none.
+    */
+   private final SynonymProvider synonymProvider;
 
-    /**
-     * Creates a new query parser instance.
-     *
-     * @param fieldName       the field name.
-     * @param analyzer        the analyzer.
-     * @param synonymProvider the synonym provider or <code>null</code> if none
-     *                        is available.
-     */
-    public JcrQueryParser(String fieldName,
-                                 Analyzer analyzer,
-                                 SynonymProvider synonymProvider) {
-        super(fieldName, analyzer);
-        this.synonymProvider = synonymProvider;
-        setAllowLeadingWildcard(true);
-        setDefaultOperator(Operator.AND);
-    }
+   /**
+    * Creates a new query parser instance.
+    *
+    * @param fieldName       the field name.
+    * @param analyzer        the analyzer.
+    * @param synonymProvider the synonym provider or <code>null</code> if none
+    *                        is available.
+    */
+   public JcrQueryParser(String fieldName, Analyzer analyzer, SynonymProvider synonymProvider)
+   {
+      super(Version.LUCENE_24, fieldName, analyzer);
+      this.synonymProvider = synonymProvider;
+      setAllowLeadingWildcard(true);
+      setDefaultOperator(Operator.AND);
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public Query parse(String textsearch) throws ParseException {
-        // replace escaped ' with just '
-        StringBuffer rewritten = new StringBuffer();
-        // the default lucene query parser recognizes 'AND' and 'NOT' as
-        // keywords.
-        textsearch = textsearch.replaceAll("AND", "and");
-        textsearch = textsearch.replaceAll("NOT", "not");
-        boolean escaped = false;
-        for (int i = 0; i < textsearch.length(); i++) {
-            if (textsearch.charAt(i) == '\\') {
-                if (escaped) {
-                    rewritten.append("\\\\");
-                    escaped = false;
-                } else {
-                    escaped = true;
-                }
-            } else if (textsearch.charAt(i) == '\'') {
-                if (escaped) {
-                    escaped = false;
-                }
-                rewritten.append(textsearch.charAt(i));
-            } else if (textsearch.charAt(i) == '~') {
-                if (i == 0 || Character.isWhitespace(textsearch.charAt(i - 1))) {
-                    // escape tilde so we can use it for similarity query
-                    rewritten.append("\\");
-                }
-                rewritten.append('~');
-            } else {
-                if (escaped) {
-                    rewritten.append('\\');
-                    escaped = false;
-                }
-                rewritten.append(textsearch.charAt(i));
+   /**
+    * {@inheritDoc}
+    */
+   public Query parse(String textsearch) throws ParseException
+   {
+      // replace escaped ' with just '
+      StringBuffer rewritten = new StringBuffer();
+      // the default lucene query parser recognizes 'AND' and 'NOT' as
+      // keywords.
+      textsearch = textsearch.replaceAll("AND", "and");
+      textsearch = textsearch.replaceAll("NOT", "not");
+      boolean escaped = false;
+      for (int i = 0; i < textsearch.length(); i++)
+      {
+         if (textsearch.charAt(i) == '\\')
+         {
+            if (escaped)
+            {
+               rewritten.append("\\\\");
+               escaped = false;
             }
-        }
-        return super.parse(rewritten.toString());
-    }
-
-    /**
-     * Factory method for generating a synonym query.
-     * Called when parser parses an input term token that has the synonym
-     * prefix (~term) prepended.
-     *
-     * @param field Name of the field query will use.
-     * @param termStr Term token to use for building term for the query
-     *
-     * @return Resulting {@link Query} built for the term
-     * @exception ParseException throw in overridden method to disallow
-     */
-    protected Query getSynonymQuery(String field, String termStr)
-            throws ParseException {
-        List synonyms = new ArrayList();
-        synonyms.add(new BooleanClause(getFieldQuery(field, termStr),
-                BooleanClause.Occur.SHOULD));
-        if (synonymProvider != null) {
-            String[] terms = synonymProvider.getSynonyms(termStr);
-            for (int i = 0; i < terms.length; i++) {
-                synonyms.add(new BooleanClause(getFieldQuery(field, terms[i]),
-                        BooleanClause.Occur.SHOULD));
+            else
+            {
+               escaped = true;
             }
-        }
-        if (synonyms.size() == 1) {
-            return ((BooleanClause) synonyms.get(0)).getQuery();
-        } else {
-            return getBooleanQuery(synonyms);
-        }
-    }
+         }
+         else if (textsearch.charAt(i) == '\'')
+         {
+            if (escaped)
+            {
+               escaped = false;
+            }
+            rewritten.append(textsearch.charAt(i));
+         }
+         else if (textsearch.charAt(i) == '~')
+         {
+            if (i == 0 || Character.isWhitespace(textsearch.charAt(i - 1)))
+            {
+               // escape tilde so we can use it for similarity query
+               rewritten.append("\\");
+            }
+            rewritten.append('~');
+         }
+         else
+         {
+            if (escaped)
+            {
+               rewritten.append('\\');
+               escaped = false;
+            }
+            rewritten.append(textsearch.charAt(i));
+         }
+      }
+      return super.parse(rewritten.toString());
+   }
 
+   /**
+    * Factory method for generating a synonym query.
+    * Called when parser parses an input term token that has the synonym
+    * prefix (~term) prepended.
+    *
+    * @param field Name of the field query will use.
+    * @param termStr Term token to use for building term for the query
+    *
+    * @return Resulting {@link Query} built for the term
+    * @exception ParseException throw in overridden method to disallow
+    */
+   protected Query getSynonymQuery(String field, String termStr) throws ParseException
+   {
+      List synonyms = new ArrayList();
+      synonyms.add(new BooleanClause(getFieldQuery(field, termStr), BooleanClause.Occur.SHOULD));
+      if (synonymProvider != null)
+      {
+         String[] terms = synonymProvider.getSynonyms(termStr);
+         for (int i = 0; i < terms.length; i++)
+         {
+            synonyms.add(new BooleanClause(getFieldQuery(field, terms[i]), BooleanClause.Occur.SHOULD));
+         }
+      }
+      if (synonyms.size() == 1)
+      {
+         return ((BooleanClause)synonyms.get(0)).getQuery();
+      }
+      else
+      {
+         return getBooleanQuery(synonyms);
+      }
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    protected Query getFieldQuery(String field, String queryText)
-            throws ParseException {
-        if (queryText.startsWith("~")) {
-            // synonym query
-            return getSynonymQuery(field, queryText.substring(1));
-        } else {
-            return super.getFieldQuery(field, queryText);
-        }
-    }
+   /**
+    * {@inheritDoc}
+    */
+   protected Query getFieldQuery(String field, String queryText) throws ParseException
+   {
+      if (queryText.startsWith("~"))
+      {
+         // synonym query
+         return getSynonymQuery(field, queryText.substring(1));
+      }
+      else
+      {
+         return super.getFieldQuery(field, queryText);
+      }
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    protected Query getPrefixQuery(String field, String termStr)
-            throws ParseException {
-        return getWildcardQuery(field, termStr + "*");
-    }
+   /**
+    * {@inheritDoc}
+    */
+   protected Query getPrefixQuery(String field, String termStr) throws ParseException
+   {
+      return getWildcardQuery(field, termStr + "*");
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    protected Query getWildcardQuery(String field, String termStr)
-            throws ParseException {
-        if (getLowercaseExpandedTerms()) {
-            termStr = termStr.toLowerCase();
-        }
-        return new WildcardQuery(field, null, translateWildcards(termStr));
-    }
+   /**
+    * {@inheritDoc}
+    */
+   protected Query getWildcardQuery(String field, String termStr) throws ParseException
+   {
+      if (getLowercaseExpandedTerms())
+      {
+         termStr = termStr.toLowerCase();
+      }
+      return new WildcardQuery(field, null, translateWildcards(termStr));
+   }
 
-    /**
-     * Translates unescaped wildcards '*' and '?' into '%' and '_'.
-     *
-     * @param input the input String.
-     * @return the translated String.
-     */
-    private String translateWildcards(String input) {
-        StringBuffer translated = new StringBuffer(input.length());
-        boolean escaped = false;
-        for (int i = 0; i < input.length(); i++) {
-            if (input.charAt(i) == '\\') {
-                if (escaped) {
-                    translated.append("\\\\");
-                    escaped = false;
-                } else {
-                    escaped = true;
-                }
-            } else if (input.charAt(i) == '*') {
-                if (escaped) {
-                    translated.append('*');
-                    escaped = false;
-                } else {
-                    translated.append('%');
-                }
-            } else if (input.charAt(i) == '?') {
-                if (escaped) {
-                    translated.append('?');
-                    escaped = false;
-                } else {
-                    translated.append('_');
-                }
-            } else if (input.charAt(i) == '%' || input.charAt(i) == '_') {
-                // escape every occurrence of '%' and '_'
-                escaped = false;
-                translated.append('\\').append(input.charAt(i));
-            } else {
-                if (escaped) {
-                    translated.append('\\');
-                    escaped = false;
-                }
-                translated.append(input.charAt(i));
+   /**
+    * Translates unescaped wildcards '*' and '?' into '%' and '_'.
+    *
+    * @param input the input String.
+    * @return the translated String.
+    */
+   private String translateWildcards(String input)
+   {
+      StringBuffer translated = new StringBuffer(input.length());
+      boolean escaped = false;
+      for (int i = 0; i < input.length(); i++)
+      {
+         if (input.charAt(i) == '\\')
+         {
+            if (escaped)
+            {
+               translated.append("\\\\");
+               escaped = false;
             }
-        }
-        return translated.toString();
-    }
+            else
+            {
+               escaped = true;
+            }
+         }
+         else if (input.charAt(i) == '*')
+         {
+            if (escaped)
+            {
+               translated.append('*');
+               escaped = false;
+            }
+            else
+            {
+               translated.append('%');
+            }
+         }
+         else if (input.charAt(i) == '?')
+         {
+            if (escaped)
+            {
+               translated.append('?');
+               escaped = false;
+            }
+            else
+            {
+               translated.append('_');
+            }
+         }
+         else if (input.charAt(i) == '%' || input.charAt(i) == '_')
+         {
+            // escape every occurrence of '%' and '_'
+            escaped = false;
+            translated.append('\\').append(input.charAt(i));
+         }
+         else
+         {
+            if (escaped)
+            {
+               translated.append('\\');
+               escaped = false;
+            }
+            translated.append(input.charAt(i));
+         }
+      }
+      return translated.toString();
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrStandartAnalyzer.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrStandartAnalyzer.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/JcrStandartAnalyzer.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -19,10 +19,12 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util.Version;
 import org.exoplatform.commons.utils.SecurityHelper;
 
 import java.io.Reader;
 import java.security.PrivilegedAction;
+import java.util.Collections;
 
 /**
  * This is the global jackrabbit lucene analyzer. By default, all
@@ -34,53 +36,59 @@
  * index the text of the property and to parse searchtext for this property.
  */
 
-public class JcrStandartAnalyzer  extends Analyzer {
+public class JcrStandartAnalyzer extends Analyzer
+{
 
-    /**
-     * The default Jackrabbit analyzer if none is configured in <code><SearchIndex></code>
-     * configuration.
-     */
+   /**
+    * The default Jackrabbit analyzer if none is configured in <code><SearchIndex></code>
+    * configuration.
+    */
    private Analyzer defaultAnalyzer = SecurityHelper.doPrivilegedAction(new PrivilegedAction<Analyzer>()
    {
       public Analyzer run()
       {
-         return new StandardAnalyzer(new String[]{});
+         return new StandardAnalyzer(Version.LUCENE_24, Collections.EMPTY_SET);
       }
    });
 
-    /**
-     * The indexing configuration.
-     */
-    private IndexingConfiguration indexingConfig;
+   /**
+    * The indexing configuration.
+    */
+   private IndexingConfiguration indexingConfig;
 
-    /**
-     * A param indexingConfig the indexing configuration.
-     */
-    protected void setIndexingConfig(IndexingConfiguration indexingConfig) {
-        this.indexingConfig = indexingConfig;
-    }
+   /**
+    * A param indexingConfig the indexing configuration.
+    */
+   protected void setIndexingConfig(IndexingConfiguration indexingConfig)
+   {
+      this.indexingConfig = indexingConfig;
+   }
 
-    /**
-     * @param analyzer the default jackrabbit analyzer
-     */
-    protected void setDefaultAnalyzer(Analyzer analyzer) {
-        defaultAnalyzer = analyzer;
-    }
+   /**
+    * @param analyzer the default jackrabbit analyzer
+    */
+   protected void setDefaultAnalyzer(Analyzer analyzer)
+   {
+      defaultAnalyzer = analyzer;
+   }
 
-    /**
-     * Creates a TokenStream which tokenizes all the text in the provided
-     * Reader. If the fieldName (property) is configured to have a different
-     * analyzer than the default, this analyzer is used for tokenization
-     */
-    @Override
-   public TokenStream tokenStream(String fieldName, Reader reader) {
-        if (indexingConfig != null) {
-            Analyzer propertyAnalyzer = indexingConfig.getPropertyAnalyzer(fieldName);
-            if (propertyAnalyzer != null) {
-                return propertyAnalyzer.tokenStream(fieldName, reader);
-            }
-        }
-        return defaultAnalyzer.tokenStream(fieldName, reader);
-    }
+   /**
+    * Creates a TokenStream which tokenizes all the text in the provided
+    * Reader. If the fieldName (property) is configured to have a different
+    * analyzer than the default, this analyzer is used for tokenization
+    */
+   @Override
+   public TokenStream tokenStream(String fieldName, Reader reader)
+   {
+      if (indexingConfig != null)
+      {
+         Analyzer propertyAnalyzer = indexingConfig.getPropertyAnalyzer(fieldName);
+         if (propertyAnalyzer != null)
+         {
+            return propertyAnalyzer.tokenStream(fieldName, reader);
+         }
+      }
+      return defaultAnalyzer.tokenStream(fieldName, reader);
+   }
 
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryHits.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryHits.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/LuceneQueryHits.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,6 +17,7 @@
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -73,11 +74,15 @@
     */
    public ScoreNode nextScoreNode() throws IOException
    {
-      if (!scorer.next())
+      if (scorer == null)
       {
          return null;
       }
-      int doc = scorer.doc();
+      int doc = scorer.nextDoc();
+      if (doc == DocIdSetIterator.NO_MORE_DOCS)
+      {
+         return null;
+      }
       String uuid = reader.document(doc).get(FieldNames.UUID);
       return new ScoreNode(uuid, scorer.score(), doc);
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MatchAllScorer.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MatchAllScorer.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MatchAllScorer.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -34,147 +34,174 @@
  * The MatchAllScorer implements a Scorer that scores / collects all
  * documents in the index that match a field.
  */
-class MatchAllScorer extends Scorer {
+class MatchAllScorer extends Scorer
+{
 
-    /**
-     * next doc number
-     */
-    private int nextDoc = -1;
+   /**
+    * next doc number
+    */
+   private int nextDoc = -1;
 
-    /**
-     * IndexReader giving access to index
-     */
-    private IndexReader reader;
+   /**
+    * IndexReader giving access to index
+    */
+   private IndexReader reader;
 
-    /**
-     * The field to match
-     */
-    private String field;
+   /**
+    * The field to match
+    */
+   private String field;
 
-    /**
-     * BitSet filtering documents without content is specified field
-     */
-    private BitSet docFilter;
+   /**
+    * BitSet filtering documents without content is specified field
+    */
+   private BitSet docFilter;
 
-    /**
-     * Explanation object. the same for all docs
-     */
-    private final Explanation matchExpl;
+   /**
+    * Explanation object. the same for all docs
+    */
+   private final Explanation matchExpl;
 
-    /**
-     * Creates a new MatchAllScorer.
-     *
-     * @param reader the IndexReader
-     * @param field  the field name to match.
-     * @throws IOException if an error occurs while collecting hits.
-     *                     e.g. while reading from the search index.
-     */
-    MatchAllScorer(IndexReader reader, String field)
-            throws IOException {
-        super(Similarity.getDefault());
-        this.reader = reader;
-        this.field = field;
-        matchExpl
-                = new Explanation(Similarity.getDefault().idf(reader.maxDoc(),
-                        reader.maxDoc()),
-                        "matchAll");
-        calculateDocFilter();
-    }
+   /**
+    * Creates a new MatchAllScorer.
+    *
+    * @param reader the IndexReader
+    * @param field  the field name to match.
+    * @throws IOException if an error occurs while collecting hits.
+    *                     e.g. while reading from the search index.
+    */
+   MatchAllScorer(IndexReader reader, String field) throws IOException
+   {
+      super(Similarity.getDefault());
+      this.reader = reader;
+      this.field = field;
+      matchExpl = new Explanation(Similarity.getDefault().idf(reader.maxDoc(), reader.maxDoc()), "matchAll");
+      calculateDocFilter();
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public void score(Collector hc) throws IOException {
-        while (next()) {
-            hc.collect(doc());
-        }
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public void score(Collector collector) throws IOException
+   {
+      collector.setScorer(this); // NOTE: required by the Collector contract; present in Jackrabbit but was missing in the eXo port
+      while (nextDoc() != NO_MORE_DOCS)
+      {
+         collector.collect(docID());
+      }
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean next() throws IOException {
-        nextDoc = docFilter.nextSetBit(nextDoc + 1);
-        return nextDoc > -1;
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public int nextDoc() throws IOException
+   {
+      if (nextDoc == NO_MORE_DOCS)
+      {
+         return nextDoc;
+      }
 
-    /**
-     * {@inheritDoc}
-     */
-    public int doc() {
-        return nextDoc;
-    }
+      nextDoc = docFilter.nextSetBit(nextDoc + 1);
+      if (nextDoc < 0)
+      {
+         nextDoc = NO_MORE_DOCS;
+      }
+      return nextDoc;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public float score() throws IOException {
-        return 1.0f;
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public int docID()
+   {
+      return nextDoc;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean skipTo(int target) throws IOException {
-        nextDoc = target - 1;
-        return next();
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public float score() throws IOException
+   {
+      return 1.0f;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public Explanation explain(int doc) {
-        return matchExpl;
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public int advance(int target) throws IOException
+   {
+      if (nextDoc == NO_MORE_DOCS)
+      {
+         return nextDoc;
+      }
 
-    /**
-     * Calculates a BitSet filter that includes all the nodes
-     * that have content in properties according to the field name
-     * passed in the constructor of this MatchAllScorer.
-     *
-     * @throws IOException if an error occurs while reading from
-     *                     the search index.
-     */
-    private void calculateDocFilter() throws IOException {
-        PerQueryCache cache = PerQueryCache.getInstance();
-        Map readerCache = (Map) cache.get(MatchAllScorer.class, reader);
-        if (readerCache == null) {
-            readerCache = new HashMap();
-            cache.put(MatchAllScorer.class, reader, readerCache);
-        }
-        // get BitSet for field
-        docFilter = (BitSet) readerCache.get(field);
+      nextDoc = target - 1;
+      return nextDoc();
+   }
 
-        if (docFilter != null) {
-            // use cached BitSet;
-            return;
-        }
+   /**
+    * Calculates a BitSet filter that includes all the nodes
+    * that have content in properties according to the field name
+    * passed in the constructor of this MatchAllScorer.
+    *
+    * @throws IOException if an error occurs while reading from
+    *                     the search index.
+    */
+   private void calculateDocFilter() throws IOException
+   {
+      PerQueryCache cache = PerQueryCache.getInstance();
+      Map readerCache = (Map)cache.get(MatchAllScorer.class, reader);
+      if (readerCache == null)
+      {
+         readerCache = new HashMap();
+         cache.put(MatchAllScorer.class, reader, readerCache);
+      }
+      // get BitSet for field
+      docFilter = (BitSet)readerCache.get(field);
 
-        // otherwise calculate new
-        docFilter = new BitSet(reader.maxDoc());
-        // we match all terms
-        String namedValue = FieldNames.createNamedValue(field, "");
-        TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, namedValue));
-        try {
-            TermDocs docs = reader.termDocs();
-            try {
-                while (terms.term() != null
-                        && terms.term().field() == FieldNames.PROPERTIES
-                        && terms.term().text().startsWith(namedValue)) {
-                    docs.seek(terms);
-                    while (docs.next()) {
-                        docFilter.set(docs.doc());
-                    }
-                    terms.next();
-                }
-            } finally {
-                docs.close();
+      if (docFilter != null)
+      {
+         // use cached BitSet;
+         return;
+      }
+
+      // otherwise calculate new
+      docFilter = new BitSet(reader.maxDoc());
+      // we match all terms
+      String namedValue = FieldNames.createNamedValue(field, "");
+      TermEnum terms = reader.terms(new Term(FieldNames.PROPERTIES, namedValue));
+      try
+      {
+         TermDocs docs = reader.termDocs();
+         try
+         {
+            while (terms.term() != null && terms.term().field() == FieldNames.PROPERTIES
+               && terms.term().text().startsWith(namedValue))
+            {
+               docs.seek(terms);
+               while (docs.next())
+               {
+                  docFilter.set(docs.doc());
+               }
+               terms.next();
             }
-        } finally {
-            terms.close();
-        }
+         }
+         finally
+         {
+            docs.close();
+         }
+      }
+      finally
+      {
+         terms.close();
+      }
 
-        // put BitSet into cache
-        readerCache.put(field, docFilter);
-    }
+      // put BitSet into cache
+      readerCache.put(field, docFilter);
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MoreLikeThis.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MoreLikeThis.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MoreLikeThis.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,24 +16,10 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.io.StringReader;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
@@ -45,7 +31,21 @@
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.Version;
 
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.StringReader;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * Generate "more like this" similarity queries.
@@ -140,740 +140,816 @@
  * </pre>
  *
  */
-public final class MoreLikeThis {
+public final class MoreLikeThis
+{
 
-    /**
-     * Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support.
-     * @see #getMaxNumTokensParsed
-     */
-    public static final int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000;
+   /**
+    * Default maximum number of tokens to parse in each example doc field that is not stored with TermVector support.
+    * @see #getMaxNumTokensParsed
+    */
+   public static final int DEFAULT_MAX_NUM_TOKENS_PARSED = 5000;
 
-    /**
-     * Default analyzer to parse source doc with.
-     * @see #getAnalyzer
-     */
-    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer();
+   /**
+    * Default analyzer to parse source doc with.
+    * @see #getAnalyzer
+    */
+   public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Version.LUCENE_24);
 
-    /**
-     * Ignore terms with less than this frequency in the source doc.
-     * @see #getMinTermFreq
-     * @see #setMinTermFreq
-     */
-    public static final int DEFAULT_MIN_TERM_FREQ = 2;
+   /**
+    * Ignore terms with less than this frequency in the source doc.
+    * @see #getMinTermFreq
+    * @see #setMinTermFreq
+    */
+   public static final int DEFAULT_MIN_TERM_FREQ = 2;
 
-    /**
-     * Ignore words which do not occur in at least this many docs.
-     * @see #getMinDocFreq
-     * @see #setMinDocFreq
-     */
-    public static final int DEFAULT_MIN_DOC_FREQ = 5;
+   /**
+    * Ignore words which do not occur in at least this many docs.
+    * @see #getMinDocFreq
+    * @see #setMinDocFreq
+    */
+   public static final int DEFAULT_MIN_DOC_FREQ = 5;
 
-    /**
-     * Boost terms in query based on score.
-     * @see #isBoost
-     * @see #setBoost
-     */
-    public static final boolean DEFAULT_BOOST = false;
+   /**
+    * Boost terms in query based on score.
+    * @see #isBoost
+    * @see #setBoost
+    */
+   public static final boolean DEFAULT_BOOST = false;
 
-    /**
-     * Default field names. Null is used to specify that the field names should be looked
-     * up at runtime from the provided reader.
-     */
-    public static final String[] DEFAULT_FIELD_NAMES = new String[] { "contents"};
+   /**
+    * Default field names. Null is used to specify that the field names should be looked
+    * up at runtime from the provided reader.
+    */
+   public static final String[] DEFAULT_FIELD_NAMES = new String[]{"contents"};
 
-    /**
-     * Ignore words less than this length or if 0 then this has no effect.
-     * @see #getMinWordLen
-     * @see #setMinWordLen
-     */
-    public static final int DEFAULT_MIN_WORD_LENGTH = 0;
+   /**
+    * Ignore words less than this length or if 0 then this has no effect.
+    * @see #getMinWordLen
+    * @see #setMinWordLen
+    */
+   public static final int DEFAULT_MIN_WORD_LENGTH = 0;
 
-    /**
-     * Ignore words greater than this length or if 0 then this has no effect.
-     * @see #getMaxWordLen
-     * @see #setMaxWordLen
-     */
-    public static final int DEFAULT_MAX_WORD_LENGTH = 0;
+   /**
+    * Ignore words greater than this length or if 0 then this has no effect.
+    * @see #getMaxWordLen
+    * @see #setMaxWordLen
+    */
+   public static final int DEFAULT_MAX_WORD_LENGTH = 0;
 
-    /**
-     * Default set of stopwords.
-     * If null means to allow stop words.
-     *
-     * @see #setStopWords
-     * @see #getStopWords
-     */
-    public static final Set DEFAULT_STOP_WORDS = null;
+   /**
+    * Default set of stopwords.
+    * If null, stop words are allowed.
+    *
+    * @see #setStopWords
+    * @see #getStopWords
+    */
+   public static final Set DEFAULT_STOP_WORDS = null;
 
-    /**
-     * Current set of stop words.
-     */
-    private Set stopWords = DEFAULT_STOP_WORDS;
+   /**
+    * Current set of stop words.
+    */
+   private Set stopWords = DEFAULT_STOP_WORDS;
 
-    /**
-     * Return a Query with no more than this many terms.
-     *
-     * @see BooleanQuery#getMaxClauseCount
-     * @see #getMaxQueryTerms
-     * @see #setMaxQueryTerms
-     */
-    public static final int DEFAULT_MAX_QUERY_TERMS = 25;
+   /**
+    * Return a Query with no more than this many terms.
+    *
+    * @see BooleanQuery#getMaxClauseCount
+    * @see #getMaxQueryTerms
+    * @see #setMaxQueryTerms
+    */
+   public static final int DEFAULT_MAX_QUERY_TERMS = 25;
 
-    /**
-     * Analyzer that will be used to parse the doc.
-     */
-    private Analyzer analyzer = DEFAULT_ANALYZER;
+   /**
+    * Analyzer that will be used to parse the doc.
+    */
+   private Analyzer analyzer = DEFAULT_ANALYZER;
 
-    /**
-     * Ignore words less freqent that this.
-     */
-    private int minTermFreq = DEFAULT_MIN_TERM_FREQ;
+   /**
+    * Ignore words less frequent than this.
+    */
+   private int minTermFreq = DEFAULT_MIN_TERM_FREQ;
 
-    /**
-     * Ignore words which do not occur in at least this many docs.
-     */
-    private int minDocFreq = DEFAULT_MIN_DOC_FREQ;
+   /**
+    * Ignore words which do not occur in at least this many docs.
+    */
+   private int minDocFreq = DEFAULT_MIN_DOC_FREQ;
 
-    /**
-     * Should we apply a boost to the Query based on the scores?
-     */
-    private boolean boost = DEFAULT_BOOST;
+   /**
+    * Should we apply a boost to the Query based on the scores?
+    */
+   private boolean boost = DEFAULT_BOOST;
 
-    /**
-     * Field name we'll analyze.
-     */
-    private String[] fieldNames = DEFAULT_FIELD_NAMES;
+   /**
+    * Field name we'll analyze.
+    */
+   private String[] fieldNames = DEFAULT_FIELD_NAMES;
 
-    /**
-     * The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
-     */
-    private int maxNumTokensParsed = DEFAULT_MAX_NUM_TOKENS_PARSED;
+   /**
+    * The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+    */
+   private int maxNumTokensParsed = DEFAULT_MAX_NUM_TOKENS_PARSED;
 
-    /**
-     * Ignore words if less than this len.
-     */
-    private int minWordLen = DEFAULT_MIN_WORD_LENGTH;
+   /**
+    * Ignore words if less than this len.
+    */
+   private int minWordLen = DEFAULT_MIN_WORD_LENGTH;
 
-    /**
-     * Ignore words if greater than this len.
-     */
-    private int maxWordLen = DEFAULT_MAX_WORD_LENGTH;
+   /**
+    * Ignore words if greater than this len.
+    */
+   private int maxWordLen = DEFAULT_MAX_WORD_LENGTH;
 
-    /**
-     * Don't return a query longer than this.
-     */
-    private int maxQueryTerms = DEFAULT_MAX_QUERY_TERMS;
+   /**
+    * Don't return a query longer than this.
+    */
+   private int maxQueryTerms = DEFAULT_MAX_QUERY_TERMS;
 
-    /**
-     * For idf() calculations.
-     */
-    private Similarity similarity;// = new DefaultSimilarity();
+   /**
+    * For idf() calculations.
+    */
+   private Similarity similarity;// = new DefaultSimilarity();
 
-    /**
-     * IndexReader to use
-     */
-    private final IndexReader ir;
+   /**
+    * IndexReader to use
+    */
+   private final IndexReader ir;
 
-    /**
-     * Constructor requiring an IndexReader.
-     */
-    public MoreLikeThis(IndexReader ir) {
-        this(ir, new DefaultSimilarity());
-    }
+   /**
+    * Constructor requiring an IndexReader.
+    */
+   public MoreLikeThis(IndexReader ir)
+   {
+      this(ir, new DefaultSimilarity());
+   }
 
-    public MoreLikeThis(IndexReader ir, Similarity sim){
+   public MoreLikeThis(IndexReader ir, Similarity sim)
+   {
       this.ir = ir;
       this.similarity = sim;
-    }
+   }
 
+   public Similarity getSimilarity()
+   {
+      return similarity;
+   }
 
-  public Similarity getSimilarity() {
-    return similarity;
-  }
+   public void setSimilarity(Similarity similarity)
+   {
+      this.similarity = similarity;
+   }
 
-  public void setSimilarity(Similarity similarity) {
-    this.similarity = similarity;
-  }
+   /**
+    * Returns an analyzer that will be used to parse source doc with. The default analyzer
+    * is the {@link #DEFAULT_ANALYZER}.
+    *
+    * @return the analyzer that will be used to parse source doc with.
+    * @see #DEFAULT_ANALYZER
+    */
+   public Analyzer getAnalyzer()
+   {
+      return analyzer;
+   }
 
-  /**
-     * Returns an analyzer that will be used to parse source doc with. The default analyzer
-     * is the {@link #DEFAULT_ANALYZER}.
-     *
-     * @return the analyzer that will be used to parse source doc with.
-     * @see #DEFAULT_ANALYZER
-     */
-    public Analyzer getAnalyzer() {
-        return analyzer;
-    }
+   /**
+    * Sets the analyzer to use. An analyzer is not required for generating a query with the
+    * {@link #like(int)} method, all other 'like' methods require an analyzer.
+    *
+    * @param analyzer the analyzer to use to tokenize text.
+    */
+   public void setAnalyzer(Analyzer analyzer)
+   {
+      this.analyzer = analyzer;
+   }
 
-    /**
-     * Sets the analyzer to use. An analyzer is not required for generating a query with the
-     * {@link #like(int)} method, all other 'like' methods require an analyzer.
-     *
-     * @param analyzer the analyzer to use to tokenize text.
-     */
-    public void setAnalyzer(Analyzer analyzer) {
-        this.analyzer = analyzer;
-    }
+   /**
+    * Returns the frequency below which terms will be ignored in the source doc. The default
+    * frequency is the {@link #DEFAULT_MIN_TERM_FREQ}.
+    *
+    * @return the frequency below which terms will be ignored in the source doc.
+    */
+   public int getMinTermFreq()
+   {
+      return minTermFreq;
+   }
 
-    /**
-     * Returns the frequency below which terms will be ignored in the source doc. The default
-     * frequency is the {@link #DEFAULT_MIN_TERM_FREQ}.
-     *
-     * @return the frequency below which terms will be ignored in the source doc.
-     */
-    public int getMinTermFreq() {
-        return minTermFreq;
-    }
+   /**
+    * Sets the frequency below which terms will be ignored in the source doc.
+    *
+    * @param minTermFreq the frequency below which terms will be ignored in the source doc.
+    */
+   public void setMinTermFreq(int minTermFreq)
+   {
+      this.minTermFreq = minTermFreq;
+   }
 
-    /**
-     * Sets the frequency below which terms will be ignored in the source doc.
-     *
-     * @param minTermFreq the frequency below which terms will be ignored in the source doc.
-     */
-    public void setMinTermFreq(int minTermFreq) {
-        this.minTermFreq = minTermFreq;
-    }
+   /**
+    * Returns the frequency at which words will be ignored which do not occur in at least this
+    * many docs. The default frequency is {@link #DEFAULT_MIN_DOC_FREQ}.
+    *
+    * @return the frequency at which words will be ignored which do not occur in at least this
+    * many docs.
+    */
+   public int getMinDocFreq()
+   {
+      return minDocFreq;
+   }
 
-    /**
-     * Returns the frequency at which words will be ignored which do not occur in at least this
-     * many docs. The default frequency is {@link #DEFAULT_MIN_DOC_FREQ}.
-     *
-     * @return the frequency at which words will be ignored which do not occur in at least this
-     * many docs.
-     */
-    public int getMinDocFreq() {
-        return minDocFreq;
-    }
+   /**
+    * Sets the frequency at which words will be ignored which do not occur in at least this
+    * many docs.
+    *
+    * @param minDocFreq the frequency at which words will be ignored which do not occur in at
+    * least this many docs.
+    */
+   public void setMinDocFreq(int minDocFreq)
+   {
+      this.minDocFreq = minDocFreq;
+   }
 
-    /**
-     * Sets the frequency at which words will be ignored which do not occur in at least this
-     * many docs.
-     *
-     * @param minDocFreq the frequency at which words will be ignored which do not occur in at
-     * least this many docs.
-     */
-    public void setMinDocFreq(int minDocFreq) {
-        this.minDocFreq = minDocFreq;
-    }
+   /**
+    * Returns whether to boost terms in query based on "score" or not. The default is
+    * {@link #DEFAULT_BOOST}.
+    *
+    * @return whether to boost terms in query based on "score" or not.
+    * @see #setBoost
+    */
+   public boolean isBoost()
+   {
+      return boost;
+   }
 
-    /**
-     * Returns whether to boost terms in query based on "score" or not. The default is
-     * {@link #DEFAULT_BOOST}.
-     *
-     * @return whether to boost terms in query based on "score" or not.
-     * @see #setBoost
-     */
-    public boolean isBoost() {
-        return boost;
-    }
+   /**
+    * Sets whether to boost terms in query based on "score" or not.
+    *
+    * @param boost true to boost terms in query based on "score", false otherwise.
+    * @see #isBoost
+    */
+   public void setBoost(boolean boost)
+   {
+      this.boost = boost;
+   }
 
-    /**
-     * Sets whether to boost terms in query based on "score" or not.
-     *
-     * @param boost true to boost terms in query based on "score", false otherwise.
-     * @see #isBoost
-     */
-    public void setBoost(boolean boost) {
-        this.boost = boost;
-    }
+   /**
+    * Returns the field names that will be used when generating the 'More Like This' query.
+    * The default field names that will be used are {@link #DEFAULT_FIELD_NAMES}.
+    *
+    * @return the field names that will be used when generating the 'More Like This' query.
+    */
+   public String[] getFieldNames()
+   {
+      return fieldNames;
+   }
 
-    /**
-     * Returns the field names that will be used when generating the 'More Like This' query.
-     * The default field names that will be used is {@link #DEFAULT_FIELD_NAMES}.
-     *
-     * @return the field names that will be used when generating the 'More Like This' query.
-     */
-    public String[] getFieldNames() {
-        return fieldNames;
-    }
+   /**
+    * Sets the field names that will be used when generating the 'More Like This' query.
+    * Set this to null for the field names to be determined at runtime from the IndexReader
+    * provided in the constructor.
+    *
+    * @param fieldNames the field names that will be used when generating the 'More Like This'
+    * query.
+    */
+   public void setFieldNames(String[] fieldNames)
+   {
+      this.fieldNames = fieldNames;
+   }
 
-    /**
-     * Sets the field names that will be used when generating the 'More Like This' query.
-     * Set this to null for the field names to be determined at runtime from the IndexReader
-     * provided in the constructor.
-     *
-     * @param fieldNames the field names that will be used when generating the 'More Like This'
-     * query.
-     */
-    public void setFieldNames(String[] fieldNames) {
-        this.fieldNames = fieldNames;
-    }
+   /**
+    * Returns the minimum word length below which words will be ignored. Set this to 0 for no
+    * minimum word length. The default is {@link #DEFAULT_MIN_WORD_LENGTH}.
+    *
+    * @return the minimum word length below which words will be ignored.
+    */
+   public int getMinWordLen()
+   {
+      return minWordLen;
+   }
 
-    /**
-     * Returns the minimum word length below which words will be ignored. Set this to 0 for no
-     * minimum word length. The default is {@link #DEFAULT_MIN_WORD_LENGTH}.
-     *
-     * @return the minimum word length below which words will be ignored.
-     */
-    public int getMinWordLen() {
-        return minWordLen;
-    }
+   /**
+    * Sets the minimum word length below which words will be ignored.
+    *
+    * @param minWordLen the minimum word length below which words will be ignored.
+    */
+   public void setMinWordLen(int minWordLen)
+   {
+      this.minWordLen = minWordLen;
+   }
 
-    /**
-     * Sets the minimum word length below which words will be ignored.
-     *
-     * @param minWordLen the minimum word length below which words will be ignored.
-     */
-    public void setMinWordLen(int minWordLen) {
-        this.minWordLen = minWordLen;
-    }
+   /**
+    * Returns the maximum word length above which words will be ignored. Set this to 0 for no
+    * maximum word length. The default is {@link #DEFAULT_MAX_WORD_LENGTH}.
+    *
+    * @return the maximum word length above which words will be ignored.
+    */
+   public int getMaxWordLen()
+   {
+      return maxWordLen;
+   }
 
-    /**
-     * Returns the maximum word length above which words will be ignored. Set this to 0 for no
-     * maximum word length. The default is {@link #DEFAULT_MAX_WORD_LENGTH}.
-     *
-     * @return the maximum word length above which words will be ignored.
-     */
-    public int getMaxWordLen() {
-        return maxWordLen;
-    }
+   /**
+    * Sets the maximum word length above which words will be ignored.
+    *
+    * @param maxWordLen the maximum word length above which words will be ignored.
+    */
+   public void setMaxWordLen(int maxWordLen)
+   {
+      this.maxWordLen = maxWordLen;
+   }
 
-    /**
-     * Sets the maximum word length above which words will be ignored.
-     *
-     * @param maxWordLen the maximum word length above which words will be ignored.
-     */
-    public void setMaxWordLen(int maxWordLen) {
-        this.maxWordLen = maxWordLen;
-    }
+   /**
+    * Set the set of stopwords.
+    * Any word in this set is considered "uninteresting" and ignored.
+    * Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
+    * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
+    *
+    * @param stopWords set of stopwords, if null it means to allow stop words
+    *
+    * @see org.apache.lucene.analysis.StopFilter#makeStopSet StopFilter.makeStopSet()
+    * @see #getStopWords
+    */
+   public void setStopWords(Set stopWords)
+   {
+      this.stopWords = stopWords;
+   }
 
-    /**
-     * Set the set of stopwords.
-     * Any word in this set is considered "uninteresting" and ignored.
-     * Even if your Analyzer allows stopwords, you might want to tell the MoreLikeThis code to ignore them, as
-     * for the purposes of document similarity it seems reasonable to assume that "a stop word is never interesting".
-     *
-     * @param stopWords set of stopwords, if null it means to allow stop words
-     *
-     * @see org.apache.lucene.analysis.StopFilter#makeStopSet StopFilter.makeStopSet()
-     * @see #getStopWords
-     */
-    public void setStopWords(Set stopWords) {
-        this.stopWords = stopWords;
-    }
+   /**
+    * Get the current stop words being used.
+    * @see #setStopWords
+    */
+   public Set getStopWords()
+   {
+      return stopWords;
+   }
 
-    /**
-     * Get the current stop words being used.
-     * @see #setStopWords
-     */
-    public Set getStopWords() {
-        return stopWords;
-    }
+   /**
+    * Returns the maximum number of query terms that will be included in any generated query.
+    * The default is {@link #DEFAULT_MAX_QUERY_TERMS}.
+    *
+    * @return the maximum number of query terms that will be included in any generated query.
+    */
+   public int getMaxQueryTerms()
+   {
+      return maxQueryTerms;
+   }
 
-    /**
-     * Returns the maximum number of query terms that will be included in any generated query.
-     * The default is {@link #DEFAULT_MAX_QUERY_TERMS}.
-     *
-     * @return the maximum number of query terms that will be included in any generated query.
-     */
-    public int getMaxQueryTerms() {
-        return maxQueryTerms;
-    }
+   /**
+    * Sets the maximum number of query terms that will be included in any generated query.
+    *
+    * @param maxQueryTerms the maximum number of query terms that will be included in any
+    * generated query.
+    */
+   public void setMaxQueryTerms(int maxQueryTerms)
+   {
+      this.maxQueryTerms = maxQueryTerms;
+   }
 
-    /**
-     * Sets the maximum number of query terms that will be included in any generated query.
-     *
-     * @param maxQueryTerms the maximum number of query terms that will be included in any
-     * generated query.
-     */
-    public void setMaxQueryTerms(int maxQueryTerms) {
-        this.maxQueryTerms = maxQueryTerms;
-    }
+   /**
+    * @return The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+    * @see #DEFAULT_MAX_NUM_TOKENS_PARSED
+    */
+   public int getMaxNumTokensParsed()
+   {
+      return maxNumTokensParsed;
+   }
 
-    /**
-     * @return The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
-     * @see #DEFAULT_MAX_NUM_TOKENS_PARSED
-     */
-    public int getMaxNumTokensParsed() {
-        return maxNumTokensParsed;
-    }
+   /**
+    * @param i The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
+    */
+   public void setMaxNumTokensParsed(int i)
+   {
+      maxNumTokensParsed = i;
+   }
 
-    /**
-     * @param i The maximum number of tokens to parse in each example doc field that is not stored with TermVector support
-     */
-    public void setMaxNumTokensParsed(int i) {
-        maxNumTokensParsed = i;
-    }
+   /**
+    * Return a query that will return docs like the passed lucene document ID.
+    *
+    * @param docNum the documentID of the lucene doc to generate the 'More Like This' query for.
+    * @return a query that will return docs like the passed lucene document ID.
+    */
+   public Query like(int docNum) throws IOException
+   {
+      if (fieldNames == null)
+      {
+         // gather list of valid fields from lucene
+         Collection fields = ir.getFieldNames(IndexReader.FieldOption.INDEXED);
+         fieldNames = (String[])fields.toArray(new String[fields.size()]);
+      }
 
-    /**
-     * Return a query that will return docs like the passed lucene document ID.
-     *
-     * @param docNum the documentID of the lucene doc to generate the 'More Like This" query for.
-     * @return a query that will return docs like the passed lucene document ID.
-     */
-    public Query like(int docNum) throws IOException {
-        if (fieldNames == null) {
-            // gather list of valid fields from lucene
-            Collection fields = ir.getFieldNames( IndexReader.FieldOption.INDEXED);
-            fieldNames = (String[]) fields.toArray(new String[fields.size()]);
-        }
+      return createQuery(retrieveTerms(docNum));
+   }
 
-        return createQuery(retrieveTerms(docNum));
-    }
+   /**
+    * Return a query that will return docs like the passed file.
+    *
+    * @return a query that will return docs like the passed file.
+    */
+   public Query like(File f) throws IOException
+   {
+      if (fieldNames == null)
+      {
+         // gather list of valid fields from lucene
+         Collection fields = ir.getFieldNames(IndexReader.FieldOption.INDEXED);
+         fieldNames = (String[])fields.toArray(new String[fields.size()]);
+      }
 
-    /**
-     * Return a query that will return docs like the passed file.
-     *
-     * @return a query that will return docs like the passed file.
-     */
-    public Query like(File f) throws IOException {
-        if (fieldNames == null) {
-            // gather list of valid fields from lucene
-            Collection fields = ir.getFieldNames( IndexReader.FieldOption.INDEXED);
-            fieldNames = (String[]) fields.toArray(new String[fields.size()]);
-        }
+      return like(new FileReader(f));
+   }
 
-        return like(new FileReader(f));
-    }
+   /**
+    * Return a query that will return docs like the passed URL.
+    *
+    * @return a query that will return docs like the passed URL.
+    */
+   public Query like(URL u) throws IOException
+   {
+      return like(new InputStreamReader(u.openConnection().getInputStream()));
+   }
 
-    /**
-     * Return a query that will return docs like the passed URL.
-     *
-     * @return a query that will return docs like the passed URL.
-     */
-    public Query like(URL u) throws IOException {
-        return like(new InputStreamReader(u.openConnection().getInputStream()));
-    }
+   /**
+    * Return a query that will return docs like the passed stream.
+    *
+    * @return a query that will return docs like the passed stream.
+    */
+   public Query like(java.io.InputStream is) throws IOException
+   {
+      return like(new InputStreamReader(is));
+   }
 
-    /**
-     * Return a query that will return docs like the passed stream.
-     *
-     * @return a query that will return docs like the passed stream.
-     */
-    public Query like(java.io.InputStream is) throws IOException {
-        return like(new InputStreamReader(is));
-    }
+   /**
+    * Return a query that will return docs like the passed Reader.
+    *
+    * @return a query that will return docs like the passed Reader.
+    */
+   public Query like(Reader r) throws IOException
+   {
+      return createQuery(retrieveTerms(r));
+   }
 
-    /**
-     * Return a query that will return docs like the passed Reader.
-     *
-     * @return a query that will return docs like the passed Reader.
-     */
-    public Query like(Reader r) throws IOException {
-        return createQuery(retrieveTerms(r));
-    }
+   /**
+    * Create the More like query from a PriorityQueue
+    */
+   private Query createQuery(PriorityQueue q)
+   {
+      BooleanQuery query = new BooleanQuery();
+      Object cur;
+      int qterms = 0;
+      float bestScore = 0;
 
-    /**
-     * Create the More like query from a PriorityQueue
-     */
-    private Query createQuery(PriorityQueue q) {
-        BooleanQuery query = new BooleanQuery();
-        Object cur;
-        int qterms = 0;
-        float bestScore = 0;
+      while (((cur = q.pop()) != null))
+      {
+         Object[] ar = (Object[])cur;
+         TermQuery tq = new JcrTermQuery(new Term((String)ar[1], (String)ar[0]));
 
-        while (((cur = q.pop()) != null)) {
-            Object[] ar = (Object[]) cur;
-            TermQuery tq = new JcrTermQuery(new Term((String) ar[1], (String) ar[0]));
+         if (boost)
+         {
+            if (qterms == 0)
+            {
+               bestScore = ((Float)ar[2]).floatValue();
+            }
+            float myScore = ((Float)ar[2]).floatValue();
 
-            if (boost) {
-                if (qterms == 0) {
-                    bestScore = ((Float) ar[2]).floatValue();
-                }
-                float myScore = ((Float) ar[2]).floatValue();
+            tq.setBoost(myScore / bestScore);
+         }
 
-                tq.setBoost(myScore / bestScore);
-            }
+         try
+         {
+            query.add(tq, BooleanClause.Occur.SHOULD);
+         }
+         catch (BooleanQuery.TooManyClauses ignore)
+         {
+            break;
+         }
 
-            try {
-                query.add(tq, BooleanClause.Occur.SHOULD);
-            }
-            catch (BooleanQuery.TooManyClauses ignore) {
-                break;
-            }
+         qterms++;
+         if (maxQueryTerms > 0 && qterms >= maxQueryTerms)
+         {
+            break;
+         }
+      }
 
-            qterms++;
-            if (maxQueryTerms > 0 && qterms >= maxQueryTerms) {
-                break;
-            }
-        }
+      return query;
+   }
 
-        return query;
-    }
+   /**
+    * Create a PriorityQueue from a word->tf map.
+    *
+    * @param words a map of words keyed on the word(String) with Int objects as the values.
+    */
+   private PriorityQueue createQueue(Map words) throws IOException
+   {
+      // have collected all words in doc and their freqs
+      int numDocs = ir.numDocs();
+      FreqQ res = new FreqQ(words.size()); // will order words by score
 
-    /**
-     * Create a PriorityQueue from a word->tf map.
-     *
-     * @param words a map of words keyed on the word(String) with Int objects as the values.
-     */
-    private PriorityQueue createQueue(Map words) throws IOException {
-        // have collected all words in doc and their freqs
-        int numDocs = ir.numDocs();
-        FreqQ res = new FreqQ(words.size()); // will order words by score
+      Iterator it = words.keySet().iterator();
+      while (it.hasNext())
+      { // for every word
+         String word = (String)it.next();
 
-        Iterator it = words.keySet().iterator();
-        while (it.hasNext()) { // for every word
-            String word = (String) it.next();
+         int tf = ((Int)words.get(word)).x; // term freq in the source doc
+         if (minTermFreq > 0 && tf < minTermFreq)
+         {
+            continue; // filter out words that don't occur enough times in the source
+         }
 
-            int tf = ((Int) words.get(word)).x; // term freq in the source doc
-            if (minTermFreq > 0 && tf < minTermFreq) {
-                continue; // filter out words that don't occur enough times in the source
-            }
+         // go through all the fields and find the largest document frequency
+         String topField = fieldNames[0];
+         int docFreq = 0;
+         for (int i = 0; i < fieldNames.length; i++)
+         {
+            int freq = ir.docFreq(new Term(fieldNames[i], word));
+            topField = (freq > docFreq) ? fieldNames[i] : topField; //NOSONAR
+            docFreq = (freq > docFreq) ? freq : docFreq;
+         }
 
-            // go through all the fields and find the largest document frequency
-            String topField = fieldNames[0];
-            int docFreq = 0;
-            for (int i = 0; i < fieldNames.length; i++) 
-            {
-                int freq = ir.docFreq(new Term(fieldNames[i], word));
-                topField = (freq > docFreq) ? fieldNames[i] : topField; //NOSONAR
-                docFreq = (freq > docFreq) ? freq : docFreq;
-            }
+         if (minDocFreq > 0 && docFreq < minDocFreq)
+         {
+            continue; // filter out words that don't occur in enough docs
+         }
 
-            if (minDocFreq > 0 && docFreq < minDocFreq) {
-                continue; // filter out words that don't occur in enough docs
-            }
+         if (docFreq == 0)
+         {
+            continue; // index update problem?
+         }
 
-            if (docFreq == 0) {
-                continue; // index update problem?
-            }
+         float idf = similarity.idf(docFreq, numDocs);
+         float score = tf * idf;
 
-            float idf = similarity.idf(docFreq, numDocs);
-            float score = tf * idf;
+         // only really need 1st 3 entries, other ones are for troubleshooting
+         res.insertWithOverflow(new Object[]{word, // the word
+            topField, // the top field
+            new Float(score), // overall score
+            new Float(idf), // idf
+            new Integer(docFreq), // freq in all docs
+            new Integer(tf)});
+      }
+      return res;
+   }
 
-            // only really need 1st 3 entries, other ones are for troubleshooting
-            res.insert(new Object[]{word,                   // the word
-                                    topField,               // the top field
-                                    new Float(score),       // overall score
-                                    new Float(idf),         // idf
-                                    new Integer(docFreq),   // freq in all docs
-                                    new Integer(tf)
-            });
-        }
-        return res;
-    }
+   /**
+    * Describe the parameters that control how the "more like this" query is formed.
+    */
+   public String describeParams()
+   {
+      StringBuffer sb = new StringBuffer();
+      sb.append("\t" + "maxQueryTerms  : " + maxQueryTerms + "\n");
+      sb.append("\t" + "minWordLen     : " + minWordLen + "\n");
+      sb.append("\t" + "maxWordLen     : " + maxWordLen + "\n");
+      sb.append("\t" + "fieldNames     : ");
+      String delim = "";
+      for (int i = 0; i < fieldNames.length; i++)
+      {
+         String fieldName = fieldNames[i];
+         sb.append(delim).append(fieldName);
+         delim = ", ";
+      }
+      sb.append("\n");
+      sb.append("\t" + "boost          : " + boost + "\n");
+      sb.append("\t" + "minTermFreq    : " + minTermFreq + "\n");
+      sb.append("\t" + "minDocFreq     : " + minDocFreq + "\n");
+      return sb.toString();
+   }
 
-    /**
-     * Describe the parameters that control how the "more like this" query is formed.
-     */
-    public String describeParams() {
-        StringBuffer sb = new StringBuffer();
-        sb.append("\t" + "maxQueryTerms  : " + maxQueryTerms + "\n");
-        sb.append("\t" + "minWordLen     : " + minWordLen + "\n");
-        sb.append("\t" + "maxWordLen     : " + maxWordLen + "\n");
-        sb.append("\t" + "fieldNames     : ");
-        String delim = "";
-        for (int i = 0; i < fieldNames.length; i++) {
-            String fieldName = fieldNames[i];
-            sb.append(delim).append(fieldName);
-            delim = ", ";
-        }
-        sb.append("\n");
-        sb.append("\t" + "boost          : " + boost + "\n");
-        sb.append("\t" + "minTermFreq    : " + minTermFreq + "\n");
-        sb.append("\t" + "minDocFreq     : " + minDocFreq + "\n");
-        return sb.toString();
-    }
+   /**
+    * Find words for a more-like-this query former.
+    *
+    * @param docNum the id of the lucene document from which to find terms
+    */
+   public PriorityQueue retrieveTerms(int docNum) throws IOException
+   {
+      Map termFreqMap = new HashMap();
+      for (int i = 0; i < fieldNames.length; i++)
+      {
+         String fieldName = fieldNames[i];
+         TermFreqVector vector = ir.getTermFreqVector(docNum, fieldName);
 
-    /**
-     * Find words for a more-like-this query former.
-     *
-     * @param docNum the id of the lucene document from which to find terms
-     */
-    public PriorityQueue retrieveTerms(int docNum) throws IOException {
-        Map termFreqMap = new HashMap();
-        for (int i = 0; i < fieldNames.length; i++) {
-            String fieldName = fieldNames[i];
-            TermFreqVector vector = ir.getTermFreqVector(docNum, fieldName);
-
-            // field does not store term vector info
-            if (vector == null) {
-                Document d = ir.document(docNum);
-                String[] text = d.getValues(fieldName);
-                if (text != null) {
-                    for (int j = 0; j < text.length; j++) {
-                        addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName);
-                    }
-                }
+         // field does not store term vector info
+         if (vector == null)
+         {
+            Document d = ir.document(docNum);
+            String[] text = d.getValues(fieldName);
+            if (text != null)
+            {
+               for (int j = 0; j < text.length; j++)
+               {
+                  addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName);
+               }
             }
-            else {
-                addTermFrequencies(termFreqMap, vector);
-            }
+         }
+         else
+         {
+            addTermFrequencies(termFreqMap, vector);
+         }
 
-        }
+      }
 
-        return createQueue(termFreqMap);
-    }
+      return createQueue(termFreqMap);
+   }
 
-    /**
-     * Adds terms and frequencies found in vector into the Map termFreqMap
-     * @param termFreqMap a Map of terms and their frequencies
-     * @param vector List of terms and their frequencies for a doc/field
-     */
-    private void addTermFrequencies(Map termFreqMap, TermFreqVector vector) {
-        String[] terms = vector.getTerms();
-        int[] freqs = vector.getTermFrequencies();
-        for (int j = 0; j < terms.length; j++) {
-            String term = terms[j];
+   /**
+    * Adds terms and frequencies found in vector into the Map termFreqMap
+    * @param termFreqMap a Map of terms and their frequencies
+    * @param vector List of terms and their frequencies for a doc/field
+    */
+   private void addTermFrequencies(Map termFreqMap, TermFreqVector vector)
+   {
+      String[] terms = vector.getTerms();
+      int[] freqs = vector.getTermFrequencies();
+      for (int j = 0; j < terms.length; j++)
+      {
+         String term = terms[j];
 
-            if (isNoiseWord(term)) {
-                continue;
-            }
-            // increment frequency
-            Int cnt = (Int) termFreqMap.get(term);
-            if (cnt == null) {
-                cnt = new Int();
-                termFreqMap.put(term, cnt);
-                cnt.x = freqs[j];
-            }
-            else {
-                cnt.x += freqs[j];
-            }
-        }
-    }
+         if (isNoiseWord(term))
+         {
+            continue;
+         }
+         // increment frequency
+         Int cnt = (Int)termFreqMap.get(term);
+         if (cnt == null)
+         {
+            cnt = new Int();
+            termFreqMap.put(term, cnt);
+            cnt.x = freqs[j];
+         }
+         else
+         {
+            cnt.x += freqs[j];
+         }
+      }
+   }
 
-    /**
-     * Adds term frequencies found by tokenizing text from reader into the Map words
-     * @param r a source of text to be tokenized
-     * @param termFreqMap a Map of terms and their frequencies
-     * @param fieldName Used by analyzer for any special per-field analysis
-     */
-    private void addTermFrequencies(Reader r, Map termFreqMap, String fieldName)
-            throws IOException {
-        TokenStream ts = analyzer.tokenStream(fieldName, r);
-        int tokenCount = 0;
-        // for every token
-        final Token reusableToken = new Token();
-        for (Token nextToken = ts.next(reusableToken); nextToken != null; nextToken = ts.next(reusableToken)) {
-            String word = nextToken.term();
-            tokenCount++;
-            if (tokenCount > maxNumTokensParsed) {
-                break;
-            }
-            if (isNoiseWord(word)) {
-                continue;
-            }
+   /**
+    * Adds term frequencies found by tokenizing text from reader into the Map words
+    * @param r a source of text to be tokenized
+    * @param termFreqMap a Map of terms and their frequencies
+    * @param fieldName Used by analyzer for any special per-field analysis
+    */
+   private void addTermFrequencies(Reader r, Map<String, Int> termFreqMap, String fieldName) throws IOException
+   {
+      TokenStream ts = analyzer.tokenStream(fieldName, r);
+      int tokenCount = 0;
+      // for every token
+      while (ts.incrementToken())
+      {
+         TermAttribute term = (TermAttribute)ts.getAttribute(TermAttribute.class);
+         String word = term.term();
+         tokenCount++;
+         if (tokenCount > maxNumTokensParsed)
+         {
+            break;
+         }
+         if (isNoiseWord(word))
+         {
+            continue;
+         }
 
-            // increment frequency
-            Int cnt = (Int) termFreqMap.get(word);
-            if (cnt == null) {
-                termFreqMap.put(word, new Int());
-            } else {
-                cnt.x++;
-            }
-        }
-    }
+         // increment frequency
+         Int cnt = termFreqMap.get(word);
+         if (cnt == null)
+         {
+            termFreqMap.put(word, new Int());
+         }
+         else
+         {
+            cnt.x++;
+         }
+      }
+      ts.end();
+      ts.close();
+   }
 
-    /** determines if the passed term is likely to be of interest in "more like" comparisons
-     *
-     * @param term The word being considered
-     * @return true if should be ignored, false if should be used in further analysis
-     */
-    private boolean isNoiseWord(String term) {
-        int len = term.length();
-        if (minWordLen > 0 && len < minWordLen) {
-            return true;
-        }
-        if (maxWordLen > 0 && len > maxWordLen) {
-            return true;
-        }
-        if (stopWords != null && stopWords.contains( term)) {
-            return true;
-        }
-        return false;
-    }
+   /** determines if the passed term is likely to be of interest in "more like" comparisons
+    *
+    * @param term The word being considered
+    * @return true if should be ignored, false if should be used in further analysis
+    */
+   private boolean isNoiseWord(String term)
+   {
+      int len = term.length();
+      if (minWordLen > 0 && len < minWordLen)
+      {
+         return true;
+      }
+      if (maxWordLen > 0 && len > maxWordLen)
+      {
+         return true;
+      }
+      if (stopWords != null && stopWords.contains(term))
+      {
+         return true;
+      }
+      return false;
+   }
 
+   /**
+    * Find words for a more-like-this query former.
+    * The result is a priority queue of arrays with one entry for <b>every word</b> in the document.
+    * Each array has 6 elements.
+    * The elements are:
+    * <ol>
+    * <li> The word (String)
+    * <li> The top field that this word comes from (String)
+    * <li> The score for this word (Float)
+    * <li> The IDF value (Float)
+    * <li> The frequency of this word in the index (Integer)
+    * <li> The frequency of this word in the source document (Integer)
+    * </ol>
+    * This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
+    * This method is exposed so that you can identify the "interesting words" in a document.
+    * For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
+    *
+    * @param r the reader that has the content of the document
+    * @return the most interesting words in the document ordered by score, with the highest scoring, or best entry, first
+    *
+    * @see #retrieveInterestingTerms
+    */
+   public PriorityQueue retrieveTerms(Reader r) throws IOException
+   {
+      Map words = new HashMap();
+      for (int i = 0; i < fieldNames.length; i++)
+      {
+         String fieldName = fieldNames[i];
+         addTermFrequencies(r, words, fieldName);
+      }
+      return createQueue(words);
+   }
 
-    /**
-     * Find words for a more-like-this query former.
-     * The result is a priority queue of arrays with one entry for <b>every word</b> in the document.
-     * Each array has 6 elements.
-     * The elements are:
-     * <ol>
-     * <li> The word (String)
-     * <li> The top field that this word comes from (String)
-     * <li> The score for this word (Float)
-     * <li> The IDF value (Float)
-     * <li> The frequency of this word in the index (Integer)
-     * <li> The frequency of this word in the source document (Integer)
-     * </ol>
-     * This is a somewhat "advanced" routine, and in general only the 1st entry in the array is of interest.
-     * This method is exposed so that you can identify the "interesting words" in a document.
-     * For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
-     *
-     * @param r the reader that has the content of the document
-     * @return the most interesting words in the document ordered by score, with the highest scoring, or best entry, first
-     *
-     * @see #retrieveInterestingTerms
-     */
-    public PriorityQueue retrieveTerms(Reader r) throws IOException {
-        Map words = new HashMap();
-        for (int i = 0; i < fieldNames.length; i++) {
-            String fieldName = fieldNames[i];
-            addTermFrequencies(r, words, fieldName);
-        }
-        return createQueue(words);
-    }
+   /**
+    * @see #retrieveInterestingTerms(java.io.Reader)
+    */
+   public String[] retrieveInterestingTerms(int docNum) throws IOException
+   {
+      ArrayList al = new ArrayList(maxQueryTerms);
+      PriorityQueue pq = retrieveTerms(docNum);
+      Object cur;
+      // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
+      int lim = maxQueryTerms;
+      // we just want to return the top words
+      while (((cur = pq.pop()) != null) && lim-- > 0)
+      {
+         Object[] ar = (Object[])cur;
+         al.add(ar[0]); // the 1st entry is the interesting word
+      }
+      String[] res = new String[al.size()];
+      return (String[])al.toArray(res);
+   }
 
-    /**
-     * @see #retrieveInterestingTerms(java.io.Reader)
-     */
-    public String[] retrieveInterestingTerms(int docNum) throws IOException {
-        ArrayList al = new ArrayList(maxQueryTerms);
-        PriorityQueue pq = retrieveTerms(docNum);
-        Object cur;
-        // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
-        int lim = maxQueryTerms; 
-        // we just want to return the top words
-        while (((cur = pq.pop()) != null) && lim-- > 0) {
-            Object[] ar = (Object[]) cur;
-            al.add(ar[0]); // the 1st entry is the interesting word
-        }
-        String[] res = new String[al.size()];
-        return (String[]) al.toArray(res);
-    }
+   /**
+    * Convenience routine to make it easy to return the most interesting words in a document.
+    * More advanced users will call {@link #retrieveTerms(java.io.Reader) retrieveTerms()} directly.
+    * @param r the source document
+    * @return the most interesting words in the document
+    *
+    * @see #retrieveTerms(java.io.Reader)
+    * @see #setMaxQueryTerms
+    */
+   public String[] retrieveInterestingTerms(Reader r) throws IOException
+   {
+      ArrayList al = new ArrayList(maxQueryTerms);
+      PriorityQueue pq = retrieveTerms(r);
+      Object cur;
+      // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
+      int lim = maxQueryTerms;
+      // we just want to return the top words
+      while (((cur = pq.pop()) != null) && lim-- > 0)
+      {
+         Object[] ar = (Object[])cur;
+         al.add(ar[0]); // the 1st entry is the interesting word
+      }
+      String[] res = new String[al.size()];
+      return (String[])al.toArray(res);
+   }
 
-    /**
-     * Convenience routine to make it easy to return the most interesting words in a document.
-     * More advanced users will call {@link #retrieveTerms(java.io.Reader) retrieveTerms()} directly.
-     * @param r the source document
-     * @return the most interesting words in the document
-     *
-     * @see #retrieveTerms(java.io.Reader)
-     * @see #setMaxQueryTerms
-     */
-    public String[] retrieveInterestingTerms(Reader r) throws IOException {
-        ArrayList al = new ArrayList(maxQueryTerms);
-        PriorityQueue pq = retrieveTerms(r);
-        Object cur;
-        // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
-        int lim = maxQueryTerms; 
-        // we just want to return the top words
-        while (((cur = pq.pop()) != null) && lim-- > 0) {
-            Object[] ar = (Object[]) cur;
-            al.add(ar[0]); // the 1st entry is the interesting word
-        }
-        String[] res = new String[al.size()];
-        return (String[]) al.toArray(res);
-    }
+   /**
+    * PriorityQueue that orders words by score.
+    */
+   private static class FreqQ extends PriorityQueue
+   {
+      FreqQ(int s)
+      {
+         initialize(s);
+      }
 
-    /**
-     * PriorityQueue that orders words by score.
-     */
-    private static class FreqQ extends PriorityQueue {
-        FreqQ (int s) {
-            initialize(s);
-        }
+      protected boolean lessThan(Object a, Object b)
+      {
+         Object[] aa = (Object[])a;
+         Object[] bb = (Object[])b;
+         Float fa = (Float)aa[2];
+         Float fb = (Float)bb[2];
+         return fa.floatValue() > fb.floatValue();
+      }
+   }
 
-        protected boolean lessThan(Object a, Object b) {
-            Object[] aa = (Object[]) a;
-            Object[] bb = (Object[]) b;
-            Float fa = (Float) aa[2];
-            Float fb = (Float) bb[2];
-            return fa.floatValue() > fb.floatValue();
-        }
-    }
+   /**
+    * Mutable counter used for term frequencies, avoiding repeated allocation of Integer objects.
+    */
+   private static class Int
+   {
+      int x;
 
-    /**
-     * Use for frequencies and to avoid renewing Integers.
-     */
-    private static class Int {
-        int x;
+      Int()
+      {
+         x = 1;
+      }
+   }
 
-        Int() {
-            x = 1;
-        }
-    }
-
-
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiScorer.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiScorer.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiScorer.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,138 +16,156 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
-import java.io.IOException;
-
-import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
 
+import java.io.IOException;
+
 /**
  * <code>MultiScorer</code> spans multiple Scorers and returns document numbers
  * and score values in the order as supplied to the constructor of this
  * <code>MultiScorer</code>.
  */
-class MultiScorer extends Scorer {
+class MultiScorer extends Scorer
+{
 
-    /**
-     * The sub scorers.
-     */
-    private final Scorer[] scorers;
+   /**
+    * The sub scorers.
+    */
+   private final Scorer[] scorers;
 
-    /**
-     * The document start numbers of the sub scorers.
-     */
-    private final int[] starts;
+   /**
+    * The document start numbers of the sub scorers.
+    */
+   private final int[] starts;
 
-    /**
-     * Index of the current scorer.
-     */
-    private int current = 0;
+   /**
+    * Index of the current scorer.
+    */
+   private int currentScorer;
 
-    /**
-     * Indicates if there are more documents.
-     */
-    private boolean hasNext = true;
+   /**
+    * The next document id to be returned
+    */
+   private int currentDoc = -1;
 
-    /**
-     * Creates a new <code>MultiScorer</code> that spans multiple
-     * <code>scorers</code>.
-     *
-     * @param similarity the similarity implementation that should be use.
-     * @param scorers the sub scorers.
-     * @param starts the document number start for each sub scorer.
-     */
-    MultiScorer(Similarity similarity, Scorer[] scorers, int[] starts) {
-        super(similarity);
-        this.scorers = scorers;
-        this.starts = starts;
-    }
+   /**
+    * Creates a new <code>MultiScorer</code> that spans multiple
+    * <code>scorers</code>.
+    *
+    * @param similarity the similarity implementation that should be use.
+    * @param scorers the sub scorers.
+    * @param starts the document number start for each sub scorer.
+    */
+   MultiScorer(Similarity similarity, Scorer[] scorers, int[] starts)
+   {
+      super(similarity);
+      this.scorers = scorers;
+      this.starts = starts;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean next() throws IOException {
-        while (hasNext) {
-            if (scorers[current].next()) {
-                return true;
-            } else if (++current < scorers.length) {
-                // advance to next scorer
-            } else {
-                // no more scorers
-                hasNext = false;
-            }
-        }
-        return hasNext;
-    }
+   @Override
+   public int nextDoc() throws IOException
+   {
+      while (currentDoc != NO_MORE_DOCS)
+      {
+         if (scorers[currentScorer].nextDoc() != NO_MORE_DOCS)
+         {
+            currentDoc = scorers[currentScorer].docID() + starts[currentScorer];
+            return currentDoc;
+         }
+         else if (++currentScorer < scorers.length)
+         {
+            // advance to next scorer
+         }
+         else
+         {
+            // no more scorers
+            currentDoc = NO_MORE_DOCS;
+         }
+      }
 
-    /**
-     * {@inheritDoc}
-     */
-    public int doc() {
-        return scorers[current].doc() + starts[current];
-    }
+      return currentDoc;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public float score() throws IOException {
-        return scorers[current].score();
-    }
+   @Override
+   public int docID()
+   {
+      return currentDoc;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public boolean skipTo(int target) throws IOException {
-        current = scorerIndex(target);
-        if (scorers[current].skipTo(target - starts[current])) {
-            return true;
-        } else {
-            if (++current < scorers.length) {
-                // simply move to the next if there is any
-                return next();
-            } else {
-                // no more document
-                hasNext = false;
-                return hasNext;
-            }
-        }
-    }
+   @Override
+   public float score() throws IOException
+   {
+      return scorers[currentScorer].score();
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public Explanation explain(int doc) throws IOException {
-        int scorerIndex = scorerIndex(doc);
-        return scorers[scorerIndex].explain(doc - starts[scorerIndex]);
-    }
+   @Override
+   public int advance(int target) throws IOException
+   {
+      if (currentDoc == NO_MORE_DOCS)
+      {
+         return currentDoc;
+      }
 
-    //--------------------------< internal >------------------------------------
+      currentScorer = scorerIndex(target);
+      if (scorers[currentScorer].advance(target - starts[currentScorer]) != NO_MORE_DOCS)
+      {
+         currentDoc = scorers[currentScorer].docID() + starts[currentScorer];
+         return currentDoc;
+      }
+      else
+      {
+         if (++currentScorer < scorers.length)
+         {
+            // simply move to the next if there is any
+            currentDoc = nextDoc();
+            return currentDoc;
+         }
+         else
+         {
+            // no more document
+            currentDoc = NO_MORE_DOCS;
+            return currentDoc;
+         }
+      }
+   }
 
-    /**
-     * Returns the scorer index for document <code>n</code>.
-     * Implementation copied from lucene MultiReader class.
-     *
-     * @param n document number.
-     * @return the scorer index.
-     */
-    private int scorerIndex(int n) {
-        int lo = 0;                                      // search starts array
-        int hi = scorers.length - 1;                  // for first element less
+   //--------------------------< internal >------------------------------------
 
-        while (hi >= lo) {
-            int mid = (lo + hi) >> 1;
-            int midValue = starts[mid];
-            if (n < midValue) {
-                hi = mid - 1;
-            } else if (n > midValue) {
-                lo = mid + 1;
-            } else {                                      // found a match
-                while (mid + 1 < scorers.length && starts[mid + 1] == midValue) {
-                    mid++;                                  // scan to last match
-                }
-                return mid;
+   /**
+    * Returns the scorer index for document <code>n</code>.
+    * Implementation copied from lucene MultiReader class.
+    *
+    * @param n document number.
+    * @return the scorer index.
+    */
+   private int scorerIndex(int n)
+   {
+      int lo = 0; // search starts array
+      int hi = scorers.length - 1; // for first element less
+
+      while (hi >= lo)
+      {
+         int mid = (lo + hi) >> 1;
+         int midValue = starts[mid];
+         if (n < midValue)
+         {
+            hi = mid - 1;
+         }
+         else if (n > midValue)
+         {
+            lo = mid + 1;
+         }
+         else
+         { // found a match
+            while (mid + 1 < scorers.length && starts[mid + 1] == midValue)
+            {
+               mid++; // scan to last match
             }
-        }
-        return hi;
-    }
-}
+            return mid;
+         }
+      }
+      return hi;
+   }
+}
\ No newline at end of file

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NodeIndexer.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -264,7 +264,7 @@
     *
     * @param doc  the lucene document.
     * @param name the name of the multi-value property.
-   * @throws RepositoryException 
+    * @throws RepositoryException 
     */
    private void addMVPName(Document doc, InternalQName name) throws RepositoryException
    {
@@ -712,7 +712,6 @@
       doc.add(createFieldWithoutNorms(fieldName, pathString.toString(), PropertyType.PATH));
    }
 
-
    /**
     * Adds the string value to the document both as the named field and
     * optionally for full text indexing if <code>tokenized</code> is
@@ -809,33 +808,8 @@
     */
    protected Field createFulltextField(String value, boolean store, boolean withOffsets)
    {
-      Field.TermVector tv;
-      if (withOffsets)
-      {
-         tv = Field.TermVector.WITH_OFFSETS;
-      }
-      else
-      {
-         tv = Field.TermVector.NO;
-      }
-      if (store)
-      {
-         // store field compressed if greater than 16k
-         Field.Store stored;
-         if (value.length() > 0x4000)
-         {
-            stored = Field.Store.COMPRESS;
-         }
-         else
-         {
-            stored = Field.Store.YES;
-         }
-         return new Field(FieldNames.FULLTEXT, value, stored, Field.Index.ANALYZED, tv);
-      }
-      else
-      {
-         return new Field(FieldNames.FULLTEXT, value, Field.Store.NO, Field.Index.ANALYZED, tv);
-      }
+      return new Field(FieldNames.FULLTEXT, value, store ? Field.Store.YES : Field.Store.NO, Field.Index.ANALYZED,
+         withOffsets ? Field.TermVector.WITH_OFFSETS : Field.TermVector.NO);
    }
 
    /**

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NotQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NotQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/NotQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -210,14 +210,20 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (docNo == NO_MORE_DOCS)
+         {
+            return docNo;
+         }
+
          if (docNo == -1)
          {
             // get first doc of context scorer
-            if (contextScorer.next())
+            int docId = contextScorer.nextDoc();
+            if (docId != NO_MORE_DOCS)
             {
-               contextNo = contextScorer.doc();
+               contextNo = docId;
             }
          }
          // move to next candidate
@@ -231,23 +237,21 @@
          while (contextNo != -1 && contextNo == docNo)
          {
             docNo++;
-            if (contextScorer.next())
-            {
-               contextNo = contextScorer.doc();
-            }
-            else
-            {
-               contextNo = -1;
-            }
+            int docId = contextScorer.nextDoc();
+            contextNo = docId == NO_MORE_DOCS ? -1 : docId;
          }
-         return docNo < reader.maxDoc();
+         if (docNo >= reader.maxDoc())
+         {
+            docNo = NO_MORE_DOCS;
+         }
+         return docNo;
       }
 
       /**
        * {@inheritDoc}
        */
       @Override
-      public int doc()
+      public int docID()
       {
          return docNo;
       }
@@ -265,30 +269,20 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
+         if (docNo == NO_MORE_DOCS)
+         {
+            return docNo;
+         }
+
          if (contextNo != -1 && contextNo < target)
          {
-            if (contextScorer.skipTo(target))
-            {
-               contextNo = contextScorer.doc();
-            }
-            else
-            {
-               contextNo = -1;
-            }
+            int docId = contextScorer.advance(target);
+            contextNo = docId == NO_MORE_DOCS ? -1 : docId;
          }
          docNo = target - 1;
-         return next();
+         return nextDoc();
       }
-
-      /**
-       * @throws UnsupportedOperationException always
-       */
-      @Override
-      public Explanation explain(int doc) throws IOException
-      {
-         throw new UnsupportedOperationException();
-      }
    }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ParentAxisQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ParentAxisQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ParentAxisQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -256,13 +256,13 @@
 
       /**
        * Map that contains the scores from matching documents from the context
-       * query. To save memory only scores that are not equal to 1.0f are put
-       * to this map.
+       * query. To save memory only scores that are not equal to the score
+       * value of the first match are put to this map.
        * <p/>
        * key=[Integer] id of selected document from context query<br>
        * value=[Float] score for that document
        */
-      private final Map scores = new HashMap();
+      private final Map<Integer, Float> scores = new HashMap<Integer, Float>();
 
       /**
        * The next document id to return
@@ -270,6 +270,11 @@
       private int nextDoc = -1;
 
       /**
+       * The score of the first match.
+       */
+      private Float firstScore;
+
+      /**
        * Creates a new <code>ParentAxisScorer</code>.
        *
        * @param similarity the <code>Similarity</code> instance to use.
@@ -286,63 +291,57 @@
          this.hResolver = resolver;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateParent();
          nextDoc = hits.nextSetBit(nextDoc + 1);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public int doc()
+      public int docID()
       {
          return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
       public float score() throws IOException
       {
-         Float score = (Float)scores.get(new Integer(nextDoc));
+         Float score = scores.get(nextDoc);
          if (score == null)
          {
-            score = DEFAULT_SCORE;
+            score = firstScore;
          }
-         return score.floatValue();
+         return score;
       }
 
-      /**
-       * {@inheritDoc}
-       */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateParent();
          nextDoc = hits.nextSetBit(target);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
-      /**
-       * {@inheritDoc}
-       *
-       * @throws UnsupportedOperationException this implementation always
-       *                                       throws an <code>UnsupportedOperationException</code>.
-       */
-      @Override
-      public Explanation explain(int doc) throws IOException
-      {
-         throw new UnsupportedOperationException();
-      }
-
       private void calculateParent() throws IOException
       {
          if (hits == null)
@@ -350,44 +349,54 @@
             hits = new BitSet(reader.maxDoc());
 
             final IOException[] ex = new IOException[1];
-            contextScorer.score(new AbstractHitCollector()
+            if (contextScorer != null)
             {
+               contextScorer.score(new AbstractHitCollector()
+               {
+                  private int[] docs = new int[1];
 
-               private int[] docs = new int[1];
-
-               @Override
-               public void collect(int doc, float score)
-               {
-                  try
+                  @Override
+                  protected void collect(int doc, float score)
                   {
-                     docs = hResolver.getParents(doc, docs);
-                     if (docs.length == 1)
+                     try
                      {
-                        // optimize single value
-                        hits.set(docs[0]);
-                        if (score != DEFAULT_SCORE.floatValue())
+                        docs = hResolver.getParents(doc, docs);
+                        if (docs.length == 1)
                         {
-                           scores.put(new Integer(docs[0]), new Float(score));
+                           // optimize single value
+                           hits.set(docs[0]);
+                           if (firstScore == null)
+                           {
+                              firstScore = score;
+                           }
+                           else if (firstScore != score)
+                           {
+                              scores.put(doc, score);
+                           }
                         }
-                     }
-                     else
-                     {
-                        for (int i = 0; i < docs.length; i++)
+                        else
                         {
-                           hits.set(docs[i]);
-                           if (score != DEFAULT_SCORE.floatValue())
+                           for (int docNum : docs)
                            {
-                              scores.put(new Integer(docs[i]), new Float(score));
+                              hits.set(docNum);
+                              if (firstScore == null)
+                              {
+                                 firstScore = score;
+                              }
+                              else if (firstScore != score)
+                              {
+                                 scores.put(doc, score);
+                              }
                            }
                         }
                      }
+                     catch (IOException e)
+                     {
+                        ex[0] = e;
+                     }
                   }
-                  catch (IOException e)
-                  {
-                     ex[0] = e;
-                  }
-               }
-            });
+               });
+            }
 
             if (ex[0] != null)
             {

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/PersistentIndex.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -110,7 +110,7 @@
       byte[] buffer = new byte[1024];
       Directory dir = index.getDirectory();
       Directory dest = getDirectory();
-      String[] files = dir.list();
+      String[] files = dir.listAll();
       for (int i = 0; i < files.length; i++)
       {
          IndexInput in = dir.openInput(files[i]);

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHitsQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHitsQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryHitsQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -39,235 +39,264 @@
  * <code>QueryHitsQuery</code> exposes a {@link QueryHits} implementation again
  * as a Lucene Query.
  */
-public class QueryHitsQuery extends Query implements JcrQuery{
+public class QueryHitsQuery extends Query implements JcrQuery
+{
 
-    /**
-     * The underlying query hits.
-     */
-    private final QueryHits hits;
+   /**
+    * The underlying query hits.
+    */
+   private final QueryHits hits;
 
-    /**
-     * Creates a new query based on {@link QueryHits}.
-     *
-     * @param hits the query hits.
-     */
-    public QueryHitsQuery(QueryHits hits) {
-        this.hits = hits;
-    }
+   /**
+    * Creates a new query based on {@link QueryHits}.
+    *
+    * @param hits the query hits.
+    */
+   public QueryHitsQuery(QueryHits hits)
+   {
+      this.hits = hits;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-   public Weight createWeight(Searcher searcher) throws IOException {
-        return new QueryHitsQueryWeight(searcher.getSimilarity());
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public Weight createWeight(Searcher searcher) throws IOException
+   {
+      return new QueryHitsQueryWeight(searcher.getSimilarity());
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-   public String toString(String field) {
-        return "QueryHitsQuery";
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public String toString(String field)
+   {
+      return "QueryHitsQuery";
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-   public void extractTerms(Set terms) {
-        // no terms
-    }
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public void extractTerms(Set terms)
+   {
+      // no terms
+   }
 
-    //-----------------------< JackrabbitQuery >--------------------------------
+   //-----------------------< JackrabbitQuery >--------------------------------
 
-    /**
-     * {@inheritDoc}
-     */
-    public QueryHits execute(JcrIndexSearcher searcher,
-                             SessionImpl session,
-                             Sort sort) throws IOException {
-        if (sort.getSort().length == 0) {
-            return hits;
-        } else {
-            return null;
-        }
-    }
+   /**
+    * {@inheritDoc}
+    */
+   public QueryHits execute(JcrIndexSearcher searcher, SessionImpl session, Sort sort) throws IOException
+   {
+      if (sort.getSort().length == 0)
+      {
+         return hits;
+      }
+      else
+      {
+         return null;
+      }
+   }
 
-    //------------------------< QueryHitsQueryWeight >--------------------------
+   //------------------------< QueryHitsQueryWeight >--------------------------
 
-    /**
-     * The Weight implementation for this query.
-     */
-    public class QueryHitsQueryWeight extends Weight {
+   /**
+    * The Weight implementation for this query.
+    */
+   public class QueryHitsQueryWeight extends Weight
+   {
 
-        /**
-         * The similarity.
-         */
-        private final Similarity similarity;
+      /**
+       * The similarity.
+       */
+      private final Similarity similarity;
 
-        /**
-         * Creates a new weight with the given <code>similarity</code>.
-         *
-         * @param similarity the similarity.
-         */
-        public QueryHitsQueryWeight(Similarity similarity) {
-            this.similarity = similarity;
-        }
+      /**
+       * Creates a new weight with the given <code>similarity</code>.
+       *
+       * @param similarity the similarity.
+       */
+      public QueryHitsQueryWeight(Similarity similarity)
+      {
+         this.similarity = similarity;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public Query getQuery() {
-            return QueryHitsQuery.this;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public Query getQuery()
+      {
+         return QueryHitsQuery.this;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float getValue() {
-            return 1.0f;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public float getValue()
+      {
+         return 1.0f;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float sumOfSquaredWeights() throws IOException {
-            return 1.0f;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public float sumOfSquaredWeights() throws IOException
+      {
+         return 1.0f;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public void normalize(float norm) {
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public void normalize(float norm)
+      {
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
-            return new QueryHitsQueryScorer(reader, similarity);
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException
+      {
+         return new QueryHitsQueryScorer(reader, similarity);
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public Explanation explain(IndexReader reader, int doc) throws IOException {
-            return new Explanation();
-        }
-    }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public Explanation explain(IndexReader reader, int doc) throws IOException
+      {
+         return new Explanation();
+      }
+   }
 
-    //-------------------< QueryHitsQueryScorer >-------------------------------
+   //-------------------< QueryHitsQueryScorer >-------------------------------
 
-    /**
-     * the scorer implementation for this query.
-     */
-    public class QueryHitsQueryScorer extends Scorer {
+   /**
+    * the scorer implementation for this query.
+    */
+   public class QueryHitsQueryScorer extends Scorer
+   {
 
-        /**
-         * Iterator over <code>Integer</code> instances identifying the
-         * lucene documents. Document numbers are iterated in ascending order.
-         */
-        private final Iterator docs;
+      /**
+       * Iterator over <code>Integer</code> instances identifying the
+       * lucene documents. Document numbers are iterated in ascending order.
+       */
+      private final Iterator<Integer> docs;
 
-        /**
-         * Maps <code>Integer</code> document numbers to <code>Float</code>
-         * scores.
-         */
-        private final Map scores = new HashMap();
+      /**
+       * Maps <code>Integer</code> document numbers to <code>Float</code>
+       * scores.
+       */
+      private final Map<Integer, Float> scores = new HashMap<Integer, Float>();
 
-        /**
-         * The current document number.
-         */
-        private Integer currentDoc = null;
+      /**
+       * The current document number.
+       */
+      private Integer currentDoc = null;
 
-        /**
-         * Creates a new scorer.
-         *
-         * @param reader     the index reader.
-         * @param similarity the similarity implementation.
-         * @throws IOException if an error occurs while reading from the index.
-         */
-        protected QueryHitsQueryScorer(IndexReader reader,
-                                       Similarity similarity)
-                throws IOException {
-            super(similarity);
-            ScoreNode node;
-            Set sortedDocs = new TreeSet();
-            try {
-                while ((node = hits.nextScoreNode()) != null) {
-                    String uuid = node.getNodeId();
-                    Term id = new Term(FieldNames.UUID, uuid);
-                    TermDocs tDocs = reader.termDocs(id);
-                    try {
-                        if (tDocs.next()) {
-                            Integer doc = new Integer(tDocs.doc());
-                            sortedDocs.add(doc);
-                            scores.put(doc, new Float(node.getScore()));
-                        }
-                    } finally {
-                        tDocs.close();
-                    }
-                }
-            } finally {
-                hits.close();
+      /**
+       * Creates a new scorer.
+       *
+       * @param reader     the index reader.
+       * @param similarity the similarity implementation.
+       * @throws IOException if an error occurs while reading from the index.
+       */
+      protected QueryHitsQueryScorer(IndexReader reader, Similarity similarity) throws IOException
+      {
+         super(similarity);
+         ScoreNode node;
+         Set<Integer> sortedDocs = new TreeSet();
+         try
+         {
+            while ((node = hits.nextScoreNode()) != null)
+            {
+               String uuid = node.getNodeId();
+               Term id = new Term(FieldNames.UUID, uuid);
+               TermDocs tDocs = reader.termDocs(id);
+               try
+               {
+                  if (tDocs.next())
+                  {
+                     Integer doc = new Integer(tDocs.doc());
+                     sortedDocs.add(doc);
+                     scores.put(doc, new Float(node.getScore()));
+                  }
+               }
+               finally
+               {
+                  tDocs.close();
+               }
             }
-            docs = sortedDocs.iterator();
-        }
+         }
+         finally
+         {
+            hits.close();
+         }
+         docs = sortedDocs.iterator();
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public boolean next() throws IOException {
-            if (docs.hasNext()) {
-                currentDoc = (Integer) docs.next();
-                return true;
-            }
-            return false;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public int nextDoc() throws IOException
+      {
+         if (currentDoc == NO_MORE_DOCS)
+         {
+            return currentDoc;
+         }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public int doc() {
-            return currentDoc.intValue();
-        }
+         currentDoc = docs.hasNext() ? docs.next() : NO_MORE_DOCS;
+         return currentDoc;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float score() throws IOException {
-            return ((Float) scores.get(currentDoc)).floatValue();
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public int docID()
+      {
+         return currentDoc == null ? -1 : currentDoc;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public boolean skipTo(int target) throws IOException {
-            do {
-                if (!next()) {
-                    return false;
-                }
-            } while (target > doc());
-            return true;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public float score() throws IOException
+      {
+         return scores.get(currentDoc).floatValue();
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public Explanation explain(int doc) throws IOException {
-            return new Explanation();
-        }
-    }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public int advance(int target) throws IOException
+      {
+         if (currentDoc == NO_MORE_DOCS)
+         {
+            return currentDoc;
+         }
+
+         do
+         {
+            if (nextDoc() == NO_MORE_DOCS)
+            {
+               return NO_MORE_DOCS;
+            }
+         }
+         while (target > docID());
+         return docID();
+      }
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryImpl.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryImpl.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/QueryImpl.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -117,9 +117,9 @@
 
       // build lucene query
       Query query =
-         LuceneQueryBuilder.createQuery(root, session, index.getContext().getItemStateManager(), index
-            .getNamespaceMappings(), index.getTextAnalyzer(), propReg, index.getSynonymProvider(), index
-            .getIndexFormatVersion(), index.getContext().getVirtualTableResolver());
+         LuceneQueryBuilder.createQuery(root, session, index.getContext().getItemStateManager(),
+            index.getNamespaceMappings(), index.getTextAnalyzer(), propReg, index.getSynonymProvider(),
+            index.getIndexFormatVersion(), index.getContext().getVirtualTableResolver());
 
       OrderQueryNode orderNode = root.getOrderNode();
 
@@ -142,7 +142,7 @@
 
       return new SingleColumnQueryResult(index, itemMgr, session, session.getAccessManager(), this, query,
          new SpellSuggestion(index.getSpellChecker(), root), getSelectProperties(), orderProperties, ascSpecs,
-         getRespectDocumentOrder(), offset, limit);
+         orderProperties.length == 0 && getRespectDocumentOrder(), offset, limit);
    }
 
    /**

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RangeQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RangeQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/RangeQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -20,19 +20,19 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.search.ConstantScoreRangeQuery;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.search.Weight;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.BitSet;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -40,464 +40,541 @@
 /**
  * Implements a lucene range query.
  */
-public class RangeQuery extends Query implements Transformable {
+public class RangeQuery extends Query implements Transformable
+{
 
-    /**
-     * The lower term. May be <code>null</code> if <code>upperTerm</code> is not
-     * <code>null</code>.
-     */
-    private Term lowerTerm;
+   /**
+    * The lower term. May be <code>null</code> if <code>upperTerm</code> is not
+    * <code>null</code>.
+    */
+   private Term lowerTerm;
 
-    /**
-     * The upper term. May be <code>null</code> if <code>lowerTerm</code> is not
-     * <code>null</code>.
-     */
-    private Term upperTerm;
+   /**
+    * The upper term. May be <code>null</code> if <code>lowerTerm</code> is not
+    * <code>null</code>.
+    */
+   private Term upperTerm;
 
-    /**
-     * If <code>true</code> the range interval is inclusive.
-     */
-    private boolean inclusive;
+   /**
+    * If <code>true</code> the range interval is inclusive.
+    */
+   private boolean inclusive;
 
-    /**
-     * How the term enum is transformed before it is compared to lower and upper
-     * term.
-     */
-    private int transform = TRANSFORM_NONE;
+   /**
+    * How the term enum is transformed before it is compared to lower and upper
+    * term.
+    */
+   private int transform = TRANSFORM_NONE;
 
-    /**
-     * Creates a new RangeQuery. The lower or the upper term may be
-     * <code>null</code>, but not both!
-     *
-     * @param lowerTerm the lower term of the interval, or <code>null</code>
-     * @param upperTerm the upper term of the interval, or <code>null</code>.
-     * @param inclusive if <code>true</code> the interval is inclusive.
-     */
-    public RangeQuery(Term lowerTerm, Term upperTerm, boolean inclusive) {
-        this(lowerTerm, upperTerm, inclusive, TRANSFORM_NONE);
-    }
+   /**
+    * The rewritten range query or <code>null</code> if the range spans more
+    * than {@link org.apache.lucene.search.BooleanQuery#maxClauseCount} terms.
+    */
+   private Query stdRangeQuery;
 
-    /**
-     * Creates a new RangeQuery. The lower or the upper term may be
-     * <code>null</code>, but not both!
-     *
-     * @param lowerTerm the lower term of the interval, or <code>null</code>
-     * @param upperTerm the upper term of the interval, or <code>null</code>.
-     * @param inclusive if <code>true</code> the interval is inclusive.
-     * @param transform how term enums are transformed when read from the index.
-     */
-    public RangeQuery(Term lowerTerm, Term upperTerm, boolean inclusive, int transform) {
-        if (lowerTerm == null && upperTerm == null) {
-            throw new IllegalArgumentException("At least one term must be non-null");
-        }
-        if (lowerTerm != null && upperTerm != null && lowerTerm.field() != upperTerm.field()) {
-            throw new IllegalArgumentException("Both terms must be for the same field");
-        }
+   /**
+    * Creates a new RangeQuery. The lower or the upper term may be
+    * <code>null</code>, but not both!
+    *
+    * @param lowerTerm the lower term of the interval, or <code>null</code>
+    * @param upperTerm the upper term of the interval, or <code>null</code>.
+    * @param inclusive if <code>true</code> the interval is inclusive.
+    */
+   public RangeQuery(Term lowerTerm, Term upperTerm, boolean inclusive)
+   {
+      this(lowerTerm, upperTerm, inclusive, TRANSFORM_NONE);
+   }
 
-        // if we have a lowerTerm, start there. otherwise, start at beginning
-        if (lowerTerm != null) {
-            this.lowerTerm = lowerTerm;
-        } else {
-            this.lowerTerm = new Term(upperTerm.field(), "");
-        }
+   /**
+    * Creates a new RangeQuery. The lower or the upper term may be
+    * <code>null</code>, but not both!
+    *
+    * @param lowerTerm the lower term of the interval, or <code>null</code>
+    * @param upperTerm the upper term of the interval, or <code>null</code>.
+    * @param inclusive if <code>true</code> the interval is inclusive.
+    * @param transform how term enums are transformed when read from the index.
+    */
+   public RangeQuery(Term lowerTerm, Term upperTerm, boolean inclusive, int transform)
+   {
+      if (lowerTerm == null && upperTerm == null)
+      {
+         throw new IllegalArgumentException("At least one term must be non-null");
+      }
+      if (lowerTerm != null && upperTerm != null && lowerTerm.field() != upperTerm.field())
+      {
+         throw new IllegalArgumentException("Both terms must be for the same field");
+      }
 
-        this.upperTerm = upperTerm;
-        this.inclusive = inclusive;
-        this.transform = transform;
-    }
+      // if we have a lowerTerm, start there. otherwise, start at beginning
+      if (lowerTerm != null)
+      {
+         this.lowerTerm = lowerTerm;
+      }
+      else
+      {
+         this.lowerTerm = new Term(upperTerm.field(), "");
+      }
 
-    /**
-     * {@inheritDoc}
-     */
-    public void setTransformation(int transformation) {
-        this.transform = transformation;
-    }
+      this.upperTerm = upperTerm;
+      this.inclusive = inclusive;
+      this.transform = transform;
+   }
 
-    /**
-     * Rewrites this query into a {@link ConstantScoreRangeQuery} if
-     * {@link #transform} is {@link #TRANSFORM_NONE}.
-     *
-     * @param reader the index reader.
-     * @return the rewritten query or this query if rewriting is not possible.
-     * @throws IOException if an error occurs.
-     */
-    @Override
-   public Query rewrite(IndexReader reader) throws IOException {
-        if (transform == TRANSFORM_NONE) {
-            return new ConstantScoreRangeQuery(lowerTerm.field(),
-                    lowerTerm.text(), upperTerm.text(), inclusive,
-                    inclusive).rewrite(reader);
-        } else {
-            // always use our implementation when we need to transform the
-            // term enum
+   /**
+    * {@inheritDoc}
+    */
+   public void setTransformation(int transformation)
+   {
+      this.transform = transformation;
+   }
+
+   /**
+    * Tries to rewrite this query into a standard lucene RangeQuery.
+    * This rewrite might fail with a TooManyClauses exception. If that
+    * happens, we use our own implementation.
+    *
+    * @param reader the index reader.
+    * @return the rewritten query or this query if rewriting is not possible.
+    * @throws IOException if an error occurs.
+    */
+   public Query rewrite(IndexReader reader) throws IOException
+   {
+      if (transform == TRANSFORM_NONE)
+      {
+         Query stdRangeQueryImpl =
+            new TermRangeQuery(lowerTerm.field(), lowerTerm.text(), upperTerm.text(), inclusive, inclusive);
+         try
+         {
+            stdRangeQuery = stdRangeQueryImpl.rewrite(reader);
+            return stdRangeQuery;
+         }
+         catch (BooleanQuery.TooManyClauses e)
+         {
+            // failed, use own implementation
             return this;
-        }
-    }
+         }
+      }
+      else
+      {
+         // always use our implementation when we need to transform the
+         // term enum
+         return this;
+      }
+   }
 
-    /**
-     * Creates the <code>Weight</code> for this query.
-     *
-     * @param searcher the searcher to use for the <code>Weight</code>.
-     * @return the <code>Weigth</code> for this query.
-     */
-    @Override
-   public Weight createWeight(Searcher searcher) {
-        return new RangeQueryWeight(searcher);
-    }
+   /**
+    * Creates the <code>Weight</code> for this query.
+    *
+    * @param searcher the searcher to use for the <code>Weight</code>.
+    * @return the <code>Weigth</code> for this query.
+    */
+   public Weight createWeight(Searcher searcher)
+   {
+      return new RangeQueryWeight(searcher);
+   }
 
-    /**
-     * Returns a string representation of this query.
-     * @param field the field name for which to create a string representation.
-     * @return a string representation of this query.
-     */
-    @Override
-   public String toString(String field) {
-        StringBuffer buffer = new StringBuffer();
-        if (!getField().equals(field)) {
-            buffer.append(getField());
-            buffer.append(":");
-        }
-        buffer.append(inclusive ? "[" : "{");
-        buffer.append(lowerTerm != null ? lowerTerm.text() : "null");
-        buffer.append(" TO ");
-        buffer.append(upperTerm != null ? upperTerm.text() : "null");
-        buffer.append(inclusive ? "]" : "}");
-        if (getBoost() != 1.0f) {
-            buffer.append("^");
-            buffer.append(Float.toString(getBoost()));
-        }
-        return buffer.toString();
-    }
+   /**
+    * Returns a string representation of this query.
+    * @param field the field name for which to create a string representation.
+    * @return a string representation of this query.
+    */
+   public String toString(String field)
+   {
+      StringBuffer buffer = new StringBuffer();
+      if (!getField().equals(field))
+      {
+         buffer.append(getField());
+         buffer.append(":");
+      }
+      buffer.append(inclusive ? "[" : "{");
+      buffer.append(lowerTerm != null ? lowerTerm.text() : "null");
+      buffer.append(" TO ");
+      buffer.append(upperTerm != null ? upperTerm.text() : "null");
+      buffer.append(inclusive ? "]" : "}");
+      if (getBoost() != 1.0f)
+      {
+         buffer.append("^");
+         buffer.append(Float.toString(getBoost()));
+      }
+      return buffer.toString();
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-   public void extractTerms(Set terms) {
-        // cannot extract terms
-    }
+   /**
+    * {@inheritDoc}
+    */
+   public void extractTerms(Set terms)
+   {
+      if (stdRangeQuery != null)
+      {
+         stdRangeQuery.extractTerms(terms);
+      }
+   }
 
-    /**
-     * Returns the field name for this query.
-     */
-    private String getField() {
-        return (lowerTerm != null ? lowerTerm.field() : upperTerm.field());
-    }
+   /**
+    * Returns the field name for this query.
+    */
+   private String getField()
+   {
+      return (lowerTerm != null ? lowerTerm.field() : upperTerm.field());
+   }
 
-    //--------------------------< RangeQueryWeight >----------------------------
+   //--------------------------< RangeQueryWeight >----------------------------
 
-    /**
-     * The <code>Weight</code> implementation for this <code>RangeQuery</code>.
-     */
-    private class RangeQueryWeight extends AbstractWeight {
+   /**
+    * The <code>Weight</code> implementation for this <code>RangeQuery</code>.
+    */
+   private class RangeQueryWeight extends AbstractWeight
+   {
 
-        /**
-         * Creates a new <code>RangeQueryWeight</code> instance using
-         * <code>searcher</code>.
-         *
-         * @param searcher a <code>Searcher</code> instance.
-         */
-        RangeQueryWeight(Searcher searcher) {
-            super(searcher);
-        }
+      /**
+       * Creates a new <code>RangeQueryWeight</code> instance using
+       * <code>searcher</code>.
+       *
+       * @param searcher a <code>Searcher</code> instance.
+       */
+      RangeQueryWeight(Searcher searcher)
+      {
+         super(searcher);
+      }
 
-        /**
-         * Creates a {@link RangeQueryScorer} instance.
-         *
-         * @param reader index reader
-         * @return a {@link RangeQueryScorer} instance
-         */
-        @Override
-      protected Scorer createScorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) {
-            return new RangeQueryScorer(searcher.getSimilarity(), reader);
-        };
+      /**
+       * Creates a {@link RangeQueryScorer} instance.
+       *
+       * @param reader index reader
+       * @return a {@link RangeQueryScorer} instance
+       */
+      @Override
+      protected Scorer createScorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
+      {
+         return new RangeQueryScorer(searcher.getSimilarity(), reader);
+      };
 
-        /**
-         * Returns this <code>RangeQuery</code>.
-         *
-         * @return this <code>RangeQuery</code>.
-         */
-        @Override
-      public Query getQuery() {
-            return RangeQuery.this;
-        }
+      /**
+       * Returns this <code>RangeQuery</code>.
+       *
+       * @return this <code>RangeQuery</code>.
+       */
+      @Override
+      public Query getQuery()
+      {
+         return RangeQuery.this;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float getValue() {
-            return 1.0f;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public float getValue()
+      {
+         return 1.0f;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float sumOfSquaredWeights() throws IOException {
-            return 1.0f;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public float sumOfSquaredWeights() throws IOException
+      {
+         return 1.0f;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public void normalize(float norm) {
-        }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public void normalize(float norm)
+      {
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public Explanation explain(IndexReader reader, int doc) throws IOException {
-            return new Explanation();
-        }
-    }
+      /**
+       * {@inheritDoc}
+       */
+      @Override
+      public Explanation explain(IndexReader reader, int doc) throws IOException
+      {
+         return new Explanation();
+      }
+   }
 
-    //------------------------< RangeQueryScorer >------------------------------
+   //------------------------< RangeQueryScorer >------------------------------
+   /**
+    * Implements a <code>Scorer</code> for this <code>RangeQuery</code>.
+    */
+   private final class RangeQueryScorer extends Scorer
+   {
 
-    /**
-     * Implements a <code>Scorer</code> for this <code>RangeQuery</code>.
-     */
-    private final class RangeQueryScorer extends Scorer {
+      /**
+       * The index reader to use for calculating the matching documents.
+       */
+      private final IndexReader reader;
 
-        /**
-         * The index reader to use for calculating the matching documents.
-         */
-        private final IndexReader reader;
+      /**
+       * The documents ids that match this range query.
+       */
+      private final BitSet hits;
 
-        /**
-         * The documents ids that match this range query.
-         */
-        private final BitSet hits;
+      /**
+       * Set to <code>true</code> when the hits have been calculated.
+       */
+      private boolean hitsCalculated = false;
 
-        /**
-         * Set to <code>true</code> when the hits have been calculated.
-         */
-        private boolean hitsCalculated = false;
+      /**
+       * The next document id to return
+       */
+      private int nextDoc = -1;
 
-        /**
-         * The next document id to return
-         */
-        private int nextDoc = -1;
+      /**
+       * The cache key to use to store the results.
+       */
+      private final String cacheKey;
 
-        /**
-         * The cache key to use to store the results.
-         */
-        private final String cacheKey;
+      /**
+       * The map to store the results.
+       */
+      private final Map<String, BitSet> resultMap;
 
-        /**
-         * The map to store the results.
-         */
-        private final Map resultMap;
+      /**
+       * Creates a new RangeQueryScorer.
+       * @param similarity the similarity implementation.
+       * @param reader the index reader to use.
+       */
+      @SuppressWarnings({"unchecked"})
+      RangeQueryScorer(Similarity similarity, IndexReader reader)
+      {
+         super(similarity);
+         this.reader = reader;
+         StringBuffer key = new StringBuffer();
+         key.append(lowerTerm != null ? lowerTerm.field() : upperTerm.field());
+         key.append('\uFFFF');
+         key.append(lowerTerm != null ? lowerTerm.text() : "");
+         key.append('\uFFFF');
+         key.append(upperTerm != null ? upperTerm.text() : "");
+         key.append('\uFFFF');
+         key.append(inclusive);
+         key.append('\uFFFF');
+         key.append(transform);
+         this.cacheKey = key.toString();
+         // check cache
+         PerQueryCache cache = PerQueryCache.getInstance();
+         Map<String, BitSet> m = (Map<String, BitSet>)cache.get(RangeQueryScorer.class, reader);
+         if (m == null)
+         {
+            m = new HashMap<String, BitSet>();
+            cache.put(RangeQueryScorer.class, reader, m);
+         }
+         resultMap = m;
 
-        /**
-         * Creates a new RangeQueryScorer.
-         * @param similarity the similarity implementation.
-         * @param reader the index reader to use.
-         */
-        RangeQueryScorer(Similarity similarity, IndexReader reader) {
-            super(similarity);
-            this.reader = reader;
-            StringBuffer key = new StringBuffer();
-            key.append(lowerTerm != null ? lowerTerm.field() : upperTerm.field());
-            key.append('\uFFFF');
-            key.append(lowerTerm != null ? lowerTerm.text() : "");
-            key.append('\uFFFF');
-            key.append(upperTerm != null ? upperTerm.text() : "");
-            key.append('\uFFFF');
-            key.append(inclusive);
-            key.append('\uFFFF');
-            key.append(transform);
-            this.cacheKey = key.toString();
-            // check cache
-            PerQueryCache cache = PerQueryCache.getInstance();
-            Map m = (Map) cache.get(RangeQueryScorer.class, reader);
-            if (m == null) {
-                m = new HashMap();
-                cache.put(RangeQueryScorer.class, reader, m);
-            }
-            resultMap = m;
+         BitSet result = resultMap.get(cacheKey);
+         if (result == null)
+         {
+            result = new BitSet(reader.maxDoc());
+         }
+         else
+         {
+            hitsCalculated = true;
+         }
+         hits = result;
+      }
 
-            BitSet result = (BitSet) resultMap.get(cacheKey);
-            if (result == null) {
-                result = new BitSet(reader.maxDoc());
-            } else {
-                hitsCalculated = true;
-            }
-            hits = result;
-        }
+      @Override
+      public int nextDoc() throws IOException
+      {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public boolean next() throws IOException {
-            calculateHits();
-            nextDoc = hits.nextSetBit(nextDoc + 1);
-            return nextDoc > -1;
-        }
+         calculateHits();
+         nextDoc = hits.nextSetBit(nextDoc + 1);
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public int doc() {
-            return nextDoc;
-        }
+      @Override
+      public int docID()
+      {
+         return nextDoc;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public float score() {
-            return 1.0f;
-        }
+      @Override
+      public float score()
+      {
+         return 1.0f;
+      }
 
-        /**
-         * {@inheritDoc}
-         */
-        @Override
-      public boolean skipTo(int target) throws IOException {
-            calculateHits();
-            nextDoc = hits.nextSetBit(target);
-            return nextDoc > -1;
-        }
+      @Override
+      public int advance(int target) throws IOException
+      {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
 
-        /**
-         * Returns an empty Explanation object.
-         * @return an empty Explanation object.
-         */
-        @Override
-      public Explanation explain(int doc) {
-            return new Explanation();
-        }
+         calculateHits();
+         nextDoc = hits.nextSetBit(target);
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
+      }
 
-        /**
-         * Calculates the ids of the documents matching this range query.
-         * @throws IOException if an error occurs while reading from the index.
-         */
-        private void calculateHits() throws IOException {
-            if (hitsCalculated) {
-                return;
-            }
+      /**
+       * Calculates the ids of the documents matching this range query.
+       * @throws IOException if an error occurs while reading from the index.
+       */
+      private void calculateHits() throws IOException
+      {
+         if (hitsCalculated)
+         {
+            return;
+         }
 
-            String testField = getField();
+         String testField = getField();
 
-            boolean checkLower = false;
-            if (!inclusive || transform != TRANSFORM_NONE) {
-                // make adjustments to set to exclusive
-                checkLower = true;
-            }
+         boolean checkLower = false;
+         if (!inclusive || transform != TRANSFORM_NONE)
+         {
+            // make adjustments to set to exclusive
+            checkLower = true;
+         }
 
-            int propNameLength = FieldNames.getNameLength(lowerTerm.text());
-            String namePrefix = "";
-            if (propNameLength > 0) {
-                namePrefix = lowerTerm.text().substring(0, propNameLength);
-            }
-            List startTerms = new ArrayList(2);
+         int propNameLength = FieldNames.getNameLength(lowerTerm.text());
+         String namePrefix = "";
+         if (propNameLength > 0)
+         {
+            namePrefix = lowerTerm.text().substring(0, propNameLength);
+         }
+         List<Term> startTerms = new ArrayList<Term>(2);
 
-            if (transform == TRANSFORM_NONE || lowerTerm.text().length() <= propNameLength) {
-                // use lowerTerm as is
-                startTerms.add(lowerTerm);
-            } else {
-                // first enumerate terms using lower case start character
-                StringBuffer termText = new StringBuffer(propNameLength + 1);
-                termText.append(lowerTerm.text().subSequence(0, propNameLength));
-                char startCharacter = lowerTerm.text().charAt(propNameLength);
-                termText.append(Character.toLowerCase(startCharacter));
-                startTerms.add(new Term(lowerTerm.field(), termText.toString()));
-                // second enumerate terms using upper case start character
-                termText.setCharAt(termText.length() - 1, Character.toUpperCase(startCharacter));
-                startTerms.add(new Term(lowerTerm.field(), termText.toString()));
-            }
+         if (transform == TRANSFORM_NONE || lowerTerm.text().length() <= propNameLength)
+         {
+            // use lowerTerm as is
+            startTerms.add(lowerTerm);
+         }
+         else
+         {
+            // first enumerate terms using lower case start character
+            StringBuffer termText = new StringBuffer(propNameLength + 1);
+            termText.append(lowerTerm.text().subSequence(0, propNameLength));
+            char startCharacter = lowerTerm.text().charAt(propNameLength);
+            termText.append(Character.toLowerCase(startCharacter));
+            startTerms.add(new Term(lowerTerm.field(), termText.toString()));
+            // second enumerate terms using upper case start character
+            termText.setCharAt(termText.length() - 1, Character.toUpperCase(startCharacter));
+            startTerms.add(new Term(lowerTerm.field(), termText.toString()));
+         }
 
-            Iterator it = startTerms.iterator();
-            while (it.hasNext()) {
-                Term startTerm = (Term) it.next();
+         for (Term startTerm : startTerms)
+         {
+            TermEnum terms = reader.terms(startTerm);
+            try
+            {
+               TermDocs docs = reader.termDocs();
+               try
+               {
+                  do
+                  {
+                     Term term = terms.term();
+                     if (term != null && term.field() == testField && term.text().startsWith(namePrefix))
+                     {
+                        if (checkLower)
+                        {
+                           int compare = termCompare(term.text(), lowerTerm.text(), propNameLength);
+                           if (compare > 0 || compare == 0 && inclusive)
+                           {
+                              // do not check lower term anymore if no
+                              // transformation is done on the term enum
+                              checkLower = transform != TRANSFORM_NONE;
+                           }
+                           else
+                           {
+                              // continue with next term
+                              continue;
+                           }
+                        }
+                        if (upperTerm != null)
+                        {
+                           int compare = termCompare(term.text(), upperTerm.text(), propNameLength);
+                           // if beyond the upper term, or is exclusive and
+                           // this is equal to the upper term
+                           if ((compare > 0) || (!inclusive && compare == 0))
+                           {
+                              // only break out if no transformation
+                              // was done on the term from the enum
+                              if (transform == TRANSFORM_NONE)
+                              {
+                                 break;
+                              }
+                              else
+                              {
+                                 // because of the transformation
+                                 // it is possible that the next
+                                 // term will be included again if
+                                 // we still enumerate on the same
+                                 // property name
+                                 if (term.text().startsWith(namePrefix))
+                                 {
+                                    continue;
+                                 }
+                                 else
+                                 {
+                                    break;
+                                 }
+                              }
+                           }
+                        }
 
-                TermEnum terms = reader.terms(startTerm);
-                try {
-                    TermDocs docs = reader.termDocs();
-                    try {
-                        do {
-                            Term term = terms.term();
-                            if (term != null
-                                    && term.field() == testField
-                                    && term.text().startsWith(namePrefix)) {
-                                if (checkLower) {
-                                    int compare = termCompare(term.text(), lowerTerm.text(), propNameLength);
-                                    if (compare > 0 || compare == 0 && inclusive) {
-                                        // do not check lower term anymore if no
-                                        // transformation is done on the term enum
-                                        checkLower = transform == TRANSFORM_NONE ? false : true;
-                                    } else {
-                                        // continue with next term
-                                        continue;
-                                    }
-                                }
-                                if (upperTerm != null) {
-                                    int compare = termCompare(term.text(), upperTerm.text(), propNameLength);
-                                    // if beyond the upper term, or is exclusive and
-                                    // this is equal to the upper term
-                                    if ((compare > 0) || (!inclusive && compare == 0)) {
-                                        // only break out if no transformation
-                                        // was done on the term from the enum
-                                        if (transform == TRANSFORM_NONE) {
-                                            break;
-                                        } else {
-                                            // because of the transformation
-                                            // it is possible that the next
-                                            // term will be included again if
-                                            // we still enumerate on the same
-                                            // property name
-                                            if (term.text().startsWith(namePrefix)) {
-                                                continue;
-                                            } else {
-                                                break;
-                                            }
-                                        }
-                                    }
-                                }
-
-                                docs.seek(terms);
-                                while (docs.next()) {
-                                    hits.set(docs.doc());
-                                }
-                            } else {
-                                break;
-                            }
-                        } while(terms.next());
-                    } finally {
-                        docs.close();
-                    }
-                } finally {
-                    terms.close();
-                }
+                        docs.seek(terms);
+                        while (docs.next())
+                        {
+                           hits.set(docs.doc());
+                        }
+                     }
+                     else
+                     {
+                        break;
+                     }
+                  }
+                  while (terms.next());
+               }
+               finally
+               {
+                  docs.close();
+               }
             }
+            finally
+            {
+               terms.close();
+            }
+         }
 
-            hitsCalculated = true;
-            // put to cache
-            resultMap.put(cacheKey, hits);
-        }
+         hitsCalculated = true;
+         // put to cache
+         resultMap.put(cacheKey, hits);
+      }
 
-        /**
-         * Compares the <code>text</code> with the <code>other</code> String. This
-         * implementation behaves like {@link String#compareTo(Object)} but also
-         * respects the {@link RangeQuery#transform} property.
-         *
-         * @param text   the text to compare to <code>other</code>. The
-         *               transformation function is applied to this parameter before
-         *               it is compared to <code>other</code>.
-         * @param other  the other String.
-         * @param offset start comparing the two strings at <code>offset</code>.
-         * @return see {@link String#compareTo(Object)}. But also respects {@link
-         *         RangeQuery#transform}.
-         */
-        private int termCompare(String text, String other, int offset) {
-            OffsetCharSequence seq1 = new OffsetCharSequence(offset, text, transform);
-            OffsetCharSequence seq2 = new OffsetCharSequence(offset, other);
-            return seq1.compareTo(seq2);
-        }
-    }
+      /**
+       * Compares the <code>text</code> with the <code>other</code> String. This
+       * implementation behaves like {@link String#compareTo(Object)} but also
+       * respects the {@link RangeQuery#transform} property.
+       *
+       * @param text   the text to compare to <code>other</code>. The
+       *               transformation function is applied to this parameter before
+       *               it is compared to <code>other</code>.
+       * @param other  the other String.
+       * @param offset start comparing the two strings at <code>offset</code>.
+       * @return see {@link String#compareTo(Object)}. But also respects {@link
+       *         RangeQuery#transform}.
+       */
+      private int termCompare(String text, String other, int offset)
+      {
+         OffsetCharSequence seq1 = new OffsetCharSequence(offset, text, transform);
+         OffsetCharSequence seq2 = new OffsetCharSequence(offset, other);
+         return seq1.compareTo(seq2);
+      }
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/ReadOnlyIndexReader.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,306 +16,337 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
-import java.io.IOException;
-import java.util.BitSet;
-
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.index.TermPositions;
 
+import java.io.IOException;
+import java.util.BitSet;
+import java.util.Map;
+
 /**
  * Overwrites the methods that would modify the index and throws an
  * {@link UnsupportedOperationException} in each of those methods. A
  * <code>ReadOnlyIndexReader</code> will always show all documents that have
  * not been deleted at the time when the index reader is created.
  */
-class ReadOnlyIndexReader extends RefCountingIndexReader {
+class ReadOnlyIndexReader extends RefCountingIndexReader
+{
 
-    /**
-     * The underlying shared reader.
-     */
-    private final SharedIndexReader reader;
+   /**
+    * The underlying shared reader.
+    */
+   private final SharedIndexReader reader;
 
-    /**
-     * The deleted documents as initially read from the IndexReader passed
-     * in the constructor of this class.
-     */
-    private final BitSet deleted;
+   /**
+    * The deleted documents as initially read from the IndexReader passed
+    * in the constructor of this class.
+    */
+   private final BitSet deleted;
 
-    /**
-     * The version of the index reader from where the deleted BitSet was
-     * obtained from.
-     */
-    private long deletedDocsVersion;
+   /**
+    * The version of the index reader from where the deleted BitSet was
+    * obtained from.
+    */
+   private long deletedDocsVersion;
 
-    /**
-     * Creates a new index reader based on <code>reader</code> at
-     * <code>modificationTick</code>.
-     *
-     * @param reader             the underlying <code>IndexReader</code>.
-     * @param deleted            the documents that are deleted in
-     *                           <code>reader</code>.
-     * @param deletedDocsVersion the version of the index reader from where the
-     *                           deleted BitSet was obtained from.
-     */
-    public ReadOnlyIndexReader(SharedIndexReader reader,
-                               BitSet deleted,
-                               long deletedDocsVersion) {
-        super(reader);
-        this.reader = reader;
-        this.deleted = deleted;
-        this.deletedDocsVersion = deletedDocsVersion;
-        // acquire underlying reader
-        reader.acquire();
-    }
+   /**
+    * Creates a new index reader based on <code>reader</code> with the
+    * given set of deleted documents.
+    *
+    * @param reader             the underlying <code>IndexReader</code>.
+    * @param deleted            the documents that are deleted in
+    *                           <code>reader</code>.
+    * @param deletedDocsVersion the version of the index reader from where the
+    *                           deleted BitSet was obtained from.
+    */
+   public ReadOnlyIndexReader(SharedIndexReader reader, BitSet deleted, long deletedDocsVersion)
+   {
+      super(reader);
+      this.reader = reader;
+      this.deleted = deleted;
+      this.deletedDocsVersion = deletedDocsVersion;
+      // acquire underlying reader
+      reader.acquire();
+   }
 
-    /**
-     * @return version of the deleted docs.
-     */
-    long getDeletedDocsVersion() {
-        return deletedDocsVersion;
-    }
+   /**
+    * @return version of the deleted docs.
+    */
+   long getDeletedDocsVersion()
+   {
+      return deletedDocsVersion;
+   }
 
-    /**
-     * Returns the tick value when the underlying {@link CachingIndexReader} was
-     * created.
-     *
-     * @return the creation tick for the underlying reader.
-     */
-    long getCreationTick() {
-        return reader.getCreationTick();
-    }
+   /**
+    * Returns the tick value when the underlying {@link CachingIndexReader} was
+    * created.
+    *
+    * @return the creation tick for the underlying reader.
+    */
+   long getCreationTick()
+   {
+      return reader.getCreationTick();
+   }
 
-    /**
-     * Updates the deleted documents in this index reader. When this method
-     * returns this index reader will have the same documents marked as deleted
-     * as the passed <code>reader</code>.
-     * <p/>
-     * This method is not thread-safe! Make sure no other thread is concurrently
-     * using this reader at the same time.
-     *
-     * @param reader the reader from where to obtain the deleted documents
-     *               info.
-     */
-    void updateDeletedDocs(CommittableIndexReader reader) {
-        int maxDoc = reader.maxDoc();
-        for (int i = 0; i < maxDoc; i++) {
-            if (reader.isDeleted(i)) {
-                deleted.set(i);
-            }
-        }
-        deletedDocsVersion = reader.getModificationCount();
-    }
+   /**
+    * Updates the deleted documents in this index reader. When this method
+    * returns this index reader will have the same documents marked as deleted
+    * as the passed <code>reader</code>.
+    * <p/>
+    * This method is not thread-safe! Make sure no other thread is concurrently
+    * using this reader at the same time.
+    *
+    * @param reader the reader from where to obtain the deleted documents
+    *               info.
+    */
+   void updateDeletedDocs(CommittableIndexReader reader)
+   {
+      int maxDoc = reader.maxDoc();
+      for (int i = 0; i < maxDoc; i++)
+      {
+         if (reader.isDeleted(i))
+         {
+            deleted.set(i);
+         }
+      }
+      deletedDocsVersion = reader.getModificationCount();
+   }
 
-    /**
-     * Returns the <code>DocId</code> of the parent of <code>n</code> or
-     * {@link DocId#NULL} if <code>n</code> does not have a parent
-     * (<code>n</code> is the root node).
-     *
-     * @param n the document number.
-     * @return the <code>DocId</code> of <code>n</code>'s parent.
-     * @throws IOException if an error occurs while reading from the index.
-     */
-    public DocId getParent(int n) throws IOException {
-        return getBase().getParent(n, deleted);
-    }
+   /**
+    * Returns the <code>DocId</code> of the parent of <code>n</code> or
+    * {@link DocId#NULL} if <code>n</code> does not have a parent
+    * (<code>n</code> is the root node).
+    *
+    * @param n the document number.
+    * @return the <code>DocId</code> of <code>n</code>'s parent.
+    * @throws IOException if an error occurs while reading from the index.
+    */
+   public DocId getParent(int n) throws IOException
+   {
+      return getBase().getParent(n, deleted);
+   }
 
-    /**
-     * Returns the {@link SharedIndexReader} this reader is based on.
-     *
-     * @return the {@link SharedIndexReader} this reader is based on.
-     */
-    public SharedIndexReader getBase() {
-        return (SharedIndexReader) in;
-    }
+   /**
+    * Returns the {@link SharedIndexReader} this reader is based on.
+    *
+    * @return the {@link SharedIndexReader} this reader is based on.
+    */
+   public SharedIndexReader getBase()
+   {
+      return (SharedIndexReader)in;
+   }
 
-    //---------------------< IndexReader overwrites >---------------------------
+   //---------------------< IndexReader overwrites >---------------------------
 
-    /**
-     * Returns true if document <code>n</code> has been deleted
-     * @param n the document number
-     * @return true if document <code>n</code> has been deleted
-     */
-    public boolean isDeleted(int n) {
-        return deleted.get(n);
-    }
+   /**
+    * Returns true if document <code>n</code> has been deleted
+    * @param n the document number
+    * @return true if document <code>n</code> has been deleted
+    */
+   public boolean isDeleted(int n)
+   {
+      return deleted.get(n);
+   }
 
-    /**
-     * Returns <code>true</code> if any documents have been deleted.
-     *
-     * @return <code>true</code> if any documents have been deleted.
-     */
-    public boolean hasDeletions() {
-        return !deleted.isEmpty();
-    }
+   /**
+    * Returns <code>true</code> if any documents have been deleted.
+    *
+    * @return <code>true</code> if any documents have been deleted.
+    */
+   public boolean hasDeletions()
+   {
+      return !deleted.isEmpty();
+   }
 
-    /**
-     * Returns the number of documents in this index reader.
-     *
-     * @return the number of documents in this index reader.
-     */
-    public int numDocs() {
-        return maxDoc() - deleted.cardinality();
-    }
+   /**
+    * Returns the number of documents in this index reader.
+    *
+    * @return the number of documents in this index reader.
+    */
+   public int numDocs()
+   {
+      return maxDoc() - deleted.cardinality();
+   }
 
-    /**
-     * @exception UnsupportedOperationException always
-     */
-    protected final void doDelete(int docNum) {
-        throw new UnsupportedOperationException("IndexReader is read-only");
-    }
+   /**
+    * @exception UnsupportedOperationException always
+    */
+   protected final void doDelete(int docNum)
+   {
+      throw new UnsupportedOperationException("IndexReader is read-only");
+   }
 
-    /**
-     * @exception UnsupportedOperationException always
-     */
-    protected final void doUndeleteAll() {
-        throw new UnsupportedOperationException("IndexReader is read-only");
-    }
+   /**
+    * @exception UnsupportedOperationException always
+    */
+   protected final void doUndeleteAll()
+   {
+      throw new UnsupportedOperationException("IndexReader is read-only");
+   }
 
-    /**
-     * @exception UnsupportedOperationException always
-     */
-    protected final void doCommit() {
-        throw new UnsupportedOperationException("IndexReader is read-only");
-    }
+   /**
+    * @exception UnsupportedOperationException always
+    */
+   protected final void doCommit(Map commitUserData)
+   {
+      throw new UnsupportedOperationException("IndexReader is read-only");
+   }
 
-    /**
-     * Wraps the underlying <code>TermDocs</code> and filters out documents
-     * marked as deleted.<br/>
-     * If <code>term</code> is for a {@link FieldNames#UUID} field and this
-     * <code>ReadOnlyIndexReader</code> does not have such a document,
-     * {@link EmptyTermDocs#INSTANCE} is returned.
-     *
-     * @param term the term to enumerate the docs for.
-     * @return TermDocs for <code>term</code>.
-     * @throws IOException if an error occurs while reading from the index.
-     */
-    public TermDocs termDocs(Term term) throws IOException {
-        // do not wrap for empty TermDocs
-        TermDocs td = reader.termDocs(term);
-        if (td != EmptyTermDocs.INSTANCE) {
-            td = new FilteredTermDocs(td);
-        }
-        return td;
-    }
+   /**
+    * Wraps the underlying <code>TermDocs</code> and filters out documents
+    * marked as deleted.<br/>
+    * If <code>term</code> is for a {@link FieldNames#UUID} field and this
+    * <code>ReadOnlyIndexReader</code> does not have such a document,
+    * {@link EmptyTermDocs#INSTANCE} is returned.
+    *
+    * @param term the term to enumerate the docs for.
+    * @return TermDocs for <code>term</code>.
+    * @throws IOException if an error occurs while reading from the index.
+    */
+   public TermDocs termDocs(Term term) throws IOException
+   {
+      // do not wrap for empty TermDocs
+      TermDocs td = reader.termDocs(term);
+      if (td != EmptyTermDocs.INSTANCE)
+      {
+         td = new FilteredTermDocs(td);
+      }
+      return td;
+   }
 
-    /**
-     * Wraps the underlying <code>TermDocs</code> and filters out documents
-     * marked as deleted.
-     *
-     * @return TermDocs over the whole index.
-     * @throws IOException if an error occurs while reading from the index.
-     */
-    public TermDocs termDocs() throws IOException {
-        return new FilteredTermDocs(super.termDocs());
-    }
+   /**
+    * Wraps the underlying <code>TermDocs</code> and filters out documents
+    * marked as deleted.
+    *
+    * @return TermDocs over the whole index.
+    * @throws IOException if an error occurs while reading from the index.
+    */
+   public TermDocs termDocs() throws IOException
+   {
+      return new FilteredTermDocs(super.termDocs());
+   }
 
-    /**
-     * Wraps the underlying <code>TermPositions</code> and filters out documents
-     * marked as deleted.
-     *
-     * @return TermPositions over the whole index.
-     * @throws IOException if an error occurs while reading from the index.
-     */
-    public TermPositions termPositions() throws IOException {
-        return new FilteredTermPositions(super.termPositions());
-    }
+   /**
+    * Wraps the underlying <code>TermPositions</code> and filters out documents
+    * marked as deleted.
+    *
+    * @return TermPositions over the whole index.
+    * @throws IOException if an error occurs while reading from the index.
+    */
+   public TermPositions termPositions() throws IOException
+   {
+      return new FilteredTermPositions(super.termPositions());
+   }
 
-    //----------------------< FilteredTermDocs >--------------------------------
+   //----------------------< FilteredTermDocs >--------------------------------
 
-    /**
-     * Filters a wrapped TermDocs by omitting documents marked as deleted.
-     */
-    private class FilteredTermDocs extends FilterTermDocs {
+   /**
+    * Filters a wrapped TermDocs by omitting documents marked as deleted.
+    */
+   private class FilteredTermDocs extends FilterTermDocs
+   {
 
-        /**
-         * Creates a new filtered TermDocs based on <code>in</code>.
-         *
-         * @param in the TermDocs to filter.
-         */
-        public FilteredTermDocs(TermDocs in) {
-            super(in);
-        }
+      /**
+       * Creates a new filtered TermDocs based on <code>in</code>.
+       *
+       * @param in the TermDocs to filter.
+       */
+      public FilteredTermDocs(TermDocs in)
+      {
+         super(in);
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public boolean next() throws IOException {
-            boolean hasNext = super.next();
-            while (hasNext && deleted.get(super.doc())) {
-                hasNext = super.next();
-            }
-            return hasNext;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public boolean next() throws IOException
+      {
+         boolean hasNext = super.next();
+         while (hasNext && deleted.get(super.doc()))
+         {
+            hasNext = super.next();
+         }
+         return hasNext;
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public int read(int[] docs, int[] freqs) throws IOException {
-            int count;
-            for (count = 0; count < docs.length && next(); count++) {
-                docs[count] = doc();
-                freqs[count] = freq();
-            }
-            return count;
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public int read(int[] docs, int[] freqs) throws IOException
+      {
+         int count;
+         for (count = 0; count < docs.length && next(); count++)
+         {
+            docs[count] = doc();
+            freqs[count] = freq();
+         }
+         return count;
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public boolean skipTo(int i) throws IOException {
-            boolean exists = super.skipTo(i);
-            while (exists && deleted.get(doc())) {
-                exists = next();
-            }
-            return exists;
-        }
-    }
+      /**
+       * {@inheritDoc}
+       */
+      public boolean skipTo(int i) throws IOException
+      {
+         boolean exists = super.skipTo(i);
+         while (exists && deleted.get(doc()))
+         {
+            exists = next();
+         }
+         return exists;
+      }
+   }
 
-    //---------------------< FilteredTermPositions >----------------------------
+   //---------------------< FilteredTermPositions >----------------------------
 
-    /**
-     * Filters a wrapped TermPositions by omitting documents marked as deleted.
-     */
-    private final class FilteredTermPositions extends FilteredTermDocs
-            implements TermPositions {
+   /**
+    * Filters a wrapped TermPositions by omitting documents marked as deleted.
+    */
+   private final class FilteredTermPositions extends FilteredTermDocs implements TermPositions
+   {
 
-        /**
-         * Creates a new filtered TermPositions based on <code>in</code>.
-         *
-         * @param in the TermPositions to filter.
-         */
-        public FilteredTermPositions(TermPositions in) {
-            super(in);
-        }
+      /**
+       * Creates a new filtered TermPositions based on <code>in</code>.
+       *
+       * @param in the TermPositions to filter.
+       */
+      public FilteredTermPositions(TermPositions in)
+      {
+         super(in);
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public int nextPosition() throws IOException {
-            return ((TermPositions) this.in).nextPosition();
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public int nextPosition() throws IOException
+      {
+         return ((TermPositions)this.in).nextPosition();
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public int getPayloadLength() {
-            return ((TermPositions) in).getPayloadLength();
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public int getPayloadLength()
+      {
+         return ((TermPositions)in).getPayloadLength();
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public byte[] getPayload(byte[] data, int offset) throws IOException {
-            return ((TermPositions) in).getPayload(data, offset);
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public byte[] getPayload(byte[] data, int offset) throws IOException
+      {
+         return ((TermPositions)in).getPayload(data, offset);
+      }
 
-        /**
-         * @inheritDoc
-         */
-        public boolean isPayloadAvailable() {
-            return ((TermPositions) in).isPayloadAvailable();
-        }
+      /**
+       * {@inheritDoc}
+       */
+      public boolean isPayloadAvailable()
+      {
+         return ((TermPositions)in).isPayloadAvailable();
+      }
 
-    }
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SearchIndex.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -22,6 +22,8 @@
 import org.apache.commons.collections.iterators.TransformIterator;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Fieldable;
@@ -29,11 +31,11 @@
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.search.FieldComparatorSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.SortComparatorSource;
 import org.apache.lucene.search.SortField;
 import org.exoplatform.commons.utils.ClassLoading;
 import org.exoplatform.commons.utils.PrivilegedFileHelper;
@@ -82,16 +84,7 @@
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 import java.util.concurrent.CountDownLatch;
 
 import javax.jcr.RepositoryException;
@@ -452,7 +445,7 @@
    /**
     * The sort comparator source for indexed properties.
     */
-   private SortComparatorSource scs;
+   private FieldComparatorSource scs;
 
    /**
     * Flag that indicates whether the hierarchy cache should be initialized
@@ -641,7 +634,7 @@
          }
       }
 
-      scs = new SharedFieldSortComparator(FieldNames.PROPERTIES, context.getItemStateManager(), nsMappings);
+      scs = new SharedFieldComparatorSource(FieldNames.PROPERTIES, context.getItemStateManager(), nsMappings);
       npResolver = new LocationFactory(nsMappings);
 
       indexingConfig = createIndexingConfiguration(nsMappings);
@@ -672,10 +665,9 @@
                   }
                   catch (IOException e)
                   {
-                     log
-                        .error(
-                           "Error while reindexing the workspace. Please fix the problem, delete index and restart server.",
-                           e);
+                     log.error(
+                        "Error while reindexing the workspace. Please fix the problem, delete index and restart server.",
+                        e);
                   }
                }
             }, "Reindexing-" + context.getRepositoryName() + "-" + context.getContainer().getWorkspaceName()).start();
@@ -1109,8 +1101,7 @@
             }
             catch (RepositoryException e)
             {
-               log
-                  .warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString(), e);
+               log.warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString(), e);
             }
             return doc;
          }
@@ -1137,8 +1128,7 @@
                }
                catch (RepositoryException e)
                {
-                  log
-                     .warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString());
+                  log.warn("Exception while creating document for node: " + state.getIdentifier() + ": " + e.toString());
                }
                return null;
             }
@@ -1287,9 +1277,7 @@
       waitForResuming();
 
       checkOpen();
-
       Sort sort = new Sort(createSortFields(orderProps, orderSpecs));
-
       final IndexReader reader = getIndexReader(queryImpl.needsSystemTree());
       JcrIndexSearcher searcher = new JcrIndexSearcher(session, reader, getContext().getItemStateManager());
       searcher.setSimilarity(getSimilarity());
@@ -1651,28 +1639,11 @@
    /**
     * @return the sort comparator source for this index.
     */
-   protected SortComparatorSource getSortComparatorSource()
+   protected FieldComparatorSource getSortComparatorSource()
    {
       return scs;
    }
 
-   // /**
-   // * Factory method to create the <code>TextExtractor</code> instance.
-   // *
-   // * @return the <code>TextExtractor</code> instance this index should use.
-   // */
-   // protected TextExtractor createTextExtractor()
-   // {
-   // TextExtractor txtExtr = new JackrabbitTextExtractor(textFilterClasses);
-   // if (extractorPoolSize > 0)
-   // {
-   // // wrap with pool
-   // txtExtr = new PooledTextExtractor(txtExtr, extractorPoolSize,
-   // extractorBackLog, extractorTimeout);
-   // }
-   // return txtExtr;
-   // }
-
    /**
     * @param namespaceMappings
     *            The namespace mappings
@@ -1981,7 +1952,13 @@
                            Fieldable field = fields[k];
                            // assume properties fields use
                            // SingleTokenStream
-                           t = field.tokenStreamValue().next(t);
+                           //t = field.tokenStreamValue().next(t);
+                           field.tokenStreamValue().incrementToken();
+                           TermAttribute term =
+                              (TermAttribute)field.tokenStreamValue().getAttribute(TermAttribute.class);
+                           PayloadAttribute payload =
+                              (PayloadAttribute)field.tokenStreamValue().getAttribute(PayloadAttribute.class);
+
                            String value = new String(t.termBuffer(), 0, t.termLength());
                            if (value.startsWith(namePrefix))
                            {
@@ -1992,7 +1969,8 @@
                               String path = getNamespaceMappings().translatePath(p);
                               value = FieldNames.createNamedValue(path, value);
                               t.setTermBuffer(value);
-                              doc.add(new Field(field.name(), new SingletonTokenStream(t)));
+                              doc.add(new Field(field.name(), new SingletonTokenStream(term.term(), payload
+                                 .getPayload())));
                               doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, parent.getIdentifier(),
                                  Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
                            }
@@ -2015,8 +1993,7 @@
          catch (Exception e)
          {
             // do not fail if aggregate cannot be created
-            log
-               .warn("Exception while building indexing aggregate for" + " node with UUID: " + state.getIdentifier(), e);
+            log.warn("Exception while building indexing aggregate for" + " node with UUID: " + state.getIdentifier(), e);
          }
       }
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldCache.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldCache.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldCache.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,6 +16,13 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.index.TermEnum;
+import org.apache.lucene.index.TermPositions;
+import org.apache.lucene.search.FieldComparator;
+
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -23,298 +30,335 @@
 
 import javax.jcr.PropertyType;
 
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.search.SortComparator;
-
 /**
  * Implements a variant of the lucene class <code>org.apache.lucene.search.FieldCacheImpl</code>.
  * The lucene FieldCache class has some sort of support for custom comparators
  * but it only works on the basis of a field name. There is no further control
  * over the terms to iterate, that's why we use our own implementation.
  */
-public class SharedFieldCache {
+public class SharedFieldCache
+{
 
-    /**
-     * Expert: Stores term text values and document ordering data.
-     */
-    public static class ValueIndex {
+   /**
+    * Expert: Stores term text values and document ordering data.
+    */
+   public static class ValueIndex
+   {
 
-        /**
-         * Some heuristic factor that determines whether the array is sparse. Note that if less then
-         * 1% is set, we already count the array as sparse. This is because it will become memory consuming
-         * quickly by keeping the (sparse) arrays 
-         */
-        private static final int SPARSE_FACTOR = 100;
+      /**
+       * Some heuristic factor that determines whether the array is sparse. Note that if less than
+       * 1% is set, we already count the array as sparse. This is because it will become memory consuming
+       * quickly by keeping the (sparse) arrays 
+       */
+      private static final int SPARSE_FACTOR = 100;
 
-        /**
-         * Values indexed by document id.
-         */
-        private final Comparable[] values;
+      /**
+       * Values indexed by document id.
+       */
+      private final Comparable[] values;
 
-        /**
-         * Values (Comparable) map indexed by document id.
-         */
-        public final Map valuesMap;
+      /**
+       * Values (Comparable) map indexed by document id.
+       */
+      public final Map<Integer, Comparable> valuesMap;
 
-        /**
-         * Boolean indicating whether the {@link #valuesMap} impl has to be used
-         */
-        public final boolean sparse;
+      /**
+       * Boolean indicating whether the {@link #valuesMap} impl has to be used
+       */
+      public final boolean sparse;
 
-        /**
-         * Creates one of these objects
-         */
-        public ValueIndex(Comparable[] values, int setValues) {
-            if (isSparse(values, setValues)) {
-                this.sparse = true;
-                this.values = null;
-                if (setValues == 0) {
-                    this.valuesMap = null;
-                } else {
-                    this.valuesMap = getValuesMap(values, setValues);
-                }
-            } else {
-                this.sparse = false;
-                this.values = values;
-                this.valuesMap = null;
+      /**
+       * Creates one of these objects
+       */
+      public ValueIndex(Comparable[] values, int setValues)
+      {
+         if (isSparse(values, setValues))
+         {
+            this.sparse = true;
+            this.values = null;
+            if (setValues == 0)
+            {
+               this.valuesMap = null;
             }
-        }
-
-        public Comparable getValue(int i) {
-            if (sparse) {
-                return valuesMap == null ? null : (Comparable) valuesMap.get(new Integer(i));
-            } else {
-                return values[i];
+            else
+            {
+               this.valuesMap = getValuesMap(values, setValues);
             }
-        }
+         }
+         else
+         {
+            this.sparse = false;
+            this.values = values;
+            this.valuesMap = null;
+         }
+      }
 
-        private Map getValuesMap(Comparable[] values, int setValues) {
-            Map map = new HashMap(setValues);
-            for (int i = 0; i < values.length && setValues > 0; i++) {
-                if (values[i] != null) {
-                    map.put(new Integer(i), values[i]);
-                    setValues--;
-                }
-            }
-            return map;
-        }
+      public Comparable getValue(int i)
+      {
+         if (sparse)
+         {
+            return valuesMap == null ? null : valuesMap.get(i);
+         }
+         else
+         {
+            return values[i];
+         }
+      }
 
-        private boolean isSparse(Comparable[] values, int setValues) {
-            // some really simple test to test whether the array is sparse. 
-            // Currently, when less then 1% is set, the array is already sparse 
-            // for this typical cache to avoid memory issues
-            if (setValues * SPARSE_FACTOR < values.length) {
-                return true;
+      private Map<Integer, Comparable> getValuesMap(Comparable[] values, int setValues)
+      {
+         Map<Integer, Comparable> map = new HashMap<Integer, Comparable>(setValues);
+         for (int i = 0; i < values.length && setValues > 0; i++)
+         {
+            if (values[i] != null)
+            {
+               map.put(i, values[i]);
+               setValues--;
             }
-            return false;
-        }
-    }
+         }
+         return map;
+      }
 
-    /**
-     * Reference to the single instance of <code>SharedFieldCache</code>.
-     */
-    public static final SharedFieldCache INSTANCE = new SharedFieldCache();
+      private boolean isSparse(Comparable[] values, int setValues)
+      {
+         // some really simple test to check whether the array is sparse. Currently, when less than 1% is set, the array is already sparse 
+         // for this typical cache to avoid memory issues
+         if (setValues * SPARSE_FACTOR < values.length)
+         {
+            return true;
+         }
+         return false;
+      }
+   }
 
-    /**
-     * The internal cache. Maps Entry to array of interpreted term values.
-     */
-    private final Map cache = new WeakHashMap();
+   /**
+    * Reference to the single instance of <code>SharedFieldCache</code>.
+    */
+   public static final SharedFieldCache INSTANCE = new SharedFieldCache();
 
-    /**
-     * Private constructor.
-     */
-    private SharedFieldCache() {
-    }
+   /**
+    * The internal cache. Maps Entry to array of interpreted term values.
+    */
+   private final Map<IndexReader, Map<Key, ValueIndex>> cache = new WeakHashMap<IndexReader, Map<Key, ValueIndex>>();
 
-    /**
-     * Creates a <code>ValueIndex</code> for a <code>field</code> and a term
-     * <code>prefix</code>. The term prefix acts as the property name for the
-     * shared <code>field</code>.
-     * <p/>
-     * This method is an adapted version of: <code>FieldCacheImpl.getStringIndex()</code>
-     *
-     * @param reader     the <code>IndexReader</code>.
-     * @param field      name of the shared field.
-     * @param prefix     the property name, will be used as term prefix.
-     * @param comparator the sort comparator instance.
-     * @return a ValueIndex that contains the field values and order
-     *         information.
-     * @throws IOException if an error occurs while reading from the index.
-     */
-    public ValueIndex getValueIndex(IndexReader reader,
-                                    String field,
-                                    String prefix,
-                                    SortComparator comparator)
-            throws IOException {
+   /**
+    * Private constructor.
+    */
+   private SharedFieldCache()
+   {
+   }
 
-        if (reader instanceof ReadOnlyIndexReader) {
-            reader = ((ReadOnlyIndexReader) reader).getBase();
-        }
+   /**
+    * Creates a <code>ValueIndex</code> for a <code>field</code> and a term
+    * <code>prefix</code>. The term prefix acts as the property name for the
+    * shared <code>field</code>.
+    * <p/>
+    * This method is an adapted version of: <code>FieldCacheImpl.getStringIndex()</code>
+    *
+    * @param reader     the <code>IndexReader</code>.
+    * @param field      name of the shared field.
+    * @param prefix     the property name, will be used as term prefix.
+    * @param comparator the field comparator instance.
+    * @return a ValueIndex that contains the field values and order
+    *         information.
+    * @throws IOException if an error occurs while reading from the index.
+    */
+   public ValueIndex getValueIndex(IndexReader reader, String field, String prefix, FieldComparator comparator)
+      throws IOException
+   {
 
-        field = field.intern();
-        ValueIndex ret = lookup(reader, field, prefix, comparator);
-        if (ret == null) {
-            Comparable[] retArray = new Comparable[reader.maxDoc()];
-            int setValues = 0;
-            if (retArray.length > 0) {
-                IndexFormatVersion version = IndexFormatVersion.getVersion(reader);
-                boolean hasPayloads = version.isAtLeast(IndexFormatVersion.V3);
-                TermDocs termDocs;
-                byte[] payload = null;
-                int type;
-                if (hasPayloads) {
-                    termDocs = reader.termPositions();
-                    payload = new byte[1];
-                } else {
-                    termDocs = reader.termDocs();
-                }
-                TermEnum termEnum = reader.terms(new Term(field, prefix));
+      if (reader instanceof ReadOnlyIndexReader)
+      {
+         reader = ((ReadOnlyIndexReader)reader).getBase();
+      }
 
-                char[] tmp = new char[16];
-                try {
-                    if (termEnum.term() == null) {
-                        throw new RuntimeException("no terms in field " + field);
-                    }
-                    do {
-                        Term term = termEnum.term();
-                        if (term.field() != field || !term.text().startsWith(prefix)) {
-                            break;
-                        }
+      field = field.intern();
+      ValueIndex ret = lookup(reader, field, prefix, comparator);
+      if (ret == null)
+      {
+         Comparable[] retArray = new Comparable[reader.maxDoc()];
+         int setValues = 0;
+         if (retArray.length > 0)
+         {
+            IndexFormatVersion version = IndexFormatVersion.getVersion(reader);
+            boolean hasPayloads = version.isAtLeast(IndexFormatVersion.V3);
+            TermDocs termDocs;
+            byte[] payload = null;
+            int type;
+            if (hasPayloads)
+            {
+               termDocs = reader.termPositions();
+               payload = new byte[1];
+            }
+            else
+            {
+               termDocs = reader.termDocs();
+            }
+            TermEnum termEnum = reader.terms(new Term(field, prefix));
 
-                        // make sure term is compacted
-                        String text = term.text();
-                        int len = text.length() - prefix.length();
-                        if (tmp.length < len) {
-                            // grow tmp
-                            tmp = new char[len];
-                        }
-                        text.getChars(prefix.length(), text.length(), tmp, 0);
-                        String value = new String(tmp, 0, len);
+            char[] tmp = new char[16];
+            try
+            {
+               if (termEnum.term() == null)
+               {
+                  throw new RuntimeException("no terms in field " + field);
+               }
+               do
+               {
+                  Term term = termEnum.term();
+                  if (term.field() != field || !term.text().startsWith(prefix))
+                  {
+                     break;
+                  }
 
-                        termDocs.seek(termEnum);
-                        while (termDocs.next()) {
-                            type = PropertyType.UNDEFINED;
-                            if (hasPayloads) {
-                                TermPositions termPos = (TermPositions) termDocs;
-                                termPos.nextPosition();
-                                if (termPos.isPayloadAvailable()) {
-                                    payload = termPos.getPayload(payload, 0);
-                                    type = PropertyMetaData.fromByteArray(payload).getPropertyType();
-                                }
-                            }
-                            setValues++;
-                            retArray[termDocs.doc()] = getValue(value, type);
+                  // make sure term is compacted
+                  String text = term.text();
+                  int len = text.length() - prefix.length();
+                  if (tmp.length < len)
+                  {
+                     // grow tmp
+                     tmp = new char[len];
+                  }
+                  text.getChars(prefix.length(), text.length(), tmp, 0);
+                  String value = new String(tmp, 0, len);
+
+                  termDocs.seek(termEnum);
+                  while (termDocs.next())
+                  {
+                     type = PropertyType.UNDEFINED;
+                     if (hasPayloads)
+                     {
+                        TermPositions termPos = (TermPositions)termDocs;
+                        termPos.nextPosition();
+                        if (termPos.isPayloadAvailable())
+                        {
+                           payload = termPos.getPayload(payload, 0);
+                           type = PropertyMetaData.fromByteArray(payload).getPropertyType();
                         }
-                    } while (termEnum.next());
-                } finally {
-                    termDocs.close();
-                    termEnum.close();
-                }
+                     }
+                     setValues++;
+                     retArray[termDocs.doc()] = getValue(value, type);
+                  }
+               }
+               while (termEnum.next());
             }
-            ValueIndex value = new ValueIndex(retArray, setValues);
-            store(reader, field, prefix, comparator, value);
+            finally
+            {
+               termDocs.close();
+               termEnum.close();
+            }
+         }
+         ValueIndex value = new ValueIndex(retArray, setValues);
+         store(reader, field, prefix, comparator, value);
+         return value;
+      }
+      return ret;
+   }
+
+   /**
+    * See if a <code>ValueIndex</code> object is in the cache.
+    */
+   ValueIndex lookup(IndexReader reader, String field, String prefix, FieldComparator comparer)
+   {
+      Key key = new Key(field, prefix, comparer);
+      synchronized (this)
+      {
+         Map<Key, ValueIndex> readerCache = cache.get(reader);
+         if (readerCache == null)
+         {
+            return null;
+         }
+         return readerCache.get(key);
+      }
+   }
+
+   /**
+    * Put a <code>ValueIndex</code> <code>value</code> to cache.
+    */
+   ValueIndex store(IndexReader reader, String field, String prefix, FieldComparator comparer, ValueIndex value)
+   {
+      Key key = new Key(field, prefix, comparer);
+      synchronized (this)
+      {
+         Map<Key, ValueIndex> readerCache = cache.get(reader);
+         if (readerCache == null)
+         {
+            readerCache = new HashMap<Key, ValueIndex>();
+            cache.put(reader, readerCache);
+         }
+         return readerCache.put(key, value);
+      }
+   }
+
+   /**
+    * Returns a comparable for the given <code>value</code> that is read from
+    * the index.
+    *
+    * @param value the value as read from the index.
+    * @param type the property type.
+    * @return a comparable for the <code>value</code>.
+    */
+   private Comparable getValue(String value, int type)
+   {
+      switch (type)
+      {
+         case PropertyType.BOOLEAN :
+            return ComparableBoolean.valueOf(Boolean.valueOf(value).booleanValue());
+         case PropertyType.DATE :
+            return new Long(DateField.stringToTime(value));
+         case PropertyType.LONG :
+            return new Long(LongField.stringToLong(value));
+         case PropertyType.DOUBLE :
+            return new Double(DoubleField.stringToDouble(value));
+         default :
             return value;
-        }
-        return ret;
-    }
+      }
+   }
 
-    /**
-     * See if a <code>ValueIndex</code> object is in the cache.
-     */
-    ValueIndex lookup(IndexReader reader, String field,
-                                  String prefix, SortComparator comparer) {
-        Key key = new Key(field, prefix, comparer);
-        synchronized (this) {
-            HashMap readerCache = (HashMap) cache.get(reader);
-            if (readerCache == null) {
-                return null;
-            }
-            return (ValueIndex) readerCache.get(key);
-        }
-    }
+   /**
+    * A compound <code>Key</code> that consists of <code>field</code>,
+    * <code>prefix</code> and <code>comparator</code>.
+    */
+   static class Key
+   {
 
-    /**
-     * Put a <code>ValueIndex</code> <code>value</code> to cache.
-     */
-    Object store(IndexReader reader, String field, String prefix,
-                 SortComparator comparer, ValueIndex value) {
-        Key key = new Key(field, prefix, comparer);
-        synchronized (this) {
-            HashMap readerCache = (HashMap) cache.get(reader);
-            if (readerCache == null) {
-                readerCache = new HashMap();
-                cache.put(reader, readerCache);
-            }
-            return readerCache.put(key, value);
-        }
-    }
+      private final String field;
 
-    /**
-     * Returns a comparable for the given <code>value</code> that is read from
-     * the index.
-     *
-     * @param value the value as read from the index.
-     * @param type the property type.
-     * @return a comparable for the <code>value</code>.
-     */
-    private Comparable getValue(String value, int type) {
-        switch (type) {
-            case PropertyType.BOOLEAN:
-                return ComparableBoolean.valueOf(Boolean.valueOf(value).booleanValue());
-            case PropertyType.DATE:
-                return new Long(DateField.stringToTime(value));
-            case PropertyType.LONG:
-                return new Long(LongField.stringToLong(value));
-            case PropertyType.DOUBLE:
-                return new Double(DoubleField.stringToDouble(value));
-            default:
-                return value;
-        }
-    }
+      private final String prefix;
 
-    /**
-     * A compound <code>Key</code> that consist of <code>field</code>
-     * <code>prefix</code> and <code>comparator</code>.
-     */
-    static class Key {
+      private final Object comparator;
 
-        private final String field;
-        private final String prefix;
-        private final SortComparator comparator;
+      /**
+       * Creates <code>Key</code> for ValueIndex lookup.
+       */
+      Key(String field, String prefix, FieldComparator comparator)
+      {
+         this.field = field.intern();
+         this.prefix = prefix.intern();
+         this.comparator = comparator;
+      }
 
-        /**
-         * Creates <code>Key</code> for ValueIndex lookup.
-         */
-        Key(String field, String prefix, SortComparator comparator) {
-            this.field = field.intern();
-            this.prefix = prefix.intern();
-            this.comparator = comparator;
-        }
+      /**
+       * Returns <code>true</code> if <code>o</code> is a <code>Key</code>
+       * instance and refers to the same field, prefix and comparator object.
+       */
+      public boolean equals(Object o)
+      {
+         if (o instanceof Key)
+         {
+            Key other = (Key)o;
+            return other.field == field && other.prefix == prefix && other.comparator.equals(comparator);
+         }
+         return false;
+      }
 
-        /**
-         * Returns <code>true</code> if <code>o</code> is a <code>Key</code>
-         * instance and refers to the same field, prefix and comparator object.
-         */
-        public boolean equals(Object o) {
-            if (o instanceof Key) {
-                Key other = (Key) o;
-                return other.field == field
-                        && other.prefix == prefix
-                        && other.comparator.equals(comparator);
-            }
-            return false;
-        }
+      /**
+       * Composes a hashcode based on the field, prefix and comparator.
+       */
+      public int hashCode()
+      {
+         return field.hashCode() ^ prefix.hashCode() ^ comparator.hashCode();
+      }
+   }
 
-        /**
-         * Composes a hashcode based on the field, prefix and comparator.
-         */
-        public int hashCode() {
-            return field.hashCode() ^ prefix.hashCode() ^ comparator.hashCode();
-        }
-    }
-
 }

Added: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldComparatorSource.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldComparatorSource.java	                        (rev 0)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldComparatorSource.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -0,0 +1,403 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.exoplatform.services.jcr.impl.core.query.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldComparatorSource;
+import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
+import org.exoplatform.services.jcr.datamodel.IllegalNameException;
+import org.exoplatform.services.jcr.datamodel.IllegalPathException;
+import org.exoplatform.services.jcr.datamodel.ItemData;
+import org.exoplatform.services.jcr.datamodel.ItemType;
+import org.exoplatform.services.jcr.datamodel.NodeData;
+import org.exoplatform.services.jcr.datamodel.PropertyData;
+import org.exoplatform.services.jcr.datamodel.QPath;
+import org.exoplatform.services.jcr.datamodel.QPathEntry;
+import org.exoplatform.services.jcr.datamodel.ValueData;
+import org.exoplatform.services.jcr.impl.Constants;
+import org.exoplatform.services.jcr.impl.core.JCRPath;
+import org.exoplatform.services.jcr.impl.core.LocationFactory;
+import org.exoplatform.services.log.ExoLogger;
+import org.exoplatform.services.log.Log;
+
+import java.io.IOException;
+import java.util.List;
+
+import javax.jcr.InvalidItemStateException;
+import javax.jcr.RepositoryException;
+
+/**
+ * Created by The eXo Platform SAS
+ * Author : eXoPlatform
+ *          exo at exoplatform.com
+ * Feb 18, 2012  
+ */
+public class SharedFieldComparatorSource extends FieldComparatorSource
+{
+
+   /**
+    * The logger 
+    */
+   private static Log LOG = ExoLogger.getLogger("exo.jcr.component.core.SharedFieldSortComparator");
+
+   /**
+    * The name of the shared field in the lucene index.
+    */
+   private final String field;
+
+   /**
+    * The item state manager.
+    */
+   private final ItemDataConsumer ism;
+
+   /**
+    * LocationFactory.
+    */
+   private final LocationFactory locationFactory;
+
+   /**
+    * The index internal namespace mappings.
+    */
+   private final NamespaceMappings nsMappings;
+
+   /**
+    * Creates a new <code>SharedFieldComparatorSource</code> for a given shared
+    * field.
+    *
+    * @param fieldname the shared field.
+    * @param ism       the item state manager of this workspace.
+    *
+    * @param nsMappings the index internal namespace mappings.
+    */
+   public SharedFieldComparatorSource(String fieldname, ItemDataConsumer ism, NamespaceMappings nsMappings)
+   {
+      this.field = fieldname;
+      this.ism = ism;
+      this.locationFactory = new LocationFactory(nsMappings);
+      this.nsMappings = nsMappings;
+   }
+
+   /**
+    * Creates a new <code>FieldComparator</code> for the property denoted by
+    * <code>propertyName</code>.
+    *
+    * @param propertyName the relative path, in JCR path notation, to the
+    *          property to sort on.
+    * @return a <code>FieldComparator</code>
+    * @throws java.io.IOException if an error occurs
+    */
+   @Override
+   public FieldComparator newComparator(String propertyName, int numHits, int sortPos, boolean reversed)
+      throws IOException
+   {
+
+      try
+      {
+         QPath path = locationFactory.parseJCRPath(propertyName).getInternalPath();
+         SimpleFieldComparator simple = new SimpleFieldComparator(nsMappings.translatePath(path), field, numHits);
+         if (path.getEntries().length == 1)
+         {
+            return simple;
+         }
+         else
+         {
+            return new CompoundScoreFieldComparator(new FieldComparator[]{simple,
+               new RelPathFieldComparator(path, numHits)}, numHits);
+         }
+      }
+      catch (IllegalNameException e)
+      {
+         throw Util.createIOException(e);
+      }
+      catch (RepositoryException e)
+      {
+         throw Util.createIOException(e);
+      }
+      //
+      //       PathFactory factory = PathFactoryImpl.getInstance();
+      //       Path path = factory.create(propertyName);
+      //
+      //       try {
+      //           SimpleFieldComparator simple = new SimpleFieldComparator(nsMappings.translatePath(path), field, numHits);
+      //
+      //           return path.getLength() == 1
+      //               ? simple
+      //               : new CompoundScoreFieldComparator(
+      //                       new FieldComparator[] { simple, new RelPathFieldComparator(path, numHits) }, numHits);
+      //
+      //       }
+      //       catch (IllegalNameException e) {
+      //           throw Util.createIOException(e);
+      //       }
+   }
+
+   //   /**
+   //    * Creates a new <code>ScoreDocComparator</code> for an embedded
+   //    * <code>propertyName</code> and a <code>reader</code>.
+   //    *
+   //    */
+   //   public ScoreDocComparator newComparator(IndexReader reader, String relPath) throws IOException
+   //   {
+   //
+   //      try
+   //      {
+   //         QPath p = locationFactory.parseJCRPath(relPath).getInternalPath();
+   //         ScoreDocComparator simple = new SimpleScoreDocComparator(reader, nsMappings.translatePath(p));
+   //         if (p.getEntries().length == 1)
+   //         {
+   //            return simple;
+   //         }
+   //         else
+   //         {
+   //            return new CompoundScoreDocComparator(reader, new ScoreDocComparator[]{simple,
+   //               new RelPathScoreDocComparator(reader, p)});
+   //         }
+   //      }
+   //      catch (IllegalNameException e)
+   //      {
+   //         throw Util.createIOException(e);
+   //      }
+   //      catch (RepositoryException e)
+   //      {
+   //         throw Util.createIOException(e);
+   //      }
+   //   }
+
+   private ItemData getItemData(NodeData parent, QPathEntry name, ItemType itemType) throws RepositoryException
+   {
+      if (name.getName().equals(JCRPath.PARENT_RELPATH) && name.getNamespace().equals(Constants.NS_DEFAULT_URI))
+      {
+         if (parent.getIdentifier().equals(Constants.ROOT_UUID))
+         {
+            return null;
+         }
+         else
+         {
+            return ism.getItemData(parent.getParentIdentifier());
+         }
+      }
+
+      return ism.getItemData(parent, name, itemType);
+
+   }
+
+   private ItemData getItemData(NodeData parent, QPath relPath, ItemType itemType) throws RepositoryException
+   {
+
+      QPathEntry[] relPathEntries = relPath.getEntries(); //relPath.getRelPath(relPath.getDepth());
+
+      ItemData item = parent;
+      for (int i = 0; i < relPathEntries.length; i++)
+      {
+         if (i == relPathEntries.length - 1)
+         {
+            item = getItemData(parent, relPathEntries[i], itemType);
+         }
+         else
+         {
+            item = getItemData(parent, relPathEntries[i], ItemType.UNKNOWN);
+         }
+
+         if (item == null)
+         {
+            break;
+         }
+
+         if (item.isNode())
+         {
+            parent = (NodeData)item;
+         }
+         else if (i < relPathEntries.length - 1)
+         {
+            throw new IllegalPathException("Path can not contains a property as the intermediate element");
+         }
+      }
+      return item;
+
+   }
+
+   static final class SimpleFieldComparator extends AbstractFieldComparator
+   {
+
+      /**
+       * The term look ups of the index segments.
+       */
+      protected SharedFieldCache.ValueIndex[] indexes;
+
+      /**
+       * The name of the property
+       */
+      private final String propertyName;
+
+      /**
+       * The name of the field in the index
+       */
+      private final String fieldName;
+
+      /**
+       * Create a new instance of the <code>FieldComparator</code>.
+       *
+       * @param propertyName  the name of the property
+       * @param fieldName     the name of the field in the index
+       * @param numHits       the number of values 
+       */
+      public SimpleFieldComparator(String propertyName, String fieldName, int numHits)
+      {
+         super(numHits);
+         this.propertyName = propertyName;
+         this.fieldName = fieldName;
+      }
+
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) throws IOException
+      {
+         super.setNextReader(reader, docBase);
+
+         indexes = new SharedFieldCache.ValueIndex[readers.size()];
+
+         String namedValue = FieldNames.createNamedValue(propertyName, "");
+         for (int i = 0; i < readers.size(); i++)
+         {
+            IndexReader r = readers.get(i);
+            indexes[i] = SharedFieldCache.INSTANCE.getValueIndex(r, fieldName, namedValue, this);
+         }
+      }
+
+      @Override
+      protected Comparable sortValue(int doc)
+      {
+         int idx = readerIndex(doc);
+         return indexes[idx].getValue(doc - starts[idx]);
+      }
+   }
+
+   /**
+    * Implements a compound <code>FieldComparator</code> which delegates to several
+    * other comparators. The comparators are asked for a sort value in the
+    * sequence they are passed to the constructor.
+    */
+   private static final class CompoundScoreFieldComparator extends AbstractFieldComparator
+   {
+      private final FieldComparator[] fieldComparators;
+
+      /**
+       * Create a new instance of the <code>FieldComparator</code>.
+       *
+       * @param fieldComparators  delegatees
+       * @param numHits           the number of values
+       */
+      public CompoundScoreFieldComparator(FieldComparator[] fieldComparators, int numHits)
+      {
+         super(numHits);
+         this.fieldComparators = fieldComparators;
+      }
+
+      @Override
+      public Comparable sortValue(int doc)
+      {
+         for (FieldComparator fieldComparator : fieldComparators)
+         {
+            if (fieldComparator instanceof FieldComparatorBase)
+            {
+               Comparable c = ((FieldComparatorBase)fieldComparator).sortValue(doc);
+
+               if (c != null)
+               {
+                  return c;
+               }
+            }
+         }
+         return null;
+      }
+
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) throws IOException
+      {
+         for (FieldComparator fieldComparator : fieldComparators)
+         {
+            fieldComparator.setNextReader(reader, docBase);
+         }
+      }
+   }
+
+   /**
+    * A <code>FieldComparator</code> which works with order by clauses that use a
+    * relative path to a property to sort on.
+    */
+   private final class RelPathFieldComparator extends AbstractFieldComparator
+   {
+
+      /**
+       * Relative path to the property
+       */
+      private final QPath relPath;
+
+      /**
+       * Create a new instance of the <code>FieldComparator</code>.
+       *
+       * @param relPath  relative path of the property
+       * @param numHits  the number of values
+       */
+      public RelPathFieldComparator(QPath relPath, int numHits)
+      {
+         super(numHits);
+         this.relPath = relPath;
+      }
+
+      @Override
+      protected Comparable sortValue(int doc)
+      {
+         try
+         {
+            int idx = readerIndex(doc);
+            IndexReader reader = readers.get(idx);
+            Document document = reader.document(doc - starts[idx], FieldSelectors.UUID);
+            String uuid = document.get(FieldNames.UUID);
+            ItemData parent = ism.getItemData(uuid);
+            if (!parent.isNode())
+            {
+               throw new InvalidItemStateException();
+            }
+            ItemData property = getItemData((NodeData)parent, relPath, ItemType.PROPERTY);
+            if (property != null)
+            {
+               if (property.isNode())
+               {
+                  throw new InvalidItemStateException();
+               }
+               PropertyData propertyData = (PropertyData)property;
+               List<ValueData> values = propertyData.getValues();
+               if (values.size() > 0)
+               {
+                  return Util.getComparable(values.get(0), propertyData.getType());
+               }
+            }
+            return null;
+         }
+         catch (Exception ignore)
+         {
+            LOG.error(ignore.getLocalizedMessage(), ignore);
+         }
+
+         return null;
+      }
+
+   }
+
+}

Deleted: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldSortComparator.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldSortComparator.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SharedFieldSortComparator.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -1,428 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.exoplatform.services.jcr.impl.core.query.lucene;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.ScoreDocComparator;
-import org.apache.lucene.search.SortComparator;
-import org.apache.lucene.search.SortField;
-import org.exoplatform.services.jcr.dataflow.ItemDataConsumer;
-import org.exoplatform.services.jcr.datamodel.IllegalNameException;
-import org.exoplatform.services.jcr.datamodel.IllegalPathException;
-import org.exoplatform.services.jcr.datamodel.ItemData;
-import org.exoplatform.services.jcr.datamodel.ItemType;
-import org.exoplatform.services.jcr.datamodel.NodeData;
-import org.exoplatform.services.jcr.datamodel.PropertyData;
-import org.exoplatform.services.jcr.datamodel.QPath;
-import org.exoplatform.services.jcr.datamodel.QPathEntry;
-import org.exoplatform.services.jcr.datamodel.ValueData;
-import org.exoplatform.services.jcr.impl.Constants;
-import org.exoplatform.services.jcr.impl.core.JCRPath;
-import org.exoplatform.services.jcr.impl.core.LocationFactory;
-import org.exoplatform.services.log.ExoLogger;
-import org.exoplatform.services.log.Log;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.jcr.InvalidItemStateException;
-import javax.jcr.RepositoryException;
-
-/**
- * Implements a <code>SortComparator</code> which knows how to sort on a lucene
- * field that contains values for multiple properties.
- */
-public class SharedFieldSortComparator extends SortComparator
-{
-
-   /**
-    * The logger 
-    */
-   private static Log LOG = ExoLogger.getLogger("exo.jcr.component.core.SharedFieldSortComparator");
-
-   /**
-    * The name of the shared field in the lucene index.
-    */
-   private final String field;
-
-   /**
-    * The item state manager.
-    */
-   private final ItemDataConsumer ism;
-
-   /**
-    * LocationFactory.
-    */
-   private final LocationFactory locationFactory;
-
-   /**
-    * The index internal namespace mappings.
-    */
-   private final NamespaceMappings nsMappings;
-
-   /**
-    * Creates a new <code>SharedFieldSortComparator</code> for a given shared
-    * field.
-    *
-    * @param fieldname the shared field.
-    * @param ism       the item state manager of this workspace.
-    * @param hmgr      the hierarchy manager of this workspace.
-    * @param nsMappings the index internal namespace mappings.
-    */
-   public SharedFieldSortComparator(String fieldname, ItemDataConsumer ism, NamespaceMappings nsMappings)
-   {
-      this.field = fieldname;
-      this.ism = ism;
-      this.locationFactory = new LocationFactory(nsMappings);
-      this.nsMappings = nsMappings;
-   }
-
-   /**
-    * Creates a new <code>ScoreDocComparator</code> for an embedded
-    * <code>propertyName</code> and a <code>reader</code>.
-    *
-    * @param reader the index reader.
-    * @param relPath the relative path to the property to sort on as returned
-    *          by {@link Path#getString()}.
-    * @return a <code>ScoreDocComparator</code> for the
-    * @throws IOException if an error occurs while reading from the index.
-    */
-   @Override
-   public ScoreDocComparator newComparator(IndexReader reader, String relPath) throws IOException
-   {
-
-      try
-      {
-         QPath p = locationFactory.parseJCRPath(relPath).getInternalPath();
-         ScoreDocComparator simple = new SimpleScoreDocComparator(reader, nsMappings.translatePath(p));
-         if (p.getEntries().length == 1)
-         {
-            return simple;
-         }
-         else
-         {
-            return new CompoundScoreDocComparator(reader, new ScoreDocComparator[]{simple,
-               new RelPathScoreDocComparator(reader, p)});
-         }
-      }
-      catch (IllegalNameException e)
-      {
-         throw Util.createIOException(e);
-      }
-      catch (RepositoryException e)
-      {
-         throw Util.createIOException(e);
-      }
-   }
-
-   /**
-    * @throws UnsupportedOperationException always.
-    */
-   @Override
-   protected Comparable getComparable(String termtext)
-   {
-      throw new UnsupportedOperationException();
-   }
-
-   /**
-    * Checks if <code>reader</code> is of type {@link MultiIndexReader} and if
-    * that's the case calls this method recursively for each reader within the
-    * multi index reader; otherwise the reader is simply added to the list.
-    *
-    * @param readers the list of index readers.
-    * @param reader  the reader to check.
-    */
-   private static void getIndexReaders(List readers, IndexReader reader)
-   {
-      if (reader instanceof MultiIndexReader)
-      {
-         IndexReader[] r = ((MultiIndexReader)reader).getIndexReaders();
-         for (int i = 0; i < r.length; i++)
-         {
-            getIndexReaders(readers, r[i]);
-         }
-      }
-      else
-      {
-         readers.add(reader);
-      }
-   }
-
-   /**
-    * Abstract base class of {@link ScoreDocComparator} implementations.
-    */
-   abstract class AbstractScoreDocComparator implements ScoreDocComparator
-   {
-
-      /**
-       * The index readers.
-       */
-      protected final List readers = new ArrayList();
-
-      /**
-       * The document number starts for the {@link #readers}.
-       */
-      protected final int[] starts;
-
-      public AbstractScoreDocComparator(IndexReader reader) throws IOException
-      {
-         getIndexReaders(readers, reader);
-
-         int maxDoc = 0;
-         this.starts = new int[readers.size() + 1];
-
-         for (int i = 0; i < readers.size(); i++)
-         {
-            IndexReader r = (IndexReader)readers.get(i);
-            starts[i] = maxDoc;
-            maxDoc += r.maxDoc();
-         }
-         starts[readers.size()] = maxDoc;
-      }
-
-      /**
-       * Compares sort values of <code>i</code> and <code>j</code>. If the
-       * sort values have differing types, then the sort order is defined on
-       * the type itself by calling <code>compareTo()</code> on the respective
-       * type class names.
-       *
-       * @param i first score doc.
-       * @param j second score doc.
-       * @return a negative integer if <code>i</code> should come before
-       *         <code>j</code><br> a positive integer if <code>i</code>
-       *         should come after <code>j</code><br> <code>0</code> if they
-       *         are equal
-       */
-      public int compare(ScoreDoc i, ScoreDoc j)
-      {
-         return Util.compare(sortValue(i), sortValue(j));
-      }
-
-      public int sortType()
-      {
-         return SortField.CUSTOM;
-      }
-
-      /**
-       * Returns the reader index for document <code>n</code>.
-       *
-       * @param n document number.
-       * @return the reader index.
-       */
-      protected int readerIndex(int n)
-      {
-         int lo = 0;
-         int hi = readers.size() - 1;
-
-         while (hi >= lo)
-         {
-            int mid = (lo + hi) >> 1;
-            int midValue = starts[mid];
-            if (n < midValue)
-            {
-               hi = mid - 1;
-            }
-            else if (n > midValue)
-            {
-               lo = mid + 1;
-            }
-            else
-            {
-               while (mid + 1 < readers.size() && starts[mid + 1] == midValue)
-               {
-                  mid++;
-               }
-               return mid;
-            }
-         }
-         return hi;
-      }
-   }
-
-   /**
-    * A score doc comparator that works for order by clauses with properties
-    * directly on the result nodes.
-    */
-   private final class SimpleScoreDocComparator extends AbstractScoreDocComparator
-   {
-
-      /**
-       * The term look ups of the index segments.
-       */
-      protected final SharedFieldCache.ValueIndex[] indexes;
-
-      public SimpleScoreDocComparator(IndexReader reader, String propertyName) throws IOException
-      {
-         super(reader);
-         this.indexes = new SharedFieldCache.ValueIndex[readers.size()];
-
-         String namedValue = FieldNames.createNamedValue(propertyName, "");
-         for (int i = 0; i < readers.size(); i++)
-         {
-            IndexReader r = (IndexReader)readers.get(i);
-            indexes[i] = SharedFieldCache.INSTANCE.getValueIndex(r, field, namedValue, SharedFieldSortComparator.this);
-         }
-      }
-
-      /**
-       * Returns the index term for the score doc <code>i</code>.
-       *
-       * @param i the score doc.
-       * @return the sort value if available.
-       */
-      public Comparable sortValue(ScoreDoc i)
-      {
-         int idx = readerIndex(i.doc);
-         return indexes[idx].getValue(i.doc - starts[idx]);
-      }
-   }
-
-   /**
-    * A score doc comparator that works with order by clauses that use a
-    * relative path to a property to sort on.
-    */
-   private final class RelPathScoreDocComparator extends AbstractScoreDocComparator
-   {
-
-      private final QPath relPath;
-
-      public RelPathScoreDocComparator(IndexReader reader, QPath relPath) throws IOException
-      {
-         super(reader);
-         this.relPath = relPath;
-      }
-
-      /**
-       * Returns the sort value for the given {@link ScoreDoc}. The value is
-       * retrieved from the item state manager.
-       *
-       * @param i the score doc.
-       * @return the sort value for the score doc.
-       */
-      public Comparable sortValue(ScoreDoc i)
-      {
-         try
-         {
-            int idx = readerIndex(i.doc);
-            IndexReader reader = (IndexReader)readers.get(idx);
-            Document doc = reader.document(i.doc - starts[idx], FieldSelectors.UUID);
-            String uuid = doc.get(FieldNames.UUID);
-            ItemData parent = ism.getItemData(uuid);
-            if (!parent.isNode())
-               throw new InvalidItemStateException();
-            ItemData property = getItemData((NodeData)parent, relPath, ItemType.PROPERTY);
-            if (property != null)
-            {
-               if (property.isNode())
-                  throw new InvalidItemStateException();
-               PropertyData propertyData = (PropertyData)property;
-               List<ValueData> values = propertyData.getValues();
-               if (values.size() > 0)
-               {
-                  return Util.getComparable(values.get(0), propertyData.getType());
-               }
-            }
-            return null;
-         }
-         catch (Exception e)
-         {
-            LOG.error(e.getLocalizedMessage(), e);
-            return null;
-         }
-      }
-   }
-
-   private ItemData getItemData(NodeData parent, QPathEntry name, ItemType itemType) throws RepositoryException
-   {
-      if (name.getName().equals(JCRPath.PARENT_RELPATH) && name.getNamespace().equals(Constants.NS_DEFAULT_URI))
-      {
-         if (parent.getIdentifier().equals(Constants.ROOT_UUID))
-            return null;
-         else
-            return ism.getItemData(parent.getParentIdentifier());
-      }
-
-      return ism.getItemData(parent, name, itemType);
-
-   }
-
-   private ItemData getItemData(NodeData parent, QPath relPath, ItemType itemType) throws RepositoryException
-   {
-
-      QPathEntry[] relPathEntries = relPath.getEntries(); //relPath.getRelPath(relPath.getDepth());
-
-      ItemData item = parent;
-      for (int i = 0; i < relPathEntries.length; i++)
-      {
-         if (i == relPathEntries.length - 1)
-         {
-            item = getItemData(parent, relPathEntries[i], itemType);
-         }
-         else
-         {
-            item = getItemData(parent, relPathEntries[i], ItemType.UNKNOWN);
-         }
-
-         if (item == null)
-            break;
-
-         if (item.isNode())
-            parent = (NodeData)item;
-         else if (i < relPathEntries.length - 1)
-            throw new IllegalPathException("Path can not contains a property as the intermediate element");
-      }
-      return item;
-
-   }
-
-   /**
-    * Implements a compound score doc comparator that delegates to several
-    * other comparators. The comparators are asked for a sort value in the
-    * sequence they are passed to the constructor. The first non-null value
-    * will be returned by {@link #sortValue(ScoreDoc)}.
-    */
-   private final class CompoundScoreDocComparator extends AbstractScoreDocComparator
-   {
-
-      private final ScoreDocComparator[] comparators;
-
-      public CompoundScoreDocComparator(IndexReader reader, ScoreDocComparator[] comparators) throws IOException
-      {
-         super(reader);
-         this.comparators = comparators;
-      }
-
-      /**
-       * {@inheritDoc}
-       */
-      public Comparable sortValue(ScoreDoc i)
-      {
-         for (int j = 0; j < comparators.length; j++)
-         {
-            Comparable c = comparators[j].sortValue(i);
-            if (c != null)
-            {
-               return c;
-            }
-         }
-         return null;
-      }
-   }
-}

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -18,6 +18,8 @@
 
 import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.Payload;
 import org.exoplatform.services.jcr.impl.Constants;
 
@@ -33,7 +35,6 @@
  */
 public final class SingletonTokenStream extends TokenStream implements Externalizable
 {
-
    /**
     * The string value of the token.
     */
@@ -44,26 +45,54 @@
     */
    private Payload payload;
 
-   private boolean hasNext = true;
+   /**
+    * The term attribute of the current token
+    */
+   private TermAttribute termAttribute;
 
    /**
-    * for serialization 
+    * The payload attribute of the current token
     */
+   private PayloadAttribute payloadAttribute;
+
+   private boolean consumed = false;
+
+   /**
+    * Default constructor for serialization
+    */
    public SingletonTokenStream()
    {
+
    }
 
    /**
+    * Creates a new SingleTokenStream with the given value and payload.
+    * 
+    * @param value
+    *            the string value that will be returned with the token.
+    * @param payload
+    *            the payload that will be attached to this token
+    */
+   public SingletonTokenStream(String value, Payload payload)
+   {
+      this.value = value;
+      this.payload = payload;
+      termAttribute = (TermAttribute)addAttribute(TermAttribute.class);
+      payloadAttribute = (PayloadAttribute)addAttribute(PayloadAttribute.class);
+   }
+
+   /**
     * Creates a new SingleTokenStream with the given value and a property
     * <code>type</code>.
-    *
-    * @param value the string value that will be returned with the token.
-    * @param type the JCR property type.
+    * 
+    * @param value
+    *            the string value that will be returned with the token.
+    * @param type
+    *            the JCR property type.
     */
    public SingletonTokenStream(String value, int type)
    {
-      this.value = value;
-      this.payload = new Payload(new PropertyMetaData(type).toByteArray());
+      this(value, new Payload(new PropertyMetaData(type).toByteArray()));
    }
 
    /**
@@ -71,33 +100,51 @@
     *
     * @param t the token.
     */
+   @Deprecated
    public SingletonTokenStream(Token t)
    {
-      this.value = t.term();
-      this.payload = t.getPayload();
+      this(t.term(), t.getPayload());
    }
 
+   @Override
+   public boolean incrementToken() throws IOException
+   {
+      if (consumed)
+      {
+         return false;
+      }
+      clearAttributes();
+      termAttribute.setTermBuffer(value);
+      payloadAttribute.setPayload(payload);
+      consumed = true;
+      return true;
+   }
+
    /**
     * {@inheritDoc}
     */
-   public Token next(Token reusableToken) throws IOException
+   @Override
+   public void reset() throws IOException
    {
-      if (hasNext)
-      {
-         reusableToken.clear();
-         reusableToken.setTermBuffer(value);
-         reusableToken.setPayload(payload);
-         reusableToken.setStartOffset(0);
-         reusableToken.setEndOffset(value.length());
-         hasNext = false;
-         return reusableToken;
-      }
-      return null;
+      consumed = false;
    }
 
    /**
     * {@inheritDoc}
     */
+   @Override
+   public void close() throws IOException
+   {
+      consumed = true;
+      value = null;
+      payload = null;
+      payloadAttribute = null;
+      termAttribute = null;
+   }
+
+   /**
+    * {@inheritDoc}
+    */
    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException
    {
       payload = (Payload)in.readObject();

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SortedLuceneQueryHits.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SortedLuceneQueryHits.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SortedLuceneQueryHits.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -20,7 +20,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
-import org.apache.lucene.search.TopFieldDocCollector;
+import org.apache.lucene.search.TopFieldCollector;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -154,8 +154,9 @@
 
    private void getHits() throws IOException
    {
-      TopFieldDocCollector collector = new TopFieldDocCollector(reader, sort, numHits);
+      TopFieldCollector collector = TopFieldCollector.create(sort, numHits, false, true, false, false);
       searcher.search(query, collector);
+
       this.size = collector.getTotalHits();
       ScoreDoc[] docs = collector.topDocs().scoreDocs;
       for (int i = scoreNodes.size(); i < docs.length; i++)
@@ -167,5 +168,6 @@
       log.debug("getHits() {}/{}", new Integer(scoreNodes.size()), new Integer(numHits));
       // double hits for next round
       numHits *= 2;
+
    }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WeightedHighlighter.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -138,7 +138,7 @@
                   break;
                }
             }
-            bestFragments.insert(fi);
+            bestFragments.insertWithOverflow(fi);
          }
       }
 

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WildcardQuery.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WildcardQuery.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/WildcardQuery.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,7 +17,6 @@
 package org.exoplatform.services.jcr.impl.core.query.lucene;
 
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.index.TermEnum;
 import org.apache.lucene.search.BooleanQuery;
@@ -29,6 +28,7 @@
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.ToStringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -132,13 +132,24 @@
    @Override
    public Query rewrite(IndexReader reader) throws IOException
    {
-      Query stdWildcardQuery = new MultiTermQuery(new Term(field, pattern))
+      Query stdWildcardQuery = new MultiTermQuery()
       {
          @Override
          protected FilteredTermEnum getEnum(IndexReader reader) throws IOException
          {
             return new WildcardTermEnum(reader, field, propName, pattern, transform);
          }
+
+         /** Prints a user-readable version of this query. */
+         @Override
+         public String toString(String field)
+         {
+            StringBuffer buffer = new StringBuffer();
+            buffer.append(field);
+            buffer.append(':');
+            buffer.append(ToStringUtils.boost(getBoost()));
+            return buffer.toString();
+         }
       };
       try
       {
@@ -338,18 +349,27 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean next() throws IOException
+      public int nextDoc() throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateHits();
          nextDoc = hits.nextSetBit(nextDoc + 1);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
       /**
        * {@inheritDoc}
        */
       @Override
-      public int doc()
+      public int docID()
       {
          return nextDoc;
       }
@@ -367,24 +387,23 @@
        * {@inheritDoc}
        */
       @Override
-      public boolean skipTo(int target) throws IOException
+      public int advance(int target) throws IOException
       {
+         if (nextDoc == NO_MORE_DOCS)
+         {
+            return nextDoc;
+         }
+
          calculateHits();
          nextDoc = hits.nextSetBit(target);
-         return nextDoc > -1;
+         if (nextDoc < 0)
+         {
+            nextDoc = NO_MORE_DOCS;
+         }
+         return nextDoc;
       }
 
       /**
-       * Returns an empty Explanation object.
-       * @return an empty Explanation object.
-       */
-      @Override
-      public Explanation explain(int doc)
-      {
-         return new Explanation();
-      }
-
-      /**
        * Calculates the ids of the documents matching this wildcard query.
        * @throws IOException if an error occurs while reading from the index.
        */

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/directory/FSDirectoryManager.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/directory/FSDirectoryManager.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/directory/FSDirectoryManager.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -18,6 +18,8 @@
 
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.FSLockFactory;
+import org.apache.lucene.store.LockFactory;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.exoplatform.commons.utils.PropertyManager;
 import org.exoplatform.commons.utils.SecurityHelper;
@@ -27,6 +29,7 @@
 import java.io.File;
 import java.io.FileFilter;
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
 
@@ -37,38 +40,48 @@
 public class FSDirectoryManager implements DirectoryManager
 {
 
-   /**
-    * The full qualified name of the lock factory to use by default, if not
-    * specified org.apache.lucene.store.NativeFSLockFactory will be used
-    */
-   public static final String LOCK_FACTORY_CLASS;
+   private static Class<? extends FSDirectory> FS_DIRECTORY_CLASS;
 
-   /**
-    * The full qualified name of the lock factory to use by default, if not
-    * specified org.apache.lucene.store.NativeFSLockFactory will be used
-    */
-   public static final String FS_DIRECTORY_CLASS;
+   private static Class<? extends LockFactory> LOCK_FACTORY_CLASS;
 
-   /**
-    * Static block, used to initialize (map) org.exoplatform.jcr.lucene* 
-    * properties to org.apache.lucene.* and make it only once at a system
-    * start 
-    * 
-    * Required to set custom Index Directory and Lock Factory implementations for Lucene 2.x.
-    */
    static
    {
       // get eXo system properties 
-      LOCK_FACTORY_CLASS = PropertyManager.getProperty("org.exoplatform.jcr.lucene.store.FSDirectoryLockFactoryClass");
-      FS_DIRECTORY_CLASS = PropertyManager.getProperty("org.exoplatform.jcr.lucene.FSDirectory.class");
+      String lockFactoryClassName =
+         PropertyManager.getProperty("org.exoplatform.jcr.lucene.store.FSDirectoryLockFactoryClass");
+      String fsDirectoryClassName = PropertyManager.getProperty("org.exoplatform.jcr.lucene.FSDirectory.class");
       // map to Lucene ones. Works only with Lucene 2.x.
-      if (LOCK_FACTORY_CLASS != null)
+      if (lockFactoryClassName != null)
       {
-         PropertyManager.setProperty("org.apache.lucene.store.FSDirectoryLockFactoryClass", LOCK_FACTORY_CLASS);
+         try
+         {
+            // avoid case when abstract base class used
+            if (!FSLockFactory.class.getName().equals(lockFactoryClassName))
+            {
+               LOCK_FACTORY_CLASS = (Class<? extends LockFactory>)Class.forName(lockFactoryClassName);
+            }
+         }
+         catch (ClassNotFoundException e)
+         {
+            throw new RuntimeException("cannot load LockFactory class: " + e.toString(), e);
+         }
       }
-      if (FS_DIRECTORY_CLASS != null)
+
+      if (fsDirectoryClassName != null)
       {
-         PropertyManager.setProperty("org.apache.lucene.FSDirectory.class", FS_DIRECTORY_CLASS);
+         try
+         {
+            // avoid case when abstract base class used
+            if (!FSDirectory.class.getName().equals(fsDirectoryClassName))
+            {
+               FS_DIRECTORY_CLASS = (Class<? extends FSDirectory>)Class.forName(fsDirectoryClassName);
+            }
+            // else rely on Lucene FSDirectory instantiation 
+         }
+         catch (ClassNotFoundException e)
+         {
+            throw new RuntimeException("cannot load FSDirectory class: " + e.toString(), e);
+         }
       }
    }
 
@@ -133,15 +146,18 @@
                   throw new IOException("Cannot create directory: " + dir);
                }
             }
-            // if both not defined, using FSDirectory.open
-            if (FS_DIRECTORY_CLASS == null && LOCK_FACTORY_CLASS == null)
+            LockFactory lockFactory =
+               (LOCK_FACTORY_CLASS == null) ? new NativeFSLockFactory() : (LockFactory)LOCK_FACTORY_CLASS.newInstance();
+
+            if (FS_DIRECTORY_CLASS == null)
             {
-               return FSDirectory.open(dir, new NativeFSLockFactory(dir));
+               return FSDirectory.open(dir, lockFactory);
             }
-            // LOCK FACTORY only defined, using deprecated getDirectory method
             else
             {
-               return FSDirectory.getDirectory(dir, LOCK_FACTORY_CLASS != null ? null : new NativeFSLockFactory(dir));
+               Constructor<? extends FSDirectory> constructor =
+                  FS_DIRECTORY_CLASS.getConstructor(File.class, LockFactory.class);
+               return constructor.newInstance(dir, lockFactory);
             }
          }
       });

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/hits/ScorerHits.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/hits/ScorerHits.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/hits/ScorerHits.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,47 +16,61 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene.hits;
 
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
+
 import java.io.IOException;
 
-import org.apache.lucene.search.Scorer;
-
 /**
  * Wraps a {@link org.apache.lucene.search.Scorer} in a {@link Hits} instance.
  */
-public class ScorerHits implements Hits {
+public class ScorerHits implements Hits
+{
 
-    private final Scorer scorer;
+   private final Scorer scorer;
 
-    public ScorerHits(Scorer scorer) {
-        this.scorer = scorer;
-    }
+   public ScorerHits(Scorer scorer)
+   {
+      this.scorer = scorer;
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public void set(int doc) {
-        throw new UnsupportedOperationException();
-    }
+   /**
+    * {@inheritDoc}
+    */
+   public void set(int doc)
+   {
+      throw new UnsupportedOperationException();
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public int next() throws IOException {
-        if (scorer.next()) {
-            return scorer.doc();
-        } else {
-            return -1;
-        }
-    }
+   /**
+    * {@inheritDoc}
+    */
+   public int next() throws IOException
+   {
+      int docId = scorer.nextDoc();
+      if (docId != DocIdSetIterator.NO_MORE_DOCS)
+      {
+         return docId;
+      }
+      else
+      {
+         return -1;
+      }
+   }
 
-    /**
-     * {@inheritDoc}
-     */
-    public int skipTo(int target) throws IOException {
-        if (scorer.skipTo(target)) {
-            return scorer.doc();
-        } else {
-            return -1;
-        }
-    }
+   /**
+    * {@inheritDoc}
+    */
+   public int skipTo(int target) throws IOException
+   {
+      int docId = scorer.advance(target);
+      if (docId != DocIdSetIterator.NO_MORE_DOCS)
+      {
+         return docId;
+      }
+      else
+      {
+         return -1;
+      }
+   }
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/spell/LuceneSpellChecker.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/spell/LuceneSpellChecker.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/spell/LuceneSpellChecker.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,8 +16,10 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene.spell;
 
-import org.apache.lucene.analysis.Token;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.spell.Dictionary;
 import org.apache.lucene.search.spell.LuceneDictionary;
@@ -54,7 +56,7 @@
     * Logger instance for this class.
     */
    private static final Log LOG = ExoLogger.getLogger("exo.jcr.component.core.LuceneSpellChecker");
-   
+
    public static final class FiveSecondsRefreshInterval extends LuceneSpellChecker
    {
       public FiveSecondsRefreshInterval()
@@ -256,7 +258,7 @@
             public Object run() throws Exception
             {
                spellIndexDirectory = handler.getDirectoryManager().getDirectory("spellchecker");
-                  
+
                if (IndexReader.indexExists(spellIndexDirectory))
                {
                   lastRefresh = System.currentTimeMillis();
@@ -283,7 +285,7 @@
       {
          // tokenize the statement (field name doesn't matter actually...)
          List<String> words = new ArrayList<String>();
-         List<Token> tokens = new ArrayList<Token>();
+         List<TokenData> tokens = new ArrayList<TokenData>();
          tokenize(statement, words, tokens);
 
          String[] suggestions = check(words.toArray(new String[words.size()]));
@@ -294,11 +296,11 @@
             StringBuffer sb = new StringBuffer(statement);
             for (int i = suggestions.length - 1; i >= 0; i--)
             {
-               Token t = tokens.get(i);
+               TokenData t = tokens.get(i);
+               // only replace if word actually changed
-               if (!t.termText().equalsIgnoreCase(suggestions[i]))
+               if (!t.word.equalsIgnoreCase(suggestions[i]))
                {
-                  sb.replace(t.startOffset(), t.endOffset(), suggestions[i]);
+                  sb.replace(t.startOffset, t.endOffset, suggestions[i]);
                }
             }
             // if suggestion is same as a statement return null
@@ -357,41 +359,74 @@
        * @throws IOException
        *             if an error occurs while parsing the statement.
        */
-      private void tokenize(String statement, List<String> words, List<Token> tokens) throws IOException
+      private void tokenize(String statement, List<String> words, List<TokenData> tokens) throws IOException
       {
          TokenStream ts = handler.getTextAnalyzer().tokenStream(FieldNames.FULLTEXT, new StringReader(statement));
+         TermAttribute term = (TermAttribute)ts.getAttribute(TermAttribute.class);
+         PositionIncrementAttribute positionIncrement =
+            (PositionIncrementAttribute)ts.getAttribute(PositionIncrementAttribute.class);
+         OffsetAttribute offset = (OffsetAttribute)ts.getAttribute(OffsetAttribute.class);
          try
          {
-            Token t;
-            while ((t = ts.next()) != null)
+            while (ts.incrementToken())
             {
-               String origWord = statement.substring(t.startOffset(), t.endOffset());
-               if (t.getPositionIncrement() > 0)
+
+               String word = term.term();
+               //            while ((t = ts.next()) != null)
+               //            {
+               String origWord = statement.substring(offset.startOffset(), offset.endOffset());
+               if (positionIncrement.getPositionIncrement() > 0)
                {
-                  words.add(t.termText());
-                  tokens.add(t);
+                  words.add(word);
+                  tokens.add(new TokenData(offset.startOffset(), offset.endOffset(), term.term()));
                }
                else
                {
                   // very simple implementation: use termText with length
                   // closer to original word
-                  Token current = tokens.get(tokens.size() - 1);
-                  if (Math.abs(origWord.length() - current.termText().length()) > Math.abs(origWord.length()
-                     - t.termText().length()))
+                  TokenData current = tokens.get(tokens.size() - 1);
+                  if (Math.abs(origWord.length() - current.termLength()) > Math.abs(origWord.length() - word.length()))
                   {
                      // replace current token and word
-                     words.set(words.size() - 1, t.termText());
-                     tokens.set(tokens.size() - 1, t);
+                     words.set(words.size() - 1, word);
+                     tokens
+                        .set(tokens.size() - 1, new TokenData(offset.startOffset(), offset.endOffset(), term.term()));
                   }
                }
             }
          }
          finally
          {
+            ts.end();
             ts.close();
          }
       }
 
+      class TokenData
+      {
+         int startOffset;
+
+         int endOffset;
+
+         String word;
+
+         public TokenData(int startOffset, int endOffset, String word)
+         {
+            this.startOffset = startOffset;
+            this.endOffset = endOffset;
+            this.word = word;
+         }
+
+         /**
+          * @return the length of this token's word text
+          */
+         public int termLength()
+         {
+            return word.length();
+         }
+
+      }
+
       /**
        * Checks the spelling of the passed <code>words</code> and returns a
        * suggestion.

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/synonym/WordNetSynonyms.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/synonym/WordNetSynonyms.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/synonym/WordNetSynonyms.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,7 +16,7 @@
  */
 package org.exoplatform.services.jcr.impl.core.query.lucene.synonym;
 
-import org.apache.lucene.index.memory.SynonymMap;
+import org.apache.lucene.wordnet.SynonymMap;
 import org.exoplatform.services.jcr.impl.core.query.lucene.SynonymProvider;
 import org.exoplatform.services.log.ExoLogger;
 import org.exoplatform.services.log.Log;

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/BaseStandaloneTest.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/BaseStandaloneTest.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/BaseStandaloneTest.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -366,7 +366,9 @@
             }
 
             if (dread < eread)
+            {
                dbuff = new byte[eread - dread];
+            }
          }
       }
 
@@ -446,7 +448,9 @@
          for (String nodeMixin : nodeMixins)
          {
             if (mixin.equals(nodeMixin))
+            {
                continue nextMixin;
+            }
          }
 
          fail("Mixin '" + mixin + "' isn't accessible");
@@ -530,61 +534,52 @@
     */
    public class TestWorkspaceDataContainer implements WorkspaceDataContainer
    {
-      @Override
+
       public String getInfo()
       {
          return null;
       }
 
-      @Override
       public String getName()
       {
          return null;
       }
 
-      @Override
       public String getUniqueName()
       {
          return null;
       }
 
-      @Override
       public String getStorageVersion()
       {
          return null;
       }
 
-      @Override
       public Calendar getCurrentTime()
       {
          return null;
       }
 
-      @Override
       public boolean isSame(WorkspaceDataContainer another)
       {
          return false;
       }
 
-      @Override
       public WorkspaceStorageConnection openConnection() throws RepositoryException
       {
          return null;
       }
 
-      @Override
       public WorkspaceStorageConnection openConnection(boolean readOnly) throws RepositoryException
       {
          return null;
       }
 
-      @Override
       public WorkspaceStorageConnection reuseConnection(WorkspaceStorageConnection original) throws RepositoryException
       {
          return null;
       }
 
-      @Override
       public boolean isCheckSNSNewConnection()
       {
          return false;
@@ -601,145 +596,123 @@
    public class TestWorkspaceStorageConnection implements WorkspaceStorageConnection
    {
 
-      @Override
       public ItemData getItemData(NodeData parentData, QPathEntry name, ItemType itemType) throws RepositoryException,
          IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public ItemData getItemData(String identifier) throws RepositoryException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<NodeData> getChildNodesData(NodeData parent) throws RepositoryException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<NodeData> getChildNodesData(NodeData parent, List<QPathEntryFilter> pattern)
          throws RepositoryException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public int getChildNodesCount(NodeData parent) throws RepositoryException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public int getLastOrderNumber(NodeData parent) throws RepositoryException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<PropertyData> getChildPropertiesData(NodeData parent) throws RepositoryException,
          IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<PropertyData> getChildPropertiesData(NodeData parent, List<QPathEntryFilter> pattern)
          throws RepositoryException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<PropertyData> listChildPropertiesData(NodeData parent) throws RepositoryException,
          IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<PropertyData> getReferencesData(String nodeIdentifier) throws RepositoryException,
          IllegalStateException, UnsupportedOperationException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void add(NodeData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void add(PropertyData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void update(NodeData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void update(PropertyData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void rename(NodeData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void delete(NodeData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void delete(PropertyData data) throws RepositoryException, UnsupportedOperationException,
          InvalidItemStateException, IllegalStateException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void commit() throws IllegalStateException, RepositoryException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void rollback() throws IllegalStateException, RepositoryException
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public void close() throws IllegalStateException, RepositoryException
       {
       }
 
-      @Override
       public boolean isOpened()
       {
          throw new UnsupportedOperationException("TestWorkspaceStorageConnection: operation is unsupported.");
       }
 
-      @Override
       public List<ACLHolder> getACLHolders() throws RepositoryException, IllegalStateException,
          UnsupportedOperationException
       {
@@ -749,7 +722,7 @@
       /**
        * @see org.exoplatform.services.jcr.storage.WorkspaceStorageConnection#getValue(java.lang.String, int, int)
        */
-      @Override
+
       public ValueData getValue(String propertyId, int orderNumb, int persistedVersion) throws IllegalStateException,
          RepositoryException
       {
@@ -759,7 +732,7 @@
       /**
        * @see org.exoplatform.services.jcr.storage.WorkspaceStorageConnection#getChildNodesDataByPage(org.exoplatform.services.jcr.datamodel.NodeData, int, int, java.util.List)
        */
-      @Override
+
       public boolean getChildNodesDataByPage(NodeData parent, int fromOrderNum, int toOrderNum, List<NodeData> childs)
          throws RepositoryException
       {

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/api/core/query/lucene/SlowQueryHandler.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -100,7 +100,7 @@
    public void apply(ChangesHolder changes) throws RepositoryException, IOException
    {
       // TODO Auto-generated method stub
-      
+
    }
 
    public ChangesHolder getChanges(Iterator<String> remove, Iterator<NodeData> add)
@@ -123,16 +123,14 @@
    public void setOnline(boolean isOnline, boolean allowQuery, boolean dropStaleIndexes) throws IOException
    {
       // TODO Auto-generated method stub
-      
+
    }
 
    /**
     * @see org.exoplatform.services.jcr.impl.core.query.QueryHandler#checkIndex(org.exoplatform.services.jcr.dataflow.ItemDataConsumer, boolean, InspectionLog)
     */
-   @Override
    public void checkIndex(ItemDataConsumer itemStateManager, boolean isSystem, InspectionReport inspectionLog)
-      throws RepositoryException,
-      IOException
+      throws RepositoryException, IOException
    {
       // do nothing
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/BaseQueryTest.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/BaseQueryTest.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/BaseQueryTest.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -16,12 +16,12 @@
  */
 package org.exoplatform.services.jcr.impl.core.query;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.exoplatform.services.jcr.JcrImplBaseTest;
 import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
 import org.exoplatform.services.jcr.impl.core.query.lucene.SearchIndex;
@@ -52,21 +52,21 @@
 
    protected SearchIndex defaultSearchIndex;
 
-   protected Document getDocument(String nodeIdentifer, boolean includeSystemIndex) throws IOException,
+   protected ScoreDoc getDocument(String nodeIdentifer, boolean includeSystemIndex) throws IOException,
       RepositoryException
    {
       IndexReader reader = defaultSearchIndex.getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.UUID, nodeIdentifer));
 
-      Hits result = is.search(query);
+      TopDocs result = is.search(query, null, Integer.MAX_VALUE);
       try
       {
-         if (result.length() == 1)
+         if (result.totalHits == 1)
          {
-            return result.doc(0);
+            return result.scoreDocs[0];
          }
-         else if (result.length() > 1)
+         else if (result.totalHits > 1)
          {
             throw new RepositoryException("Results more then one");
          }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestArabicSearch.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestArabicSearch.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestArabicSearch.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,12 +17,12 @@
 
 package org.exoplatform.services.jcr.impl.core.query;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.exoplatform.services.jcr.impl.core.NodeImpl;
 import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
 import org.exoplatform.services.jcr.impl.core.query.lucene.Util;
@@ -67,21 +67,21 @@
       String word = "\u0627\u0644\u0644\u0627\u062a\u064a\u0646\u064a\u0629";
 
       // Check is node indexed
-      Document doc = getDocument(cont.getInternalIdentifier(), false);
+      ScoreDoc doc = getDocument(cont.getInternalIdentifier(), false);
       assertNotNull("Node is not indexed", doc);
 
       IndexReader reader = defaultSearchIndex.getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.FULLTEXT, word));
-      Hits result = is.search(query);
-      assertEquals(1, result.length());
+      TopDocs search = is.search(query, null, Integer.MAX_VALUE);
+      assertEquals(1, search.totalHits);
 
       QueryManager qman = this.workspace.getQueryManager();
 
       Query q = qman.createQuery("SELECT * FROM nt:resource " + " WHERE  CONTAINS(., '" + word + "')", Query.SQL);
       QueryResult res = q.execute();
       assertEquals(1, res.getNodes().getSize());
-      
+
       is.close();
       Util.closeOrRelease(reader);
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestDateSearch.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestDateSearch.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestDateSearch.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,12 +17,12 @@
 
 package org.exoplatform.services.jcr.impl.core.query;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.exoplatform.services.jcr.impl.core.NodeImpl;
 import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
 import org.exoplatform.services.jcr.impl.core.query.lucene.Util;
@@ -64,22 +64,22 @@
       String word = "2005-10-02".toLowerCase();// "ronaldo";//-10-06T00:00:00.000+0300
 
       // Check is node indexed
-      Document doc = getDocument(cont.getInternalIdentifier(), false);
+      ScoreDoc doc = getDocument(cont.getInternalIdentifier(), false);
       assertNotNull("Node is not indexed", doc);
       System.out.println("its doc " + doc);
 
       IndexReader reader = defaultSearchIndex.getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.FULLTEXT, word));
-      Hits result = is.search(query);
-      assertEquals(1, result.length());
+      TopDocs topDocs = is.search(query, Integer.MAX_VALUE);
+      assertEquals(1, topDocs.totalHits);
 
       QueryManager qman = this.workspace.getQueryManager();
 
       Query q = qman.createQuery("SELECT * FROM nt:resource " + " WHERE  CONTAINS(., '" + word + "')", Query.SQL);
       QueryResult res = q.execute();
       assertEquals(1, res.getNodes().getSize());
-      
+
       is.close();
       Util.closeOrRelease(reader);
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestExcelFileSearch.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestExcelFileSearch.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestExcelFileSearch.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,12 +17,12 @@
 
 package org.exoplatform.services.jcr.impl.core.query;
 
-import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.exoplatform.services.document.DocumentReader;
 import org.exoplatform.services.document.DocumentReaderService;
 import org.exoplatform.services.document.impl.MSExcelDocumentReader;
@@ -93,14 +93,14 @@
       String word = "eric";
 
       // Check is node indexed
-      Document doc = getDocument(cont.getInternalIdentifier(), false);
+      ScoreDoc doc = getDocument(cont.getInternalIdentifier(), false);
       assertNotNull("Node is not indexed", doc);
 
       IndexReader reader = defaultSearchIndex.getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.FULLTEXT, word));
-      Hits result = is.search(query);
-      assertEquals(1, result.length());
+      TopDocs topDocs = is.search(query, null, Integer.MAX_VALUE);
+      assertEquals(1, topDocs.totalHits);
       is.close();
       Util.closeOrRelease(reader);
    }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestIndexingConfig.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestIndexingConfig.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestIndexingConfig.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -22,11 +22,13 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.util.Version;
 import org.exoplatform.services.jcr.impl.core.NodeImpl;
 import org.exoplatform.services.jcr.impl.core.RepositoryImpl;
 import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
@@ -101,7 +103,7 @@
 
       indexingConfigurationImpl.addPropertyAnalyzer("FULL:" + simple, new SimpleAnalyzer());
       indexingConfigurationImpl.addPropertyAnalyzer("FULL:" + whitespace, new WhitespaceAnalyzer());
-      indexingConfigurationImpl.addPropertyAnalyzer("FULL:" + stop, new StopAnalyzer());
+      indexingConfigurationImpl.addPropertyAnalyzer("FULL:" + stop, new StopAnalyzer(Version.LUCENE_24));
       testRoot = testSession.getRootNode().addNode("testrootAnalyzers");
       root.save();
    }
@@ -130,7 +132,7 @@
          // There must be [the] [quick] [brown] [fox] [jumped] [over] [the] [lazy] [dogs]
          // in Node1
 
-         Document doc = this.getDocument(testNode1.getInternalIdentifier(), false);
+         ScoreDoc doc = this.getDocument(testNode1.getInternalIdentifier(), false);
          assertNotNull(doc);
 
          TermQuery the = new TermQuery(new Term("FULL:" + simple, "the"));
@@ -155,8 +157,8 @@
          IndexReader ir = searchIndex.getIndexReader();
          IndexSearcher is = new IndexSearcher(ir);
 
-         Hits hits = is.search(compl);
-         assertEquals(1, hits.length());
+         TopDocs search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          // Test is there are all terms
          // There must be [xy] [z] [corporation] [xyz] [example] [com]
@@ -176,8 +178,8 @@
          compl.add(example, Occur.MUST);
          compl.add(com, Occur.MUST);
 
-         hits = is.search(compl);
-         assertEquals(1, hits.length());
+         search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          is.close();
          Util.closeOrRelease(ir);
@@ -230,8 +232,8 @@
          IndexReader ir = searchIndex.getIndexReader();
          IndexSearcher is = new IndexSearcher(ir);
 
-         Hits hits = is.search(compl);
-         assertEquals(1, hits.length());
+         TopDocs search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          // Test is there are all terms
          // There must be [XY&Z] [Corporation] [-] [xyz at example.com]
@@ -247,8 +249,8 @@
          compl.add(defiz, Occur.MUST);
          compl.add(example, Occur.MUST);
 
-         hits = is.search(compl);
-         assertEquals(1, hits.length());
+         search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          is.close();
          Util.closeOrRelease(ir);
@@ -296,8 +298,8 @@
          IndexReader ir = searchIndex.getIndexReader();
          IndexSearcher is = new IndexSearcher(ir);
 
-         Hits hits = is.search(compl);
-         assertEquals(1, hits.length());
+         TopDocs search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          // Test is there are all terms
          // There must be [xy] [z] [corporation] [xyz] [example] [com]
@@ -317,8 +319,8 @@
          compl.add(example, Occur.MUST);
          compl.add(com, Occur.MUST);
 
-         hits = is.search(compl);
-         assertEquals(1, hits.length());
+         search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          is.close();
          Util.closeOrRelease(ir);
@@ -370,8 +372,8 @@
          IndexReader ir = searchIndex.getIndexReader();
          IndexSearcher is = new IndexSearcher(ir);
 
-         Hits hits = is.search(compl);
-         assertEquals(1, hits.length());
+         TopDocs search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          // Test is there are all terms
          // Terms [xy&z] [corporation] [xyz at example] [com] - it's a default
@@ -388,8 +390,8 @@
          compl.add(corporation, Occur.MUST);
          compl.add(com, Occur.MUST);
 
-         hits = is.search(compl);
-         assertEquals(1, hits.length());
+         search = is.search(compl, null, Integer.MAX_VALUE);
+         assertEquals(1, search.totalHits);
 
          is.close();
          Util.closeOrRelease(ir);
@@ -401,22 +403,22 @@
       }
    }
 
-   protected Document getDocument(String nodeIdentifer, boolean includeSystemIndex) throws IOException,
+   protected ScoreDoc getDocument(String nodeIdentifer, boolean includeSystemIndex) throws IOException,
       RepositoryException
    {
       IndexReader reader = ((SearchIndex)searchManager.getHandler()).getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.UUID, nodeIdentifer));
 
-      Hits result = is.search(query);
+      TopDocs topDocs = is.search(query, null, Integer.MAX_VALUE);
 
       try
       {
-         if (result.length() == 1)
+         if (topDocs.totalHits == 1)
          {
-            return result.doc(0);
+            return topDocs.scoreDocs[0];
          }
-         else if (result.length() > 1)
+         else if (topDocs.totalHits > 1)
          {
             throw new RepositoryException("Results more then one");
          }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestMultiValueSearch.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestMultiValueSearch.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestMultiValueSearch.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -17,7 +17,7 @@
 
 package org.exoplatform.services.jcr.impl.core.query;
 
-import org.apache.lucene.document.Document;
+import org.apache.lucene.search.ScoreDoc;
 import org.exoplatform.services.jcr.impl.core.NodeImpl;
 
 import javax.jcr.PropertyType;
@@ -39,7 +39,7 @@
       root.save();
 
       // Check is node indexed
-      Document doc = getDocument(node.getInternalIdentifier(), false);
+      ScoreDoc doc = getDocument(node.getInternalIdentifier(), false);
       assertNotNull("Node is not indexed", doc);
       System.out.println("its doc " + doc);
 
@@ -65,7 +65,7 @@
       root.save();
 
       // Check is node indexed
-      Document doc = getDocument(node.getInternalIdentifier(), false);
+      ScoreDoc doc = getDocument(node.getInternalIdentifier(), false);
       assertNotNull("Node is not indexed", doc);
       System.out.println("its doc " + doc);
 

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestRewriteNode.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestRewriteNode.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/TestRewriteNode.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -19,9 +19,9 @@
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Hits;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
 import org.exoplatform.services.jcr.impl.core.NodeImpl;
 import org.exoplatform.services.jcr.impl.core.query.lucene.FieldNames;
 import org.exoplatform.services.jcr.impl.core.query.lucene.Util;
@@ -54,8 +54,8 @@
       IndexReader reader = defaultSearchIndex.getIndexReader();
       IndexSearcher is = new IndexSearcher(reader);
       TermQuery query = new TermQuery(new Term(FieldNames.FULLTEXT, "fox"));
-      Hits result = is.search(query);
-      assertEquals(1, result.length());
+      TopDocs topDocs = is.search(query, null, Integer.MAX_VALUE);
+      assertEquals(1, topDocs.totalHits);
 
       cont.setProperty("jcr:data", "Bahama mama");
       root.save();
@@ -63,18 +63,18 @@
       reader = defaultSearchIndex.getIndexReader();
       is = new IndexSearcher(reader);
       query = new TermQuery(new Term(FieldNames.FULLTEXT, "mama"));
-      result = is.search(query);
-      assertEquals(1, result.length());
+      topDocs = is.search(query, null, Integer.MAX_VALUE);
+      assertEquals(1, topDocs.totalHits);
 
       reader = defaultSearchIndex.getIndexReader();
       is = new IndexSearcher(reader);
       query = new TermQuery(new Term(FieldNames.FULLTEXT, "fox"));
-      result = is.search(query);
-      assertEquals(0, result.length());
+      topDocs = is.search(query, null, Integer.MAX_VALUE);
+      assertEquals(0, topDocs.totalHits);
 
       is.close();
       Util.closeOrRelease(reader);
-      
+
    }
 
 }

Modified: jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestChangesHolder.java
===================================================================
--- jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestChangesHolder.java	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/core/query/lucene/TestChangesHolder.java	2012-02-22 14:18:12 UTC (rev 5691)
@@ -20,11 +20,13 @@
 
 import junit.framework.TestCase;
 
+import org.apache.lucene.document.AbstractField;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.document.Fieldable;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -46,13 +48,14 @@
 
    public void testSerNDeserializeDocs() throws Exception
    {
-      System.out.println("###       testSerNDeserializeDocs    ###");
+      //System.out.println("###       testSerNDeserializeDocs    ###");
       Collection<Document> add = new ArrayList<Document>(3);
       Document doc = new Document();
       doc.setBoost(2.0f);
-      Field fieldFull = new Field("full", "full-value", Store.COMPRESS, Index.ANALYZED_NO_NORMS, TermVector.WITH_POSITIONS_OFFSETS);
+      Field fieldFull =
+         new Field("full", "full-value", Store.YES, Index.ANALYZED_NO_NORMS, TermVector.WITH_POSITIONS_OFFSETS);
       fieldFull.setBoost(2.0f);
-      fieldFull.setOmitTf(true);
+      fieldFull.setOmitTermFreqAndPositions(true);
       doc.add(fieldFull);
       Field fieldEmpty = new Field("empty", "empty-value", Store.NO, Index.NOT_ANALYZED, TermVector.NO);
       doc.add(fieldEmpty);
@@ -63,9 +66,9 @@
       doc = new Document();
       doc.add(fieldEmpty);
       add.add(doc);
-      
+
       ByteArrayOutputStream baos = null;
-      
+
       int total = 100000;
       long start;
       Collection<String> remove = Collections.emptyList();
@@ -78,7 +81,7 @@
          oos.writeObject(new ChangesHolder(remove, add));
          oos.close();
       }
-      System.out.println("Custom serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
+      //System.out.println("Custom serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
 
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
@@ -87,7 +90,7 @@
          addResult = ((ChangesHolder)ois.readObject()).getAdd();
          ois.close();
       }
-      System.out.println("Custom deserialization: total time = " + (System.currentTimeMillis() - start));
+      //System.out.println("Custom deserialization: total time = " + (System.currentTimeMillis() - start));
       checkDocs(addResult);
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
@@ -97,7 +100,7 @@
          oos.writeObject(add);
          oos.close();
       }
-      System.out.println("Native serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
+      //System.out.println("Native serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
       {
@@ -105,7 +108,7 @@
          addResult = (Collection<Document>)ois.readObject();
          ois.close();
       }
-      System.out.println("Native deserialization: total time = " + (System.currentTimeMillis() - start));
+      //System.out.println("Native deserialization: total time = " + (System.currentTimeMillis() - start));
       checkDocs(addResult);
    }
 
@@ -116,7 +119,7 @@
       Iterator<Document> it = addResult.iterator();
       Document doc = it.next();
       assertEquals(2.0f, doc.getBoost());
-      List<Field> fields = doc.getFields();
+      List<Fieldable> fields = doc.getFields();
       assertNotNull(fields);
       assertEquals(2, fields.size());
       checkFieldFull(fields.get(0));
@@ -135,19 +138,21 @@
       checkFieldEmpty(fields.get(0));
    }
 
-   private void checkFieldFull(Field field)
+   private void checkFieldFull(Fieldable field)
    {
       assertEquals("full", field.name());
       assertEquals("full-value", field.stringValue());
       assertTrue(field.isStored());
-      assertTrue(field.isCompressed());
       assertTrue(field.isIndexed());
       assertTrue(field.isTokenized());
       assertTrue(field.getOmitNorms());
       assertTrue(field.isTermVectorStored());
       assertTrue(field.isStoreOffsetWithTermVector());
       assertTrue(field.isStorePositionWithTermVector());
-      assertTrue(field.getOmitTf());
+      if (field instanceof AbstractField)
+      {
+         assertTrue(((AbstractField)field).getOmitTermFreqAndPositions());
+      }
       assertFalse(field.isBinary());
       assertFalse(field.isLazy());
       assertEquals(2.0f, field.getBoost());
@@ -155,35 +160,37 @@
       assertEquals(0, field.getBinaryOffset());
    }
 
-   private void checkFieldEmpty(Field field)
+   private void checkFieldEmpty(Fieldable field)
    {
       assertEquals("empty", field.name());
       assertEquals("empty-value", field.stringValue());
       assertFalse(field.isStored());
-      assertFalse(field.isCompressed());
       assertTrue(field.isIndexed());
       assertFalse(field.isTokenized());
       assertFalse(field.getOmitNorms());
       assertFalse(field.isTermVectorStored());
       assertFalse(field.isStoreOffsetWithTermVector());
       assertFalse(field.isStorePositionWithTermVector());
-      assertFalse(field.getOmitTf());
+      if (field instanceof AbstractField)
+      {
+         assertFalse(((AbstractField)field).getOmitTermFreqAndPositions());
+      }
       assertFalse(field.isBinary());
       assertFalse(field.isLazy());
       assertEquals(1.0f, field.getBoost());
       assertEquals(0, field.getBinaryLength());
       assertEquals(0, field.getBinaryOffset());
    }
-   
+
    public void testSerNDeserializeIds() throws Exception
    {
-      System.out.println("###       testSerNDeserializeIds    ###");      
+      //System.out.println("###       testSerNDeserializeIds    ###");      
       Collection<String> remove = new ArrayList<String>(3);
       remove.add(UUID.randomUUID().toString());
       remove.add(UUID.randomUUID().toString());
       remove.add(UUID.randomUUID().toString());
       ByteArrayOutputStream baos = null;
-      
+
       int total = 100000;
       long start;
       Collection<Document> add = Collections.emptyList();
@@ -196,7 +203,7 @@
          oos.writeObject(new ChangesHolder(remove, add));
          oos.close();
       }
-      System.out.println("Custom serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
+      //System.out.println("Custom serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
 
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
@@ -205,7 +212,7 @@
          addResult = ((ChangesHolder)ois.readObject()).getRemove();
          ois.close();
       }
-      System.out.println("Custom deserialization: total time = " + (System.currentTimeMillis() - start));
+      //System.out.println("Custom deserialization: total time = " + (System.currentTimeMillis() - start));
       checkIds(remove, addResult);
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
@@ -215,7 +222,7 @@
          oos.writeObject(remove);
          oos.close();
       }
-      System.out.println("Native serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
+      //System.out.println("Native serialization: total time = " + (System.currentTimeMillis() - start) + ", size = " + baos.size());
       start = System.currentTimeMillis();
       for (int i = 0; i < total; i++)
       {
@@ -223,8 +230,8 @@
          addResult = (Collection<String>)ois.readObject();
          ois.close();
       }
-      System.out.println("Native deserialization: total time = " + (System.currentTimeMillis() - start));
-      checkIds(remove, addResult);     
+      //System.out.println("Native deserialization: total time = " + (System.currentTimeMillis() - start));
+      checkIds(remove, addResult);
    }
 
    private void checkIds(Collection<String> remove, Collection<String> addResult)

Modified: jcr/branches/1.15.x/pom.xml
===================================================================
--- jcr/branches/1.15.x/pom.xml	2012-02-22 13:02:08 UTC (rev 5690)
+++ jcr/branches/1.15.x/pom.xml	2012-02-22 14:18:12 UTC (rev 5691)
@@ -274,19 +274,24 @@
       <dependency>
         <groupId>org.apache.lucene</groupId>
         <artifactId>lucene-core</artifactId>
-        <version>2.9.4</version>
+        <version>3.0.3</version>
       </dependency>
       <dependency>
         <groupId>org.apache.lucene</groupId>
         <artifactId>lucene-spellchecker</artifactId>
-        <version>2.9.4</version>
+        <version>3.0.3</version>
       </dependency>
       <dependency>
         <groupId>org.apache.lucene</groupId>
         <artifactId>lucene-memory</artifactId>
-        <version>2.9.4</version>
+        <version>3.0.3</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.lucene</groupId>
+        <artifactId>lucene-wordnet</artifactId>
+        <version>3.0.3</version>
+      </dependency>            
+      <dependency>
         <groupId>com.sun.xml.stream</groupId>
         <artifactId>sjsxp</artifactId>
         <version>1.0.1</version>



More information about the exo-jcr-commits mailing list