[hibernate-commits] Hibernate SVN: r17541 - search/branches/Branch_3_0_1_GA_CP/src/test/org/hibernate/search/test/directoryProvider.

hibernate-commits at lists.jboss.org hibernate-commits at lists.jboss.org
Fri Sep 25 05:18:24 EDT 2009


Author: jcosta at redhat.com
Date: 2009-09-25 05:18:24 -0400 (Fri, 25 Sep 2009)
New Revision: 17541

Modified:
   search/branches/Branch_3_0_1_GA_CP/src/test/org/hibernate/search/test/directoryProvider/FSSlaveAndMasterDPTest.java
Log:
HSEARCH-401 - Backported FSSlaveAndMasterDPTest test

Modified: search/branches/Branch_3_0_1_GA_CP/src/test/org/hibernate/search/test/directoryProvider/FSSlaveAndMasterDPTest.java
===================================================================
--- search/branches/Branch_3_0_1_GA_CP/src/test/org/hibernate/search/test/directoryProvider/FSSlaveAndMasterDPTest.java	2009-09-24 20:46:38 UTC (rev 17540)
+++ search/branches/Branch_3_0_1_GA_CP/src/test/org/hibernate/search/test/directoryProvider/FSSlaveAndMasterDPTest.java	2009-09-25 09:18:24 UTC (rev 17541)
@@ -7,108 +7,181 @@
 
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.queryParser.QueryParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.hibernate.Session;
+import org.hibernate.Transaction;
+import org.hibernate.HibernateException;
 import org.hibernate.cfg.Configuration;
-import org.hibernate.event.PostDeleteEventListener;
-import org.hibernate.event.PostInsertEventListener;
-import org.hibernate.event.PostUpdateEventListener;
 import org.hibernate.search.FullTextSession;
 import org.hibernate.search.Search;
-import org.hibernate.search.event.FullTextIndexEventListener;
 import org.hibernate.search.util.FileHelper;
 
 /**
+ * Test case for master/slave directories.
+ *
  * @author Emmanuel Bernard
+ * @author Hardy Ferentschik
  */
 public class FSSlaveAndMasterDPTest extends MultipleSFTestCase {
 
+	private static final Logger log = LoggerFactory.getLogger(FSSlaveAndMasterDPTest.class);
+
+	private static File root;
+
+	static {
+		String buildDir = System.getProperty( "build.dir" );
+		if ( buildDir == null ) {
+			buildDir = ".";
+		}
+		root = new File( buildDir, "lucenedirs" );
+		log.info( "Using {} as test directory.", root.getAbsolutePath() );
+	}
+
+	/**
+	 * The lucene index directory which is shared between master and slave.
+	 */
+	private String masterCopy = "/master/copy";
+
+	/**
+	 * The lucene index directory which is specific to the master node.
+	 */
+	private String masterMain = "/master/main";
+
+	/**
+	 * The lucene index directory which is specific to the slave node.
+	 */
+	private String slave = "/slave";
+
+	/**
+	 * Verifies that copies of the master get properly copied to the slaves.
+	 *
+	 * @throws Exception in case the test fails.
+	 */
 	public void testProperCopy() throws Exception {
-		Session s1 = getSessionFactories()[0].openSession( );
-		SnowStorm sn = new SnowStorm();
-		sn.setDate( new Date() );
-		sn.setLocation( "Dallas, TX, USA");
 
-		FullTextSession fts2 = Search.createFullTextSession( getSessionFactories()[1].openSession( ) );
-		QueryParser parser = new QueryParser("id", new StopAnalyzer() );
-		List result = fts2.createFullTextQuery( parser.parse( "location:texas" ) ).list();
+		// assert that the slave index is empty
+		FullTextSession fullTextSession = Search.createFullTextSession( getSlaveSession() );
+		Transaction tx = fullTextSession.beginTransaction();
+		QueryParser parser = new QueryParser( "id", new StopAnalyzer() );
+		List result = fullTextSession.createFullTextQuery( parser.parse( "location:texas" ) ).list();
 		assertEquals( "No copy yet, fresh index expected", 0, result.size() );
+		tx.commit();
+		fullTextSession.close();
 
-		s1.persist( sn );
-		s1.flush(); //we don' commit so we need to flush manually
 
-		fts2.close();
-		s1.close();
+		// create an entity on the master and persist it in order to index it
+		Session session = getMasterSession();
+		tx = session.beginTransaction();
+		SnowStorm sn = new SnowStorm();
+		sn.setDate( new Date() );
+		sn.setLocation( "Dallas, TX, USA" );
+		session.persist( sn );
+		tx.commit();
+		session.close();
 
-		int waitPeroid = 2 * 1 * 1000 + 10; //wait a bit more than 2 refresh (one master / one slave)
-		Thread.sleep( waitPeroid );
+		int waitPeriodMilli = 2010; // wait a bit more than 2 refresh periods (one master / one slave)  -  2 * 1 * 1000 + 10
+		Thread.sleep( waitPeriodMilli );
 
-		//temp test original
-		fts2 = Search.createFullTextSession( getSessionFactories()[0].openSession( ) );
-		result = fts2.createFullTextQuery( parser.parse( "location:dallas" ) ).list();
+		// assert that the master has indexed the snowstorm
+		log.info( "Searching master" );
+		fullTextSession = Search.createFullTextSession( getMasterSession() );
+		tx = fullTextSession.beginTransaction();
+		result = fullTextSession.createFullTextQuery( parser.parse( "location:dallas" ) ).list();
 		assertEquals( "Original should get one", 1, result.size() );
+		tx.commit();
+		fullTextSession.close();
 
-		fts2 = Search.createFullTextSession( getSessionFactories()[1].openSession( ) );
-		result = fts2.createFullTextQuery( parser.parse( "location:dallas" ) ).list();
-		assertEquals("First copy did not work out", 1, result.size() );
+		// assert that the index got copied to the slave as well
+		log.info( "Searching slave" );
+		fullTextSession = Search.createFullTextSession( getSlaveSession() );
+		tx = fullTextSession.beginTransaction();
+		result = fullTextSession.createFullTextQuery( parser.parse( "location:dallas" ) ).list();
+		assertEquals( "First copy did not work out", 1, result.size() );
+		tx.commit();
+		fullTextSession.close();
 
-		s1 = getSessionFactories()[0].openSession( );
+		// add a new snowstorm to the master
+		session = getMasterSession();
+		tx = session.beginTransaction();
 		sn = new SnowStorm();
 		sn.setDate( new Date() );
-		sn.setLocation( "Chennai, India");
+		sn.setLocation( "Chennai, India" );
+		session.persist( sn );
+		tx.commit();
+		session.close();
 
-		s1.persist( sn );
-		s1.flush(); //we don' commit so we need to flush manually
+		Thread.sleep( waitPeriodMilli ); //wait a bit more than 2 refresh (one master / one slave)
 
-		fts2.close();
-		s1.close();
+		// assert that the new snowstorm made it into the slave
+		log.info( "Searching slave" );
+		fullTextSession = Search.createFullTextSession( getSlaveSession() );
+		tx = fullTextSession.beginTransaction();
+		result = fullTextSession.createFullTextQuery( parser.parse( "location:chennai" ) ).list();
+		assertEquals( "Second copy did not work out", 1, result.size() );
+		tx.commit();
+		fullTextSession.close();
 
-		Thread.sleep( waitPeroid ); //wait a bit more than 2 refresh (one master / one slave)
-
-		fts2 = Search.createFullTextSession( getSessionFactories()[1].openSession( ) );
-		result = fts2.createFullTextQuery( parser.parse( "location:chennai" ) ).list();
-		assertEquals("Second copy did not work out", 1, result.size() );
-
-		s1 = getSessionFactories()[0].openSession( );
+		session = getMasterSession();
+		tx = session.beginTransaction();
 		sn = new SnowStorm();
 		sn.setDate( new Date() );
-		sn.setLocation( "Melbourne, Australia");
+		sn.setLocation( "Melbourne, Australia" );
+		session.persist( sn );
+		tx.commit();
+		session.close();
 
-		s1.persist( sn );
-		s1.flush(); //we don' commit so we need to flush manually
+		Thread.sleep( waitPeriodMilli ); //wait a bit more than 2 refresh (one master / one slave)
 
-		fts2.close();
-		s1.close();
+		// once more - assert that the new snowstorm made it into the slave
+		log.info( "Searching slave" );
+		fullTextSession = Search.createFullTextSession( getSlaveSession() );
+		tx = fullTextSession.beginTransaction();
+		result = fullTextSession.createFullTextQuery( parser.parse( "location:melbourne" ) ).list();
+		assertEquals( "Third copy did not work out", 1, result.size() );
+		tx.commit();
+		fullTextSession.close();
+	}
 
-		Thread.sleep( waitPeroid ); //wait a bit more than 2 refresh (one master / one slave)
+	private Session getMasterSession() {
+		return getSessionFactories()[0].openSession();
+	}
 
-		fts2 = Search.createFullTextSession( getSessionFactories()[1].openSession( ) );
-		result = fts2.createFullTextQuery( parser.parse( "location:melbourne" ) ).list();
-		assertEquals("Third copy did not work out", 1, result.size() );
-
-		fts2.close();
+	private Session getSlaveSession() {
+		return getSessionFactories()[1].openSession();
 	}
 
-
 	protected void setUp() throws Exception {
-		File base = new File(".");
-		File root = new File(base, "lucenedirs");
-		root.mkdir();
 
-		File master = new File(root, "master/main");
-		master.mkdirs();
-		master = new File(root, "master/copy");
-		master.mkdirs();
+		if ( root.exists() ) {
+			FileHelper.delete( root );
+		}
 
-		File slave = new File(root, "slave");
-		slave.mkdir();
+		if ( !root.mkdir() ) {
+			throw new HibernateException( "Unable to setup test directories" );
+		}
 
+		File master = new File( root, masterMain );
+		if ( !master.mkdirs() ) {
+			throw new HibernateException( "Unable to setup master directory" );
+		}
+
+		master = new File( root, masterCopy );
+		if ( !master.mkdirs() ) {
+			throw new HibernateException( "Unable to setup master copy directory" );
+		}
+
+		File slaveFile = new File( root, slave );
+		if ( !slaveFile.mkdirs() ) {
+			throw new HibernateException( "Unable to setup slave directory" );
+		}
 		super.setUp();
 	}
 
 	protected void tearDown() throws Exception {
 		super.tearDown();
-		File base = new File(".");
-		File root = new File(base, "lucenedirs");
+		log.info( "Deleting test directory {} ", root.getAbsolutePath() );
 		FileHelper.delete( root );
 	}
 
@@ -124,15 +197,19 @@
 
 	protected void configure(Configuration[] cfg) {
 		//master
-		cfg[0].setProperty( "hibernate.search.default.sourceBase", "./lucenedirs/master/copy");
-		cfg[0].setProperty( "hibernate.search.default.indexBase", "./lucenedirs/master/main");
-		cfg[0].setProperty( "hibernate.search.default.refresh", "1"); //every minute
-		cfg[0].setProperty( "hibernate.search.default.directory_provider", "org.hibernate.search.store.FSMasterDirectoryProvider");
+		cfg[0].setProperty( "hibernate.search.default.sourceBase", root.getAbsolutePath() + masterCopy );
+		cfg[0].setProperty( "hibernate.search.default.indexBase", root.getAbsolutePath() + masterMain );
+		cfg[0].setProperty( "hibernate.search.default.refresh", "1" ); //every second
+		cfg[0].setProperty(
+				"hibernate.search.default.directory_provider", "org.hibernate.search.store.FSMasterDirectoryProvider"
+		);
 
 		//slave(s)
-		cfg[1].setProperty( "hibernate.search.default.sourceBase", "./lucenedirs/master/copy");
-		cfg[1].setProperty( "hibernate.search.default.indexBase", "./lucenedirs/slave");
-		cfg[1].setProperty( "hibernate.search.default.refresh", "1"); //every minute
-		cfg[1].setProperty( "hibernate.search.default.directory_provider", "org.hibernate.search.store.FSSlaveDirectoryProvider");
+		cfg[1].setProperty( "hibernate.search.default.sourceBase", root.getAbsolutePath() + masterCopy );
+		cfg[1].setProperty( "hibernate.search.default.indexBase", root.getAbsolutePath() + slave );
+		cfg[1].setProperty( "hibernate.search.default.refresh", "1" ); //every second
+		cfg[1].setProperty(
+				"hibernate.search.default.directory_provider", "org.hibernate.search.store.FSSlaveDirectoryProvider"
+		);
 	}
 }



More information about the hibernate-commits mailing list