[infinispan-commits] Infinispan SVN: r1907 - in branches/4.1.x: server/core/src/main/scala/org/infinispan/server/core and 3 other directories.
infinispan-commits at lists.jboss.org
infinispan-commits at lists.jboss.org
Sun Jun 13 11:16:14 EDT 2010
Author: galder.zamarreno at jboss.com
Date: 2010-06-13 11:16:13 -0400 (Sun, 13 Jun 2010)
New Revision: 1907
Added:
branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodProxyTest.scala
Modified:
branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/infinispan-ec2-config.xml
branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-gossiprouter-aws.xml
branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-s3_ping-aws.xml
branches/4.1.x/server/core/src/main/scala/org/infinispan/server/core/Main.scala
branches/4.1.x/server/hotrod/src/main/scala/org/infinispan/server/hotrod/HotRodServer.scala
branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodMultiNodeTest.scala
branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodReplicationTest.scala
branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/test/HotRodTestingUtil.scala
Log:
[ISPN-494] (Hot Rod needs proxy address and port to work on cloud-like envs) Done.
Modified: branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/infinispan-ec2-config.xml
===================================================================
--- branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/infinispan-ec2-config.xml 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/infinispan-ec2-config.xml 2010-06-13 15:16:13 UTC (rev 1907)
@@ -1,87 +1,87 @@
<?xml version="1.0" encoding="UTF-8"?>
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="urn:infinispan:config:4.0 http://www.infinispan.org/schemas/infinispan-config-4.0.xsd"
- xmlns="urn:infinispan:config:4.0">
+ xsi:schemaLocation="urn:infinispan:config:4.0 http://www.infinispan.org/schemas/infinispan-config-4.0.xsd"
+ xmlns="urn:infinispan:config:4.0">
- <global>
- <asyncListenerExecutor
- factory="org.infinispan.executors.DefaultExecutorFactory">
- <properties>
- <property name="maxThreads" value="15" />
- <property name="threadNamePrefix" value="AsyncListenerThread" />
- </properties>
- </asyncListenerExecutor>
- <asyncTransportExecutor
- factory="org.infinispan.executors.DefaultExecutorFactory">
- <properties>
- <property name="maxThreads" value="25" />
- <property name="threadNamePrefix" value="AsyncSerializationThread" />
- </properties>
- </asyncTransportExecutor>
- <evictionScheduledExecutor
- factory="org.infinispan.executors.DefaultScheduledExecutorFactory">
- <properties>
- <property name="threadNamePrefix" value="EvictionThread" />
- </properties>
- </evictionScheduledExecutor>
- <replicationQueueScheduledExecutor
- factory="org.infinispan.executors.DefaultScheduledExecutorFactory">
- <properties>
- <property name="threadNamePrefix" value="ReplicationQueueThread" />
- </properties>
- </replicationQueueScheduledExecutor>
- <globalJmxStatistics enabled="true" jmxDomain="infinispan" />
+ <global>
+ <asyncListenerExecutor
+ factory="org.infinispan.executors.DefaultExecutorFactory">
+ <properties>
+ <property name="maxThreads" value="15"/>
+ <property name="threadNamePrefix" value="AsyncListenerThread"/>
+ </properties>
+ </asyncListenerExecutor>
+ <asyncTransportExecutor
+ factory="org.infinispan.executors.DefaultExecutorFactory">
+ <properties>
+ <property name="maxThreads" value="25"/>
+ <property name="threadNamePrefix" value="AsyncSerializationThread"/>
+ </properties>
+ </asyncTransportExecutor>
+ <evictionScheduledExecutor
+ factory="org.infinispan.executors.DefaultScheduledExecutorFactory">
+ <properties>
+ <property name="threadNamePrefix" value="EvictionThread"/>
+ </properties>
+ </evictionScheduledExecutor>
+ <replicationQueueScheduledExecutor
+ factory="org.infinispan.executors.DefaultScheduledExecutorFactory">
+ <properties>
+ <property name="threadNamePrefix" value="ReplicationQueueThread"/>
+ </properties>
+ </replicationQueueScheduledExecutor>
+ <globalJmxStatistics enabled="true" jmxDomain="infinispan"/>
- <transport clusterName="infinispan-cluster"
- distributedSyncTimeout="50000"
- transportClass="org.infinispan.remoting.transport.jgroups.JGroupsTransport">
- <properties>
- <property name="configurationFile" value="${CFGPath}/jgroups-s3_ping-aws.xml" />
- </properties>
- </transport>
- <serialization marshallerClass="org.infinispan.marshall.VersionAwareMarshaller"
- version="1.0" />
- <shutdown hookBehavior="DEFAULT" />
- </global>
+ <transport clusterName="infinispan-cluster"
+ distributedSyncTimeout="50000"
+ transportClass="org.infinispan.remoting.transport.jgroups.JGroupsTransport">
+ <properties>
+ <property name="configurationFile" value="${CFGPath}/jgroups-s3_ping-aws.xml"/>
+ </properties>
+ </transport>
+ <serialization marshallerClass="org.infinispan.marshall.VersionAwareMarshaller"
+ version="1.0"/>
+ <shutdown hookBehavior="DEFAULT"/>
+ </global>
- <!-- *************************** -->
- <!-- Default "template" settings -->
- <!-- *************************** -->
- <default>
- <invocationBatching enabled="true" />
- <clustering mode="distribution">
- <sync replTimeout="200000" />
- <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000" />
- </clustering>
- </default>
+ <!-- *************************** -->
+ <!-- Default "template" settings -->
+ <!-- *************************** -->
+ <default>
+ <invocationBatching enabled="true"/>
+ <clustering mode="distribution">
+ <sync replTimeout="200000"/>
+ <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000"/>
+ </clustering>
+ </default>
- <!-- ************************************** -->
- <!-- Individually configured "named" caches -->
- <!-- ************************************** -->
- <namedCache name="InfluenzaCache">
- <unsafe unreliableReturnValues="true" />
- <clustering mode="distribution">
- <sync replTimeout="200000" />
- <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000" />
- <l1 enabled="true" lifespan="600000" />
- </clustering>
- </namedCache>
+ <!-- ************************************** -->
+ <!-- Individually configured "named" caches -->
+ <!-- ************************************** -->
+ <namedCache name="InfluenzaCache">
+ <unsafe unreliableReturnValues="true"/>
+ <clustering mode="distribution">
+ <sync replTimeout="200000"/>
+ <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000"/>
+ <l1 enabled="true" lifespan="600000"/>
+ </clustering>
+ </namedCache>
- <namedCache name="NucleotideCache">
- <unsafe unreliableReturnValues="true" />
- <clustering mode="distribution">
- <sync replTimeout="20000" />
- <hash numOwners="2" rehashWait="120000" rehashRpcTimeout="600000" />
- <l1 enabled="true" lifespan="600000" />
- </clustering>
- </namedCache>
+ <namedCache name="NucleotideCache">
+ <unsafe unreliableReturnValues="true"/>
+ <clustering mode="distribution">
+ <sync replTimeout="20000"/>
+ <hash numOwners="2" rehashWait="120000" rehashRpcTimeout="600000"/>
+ <l1 enabled="true" lifespan="600000"/>
+ </clustering>
+ </namedCache>
- <namedCache name="ProteinCache">
- <unsafe unreliableReturnValues="true" />
- <clustering mode="distribution">
- <sync replTimeout="20000" />
- <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000" />
- <l1 enabled="true" lifespan="600000" />
- </clustering>
- </namedCache>
+ <namedCache name="ProteinCache">
+ <unsafe unreliableReturnValues="true"/>
+ <clustering mode="distribution">
+ <sync replTimeout="20000"/>
+ <hash numOwners="2" rehashWait="60000" rehashRpcTimeout="600000"/>
+ <l1 enabled="true" lifespan="600000"/>
+ </clustering>
+ </namedCache>
</infinispan>
Modified: branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-gossiprouter-aws.xml
===================================================================
--- branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-gossiprouter-aws.xml 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-gossiprouter-aws.xml 2010-06-13 15:16:13 UTC (rev 1907)
@@ -1,19 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<config>
- <TCP bind_port="7800" />
- <TCPGOSSIP timeout="3000" initial_hosts="localhost[5555]" num_initial_members="3" />
- <MERGE2 max_interval="30000" min_interval="10000" />
- <FD_SOCK />
- <FD timeout="10000" max_tries="5" shun="true" />
- <VERIFY_SUSPECT timeout="1500" />
- <pbcast.NAKACK use_mcast_xmit="false" gc_lag="0" retransmit_timeout="300,600,1200,2400,4800"
- discard_delivered_msgs="true" />
- <UNICAST timeout="300,600,1200,2400,3600" />
- <pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000" max_bytes="400000" />
- <pbcast.GMS print_local_addr="true" join_timeout="3000" shun="false" view_bundling="true" />
- <FC max_credits="20000000" min_threshold="0.10" />
- <FRAG2 frag_size="60000" />
- <pbcast.STATE_TRANSFER />
+ <TCP bind_port="7800"/>
+ <TCPGOSSIP timeout="3000" initial_hosts="localhost[5555]" num_initial_members="3"/>
+ <MERGE2 max_interval="30000" min_interval="10000"/>
+ <FD_SOCK/>
+ <FD timeout="10000" max_tries="5" shun="true"/>
+ <VERIFY_SUSPECT timeout="1500"/>
+ <pbcast.NAKACK use_mcast_xmit="false" gc_lag="0" retransmit_timeout="300,600,1200,2400,4800"
+ discard_delivered_msgs="true"/>
+ <UNICAST timeout="300,600,1200,2400,3600"/>
+ <pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000" max_bytes="400000"/>
+ <pbcast.GMS print_local_addr="true" join_timeout="3000" shun="false" view_bundling="true"/>
+ <FC max_credits="20000000" min_threshold="0.10"/>
+ <FRAG2 frag_size="60000"/>
+ <pbcast.STATE_TRANSFER/>
</config>
Modified: branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-s3_ping-aws.xml
===================================================================
--- branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-s3_ping-aws.xml 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/demos/ec2/src/main/resources/config-samples/ec2-demo/jgroups-s3_ping-aws.xml 2010-06-13 15:16:13 UTC (rev 1907)
@@ -1,20 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<config>
- <TCP bind_port="7800" />
- <S3_PING secret_access_key="replace with your secret access key" access_key="replace with your access key"
- location="replace with your s3 bucket name" />
- <MERGE2 max_interval="30000" min_interval="10000" />
- <FD_SOCK />
- <FD timeout="10000" max_tries="5" />
- <VERIFY_SUSPECT timeout="1500" />
- <pbcast.NAKACK use_mcast_xmit="false" gc_lag="0" retransmit_timeout="300,600,1200,2400,4800"
- discard_delivered_msgs="true" />
- <UNICAST timeout="300,600,1200,2400,3600" />
- <pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000" max_bytes="400000" />
- <VIEW_SYNC avg_send_interval="60000" />
- <pbcast.GMS print_local_addr="true" join_timeout="60000" view_bundling="true" />
- <FC max_credits="20000000" min_threshold="0.10" />
- <FRAG2 frag_size="60000" />
- <pbcast.STATE_TRANSFER />
- <pbcast.FLUSH timeout="0" />
+ <TCP bind_port="7800"/>
+ <S3_PING secret_access_key="replace with your secret access key" access_key="replace with your access key"
+ location="replace with your s3 bucket name"/>
+ <MERGE2 max_interval="30000" min_interval="10000"/>
+ <FD_SOCK/>
+ <FD timeout="10000" max_tries="5"/>
+ <VERIFY_SUSPECT timeout="1500"/>
+ <pbcast.NAKACK use_mcast_xmit="false" gc_lag="0" retransmit_timeout="300,600,1200,2400,4800"
+ discard_delivered_msgs="true"/>
+ <UNICAST timeout="300,600,1200,2400,3600"/>
+ <pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000" max_bytes="400000"/>
+ <VIEW_SYNC avg_send_interval="60000"/>
+ <pbcast.GMS print_local_addr="true" join_timeout="60000" view_bundling="true"/>
+ <FC max_credits="20000000" min_threshold="0.10"/>
+ <FRAG2 frag_size="60000"/>
+ <pbcast.STATE_TRANSFER/>
+ <pbcast.FLUSH timeout="0"/>
</config>
Modified: branches/4.1.x/server/core/src/main/scala/org/infinispan/server/core/Main.scala
===================================================================
--- branches/4.1.x/server/core/src/main/scala/org/infinispan/server/core/Main.scala 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/server/core/src/main/scala/org/infinispan/server/core/Main.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -30,7 +30,8 @@
val PROP_KEY_TCP_NO_DELAY = "infinispan.server.tcp_no_delay"
val PROP_KEY_SEND_BUF_SIZE = "infinispan.server.send_buf_size"
val PROP_KEY_RECV_BUF_SIZE = "infinispan.server.recv_buf_size"
- val PORT_DEFAULT = 11211
+ val PROP_KEY_PROXY_HOST = "infinispan.server.proxy_host"
+ val PROP_KEY_PROXY_PORT = "infinispan.server.proxy_port"
val HOST_DEFAULT = "127.0.0.1"
val MASTER_THREADS_DEFAULT = "0"
val WORKER_THREADS_DEFAULT = "0"
@@ -150,6 +151,13 @@
}
props.setProperty(PROP_KEY_PORT, port.toString)
+ // If no proxy host given, external host defaults to configured host
+ val externalHost = props.getProperty(PROP_KEY_PROXY_HOST, props.getProperty(PROP_KEY_HOST))
+ props.setProperty(PROP_KEY_PROXY_HOST, externalHost)
+ // If no proxy port given, external port defaults to configured port
+ val externalPort = props.getProperty(PROP_KEY_PROXY_PORT, props.getProperty(PROP_KEY_PORT))
+ props.setProperty(PROP_KEY_PROXY_PORT, externalPort)
+
val configFile = props.getProperty(PROP_KEY_CACHE_CONFIG)
val cacheManager = if (configFile == null) new DefaultCacheManager else new DefaultCacheManager(configFile)
addShutdownHook(new ShutdownHook(server, cacheManager))
@@ -171,7 +179,9 @@
new LongOpt("idle_timeout", LongOpt.REQUIRED_ARGUMENT, null, 'i'),
new LongOpt("tcp_no_delay", LongOpt.REQUIRED_ARGUMENT, null, 'n'),
new LongOpt("send_buf_size", LongOpt.REQUIRED_ARGUMENT, null, 's'),
- new LongOpt("recv_buf_size", LongOpt.REQUIRED_ARGUMENT, null, 'e')
+ new LongOpt("recv_buf_size", LongOpt.REQUIRED_ARGUMENT, null, 'e'),
+ new LongOpt("proxy_host", LongOpt.REQUIRED_ARGUMENT, null, 'o'),
+ new LongOpt("proxy_port", LongOpt.REQUIRED_ARGUMENT, null, 'x')
)
var getopt = new Getopt(programName, args, sopts, lopts)
var code: Int = 0
@@ -193,7 +203,9 @@
case 'i' => props.setProperty(PROP_KEY_IDLE_TIMEOUT, getopt.getOptarg)
case 'n' => props.setProperty(PROP_KEY_TCP_NO_DELAY, getopt.getOptarg)
case 's' => props.setProperty(PROP_KEY_SEND_BUF_SIZE, getopt.getOptarg)
- case 'e' => props.setProperty(PROP_KEY_RECV_BUF_SIZE, getopt.getOptarg)
+ case 'e' => props.setProperty(PROP_KEY_RECV_BUF_SIZE, getopt.getOptarg)
+ case 'o' => props.setProperty(PROP_KEY_PROXY_HOST, getopt.getOptarg)
+ case 'x' => props.setProperty(PROP_KEY_PROXY_PORT, getopt.getOptarg)
case 'D' => {
val arg = getopt.getOptarg
var name = ""
@@ -255,6 +267,10 @@
println
println(" -e, --recv_buf_size=<num> Receive buffer size (default: as defined by the OS).")
println
+ println(" -o, --proxy_host=<host or ip> Host address to expose in topology information sent to clients. If not present, it defaults to configured host. Servers that do not transmit topology information ignore this setting.")
+ println
+ println(" -x, --proxy_port=<num> Port to expose in topology information sent to clients. If not present, it defaults to configured port. Servers that do not transmit topology information ignore this setting.")
+ println
println(" -D<name>[=<value>] Set a system property")
println
System.exit(0)
Modified: branches/4.1.x/server/hotrod/src/main/scala/org/infinispan/server/hotrod/HotRodServer.scala
===================================================================
--- branches/4.1.x/server/hotrod/src/main/scala/org/infinispan/server/hotrod/HotRodServer.scala 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/server/hotrod/src/main/scala/org/infinispan/server/hotrod/HotRodServer.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -14,6 +14,7 @@
import org.infinispan.remoting.transport.Address
import org.infinispan.manager.EmbeddedCacheManager
import java.util.{Properties, Random};
+import org.infinispan.server.core.Main._
/**
* // TODO: Document this
@@ -42,8 +43,11 @@
isClustered = cacheManager.getGlobalConfiguration.getTransportClass != null
// If clustered, set up a cache for topology information
- if (isClustered)
- addSelfToTopologyView(getHost, getPort, cacheManager)
+ if (isClustered) {
+ val externalHost = properties.getProperty(PROP_KEY_PROXY_HOST, properties.getProperty(PROP_KEY_HOST))
+ val externalPort = properties.getProperty(PROP_KEY_PROXY_PORT, properties.getProperty(PROP_KEY_PORT)).toInt
+ addSelfToTopologyView(externalHost, externalPort, cacheManager)
+ }
}
private def addSelfToTopologyView(host: String, port: Int, cacheManager: EmbeddedCacheManager) {
Modified: branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodMultiNodeTest.scala
===================================================================
--- branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodMultiNodeTest.scala 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodMultiNodeTest.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -5,6 +5,7 @@
import org.testng.annotations.{AfterMethod, AfterClass, Test}
import test.HotRodClient
import test.HotRodTestingUtil._
+import org.infinispan.manager.EmbeddedCacheManager
/**
* Base test class for multi node or clustered Hot Rod tests.
@@ -22,13 +23,17 @@
val cm = addClusterEnabledCacheManager()
cm.defineConfiguration(cacheName, createCacheConfig)
}
- hotRodServers = hotRodServers ::: List(startHotRodServer(cacheManagers.get(0)))
- hotRodServers = hotRodServers ::: List(startHotRodServer(cacheManagers.get(1), hotRodServers.head.getPort + 50))
+ hotRodServers = hotRodServers ::: List(startTestHotRodServer(cacheManagers.get(0)))
+ hotRodServers = hotRodServers ::: List(startTestHotRodServer(cacheManagers.get(1), hotRodServers.head.getPort + 50))
hotRodServers.foreach {s =>
hotRodClients = new HotRodClient("127.0.0.1", s.getPort, cacheName, 60) :: hotRodClients
}
}
+ protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager) = startHotRodServer(cacheManager)
+
+ protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager, port: Int) = startHotRodServer(cacheManager, port)
+
@AfterClass(alwaysRun = true)
override def destroy {
log.debug("Test finished, close Hot Rod server", null)
Added: branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodProxyTest.scala
===================================================================
--- branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodProxyTest.scala (rev 0)
+++ branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodProxyTest.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -0,0 +1,50 @@
+package org.infinispan.server.hotrod
+
+import org.infinispan.manager.EmbeddedCacheManager
+import test.HotRodTestingUtil._
+import org.infinispan.server.hotrod.OperationStatus._
+import org.testng.annotations.Test
+import org.testng.Assert._
+import org.infinispan.config.Configuration
+import org.infinispan.test.AbstractCacheTest._
+
+/**
+ * // TODO: Document this
+ * @author Galder Zamarreño
+ * @since // TODO
+ */
+@Test(groups = Array("functional"), testName = "server.hotrod.HotRodProxyTest")
+class HotRodProxyTest extends HotRodMultiNodeTest {
+
+ private val proxyHost1 = "1.2.3.4"
+ private val proxyHost2 = "2.3.4.5"
+ private val proxyPort1 = 8123
+ private val proxyPort2 = 9123
+
+ override protected def cacheName: String = "hotRodProxy"
+
+ override protected def createCacheConfig: Configuration = {
+ val config = getDefaultClusteredConfig(Configuration.CacheMode.REPL_SYNC)
+ config.setFetchInMemoryState(true)
+ config
+ }
+
+ override protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager) =
+ startHotRodServer(cacheManager, proxyHost1, proxyPort1)
+
+ override protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager, port: Int) =
+ startHotRodServer(cacheManager, port, proxyHost2, proxyPort2)
+
+ def testTopologyWithProxiesReturned {
+ val resp = clients.head.ping(2, 0)
+ assertStatus(resp.status, Success)
+ val topoResp = resp.topologyResponse.get
+ assertEquals(topoResp.view.topologyId, 2)
+ assertEquals(topoResp.view.members.size, 2)
+ assertEquals(topoResp.view.members.head.host, proxyHost1)
+ assertEquals(topoResp.view.members.head.port, proxyPort1)
+ assertEquals(topoResp.view.members.tail.head.host, proxyHost2)
+ assertEquals(topoResp.view.members.tail.head.port, proxyPort2)
+ }
+
+}
\ No newline at end of file
Modified: branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodReplicationTest.scala
===================================================================
--- branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodReplicationTest.scala 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodReplicationTest.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -54,7 +54,7 @@
assertSuccess(clients.tail.head.assertGet(m), v(m, "v2-"))
}
- def testPingWithTopologyAwareClient(m: Method) {
+ def testPingWithTopologyAwareClient {
var resp = clients.head.ping
assertStatus(resp.status, Success)
assertEquals(resp.topologyResponse, None)
Modified: branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/test/HotRodTestingUtil.scala
===================================================================
--- branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/test/HotRodTestingUtil.scala 2010-06-13 12:28:33 UTC (rev 1906)
+++ branches/4.1.x/server/hotrod/src/test/scala/org/infinispan/server/hotrod/test/HotRodTestingUtil.scala 2010-06-13 15:16:13 UTC (rev 1907)
@@ -27,25 +27,36 @@
def startHotRodServer(manager: EmbeddedCacheManager): HotRodServer =
startHotRodServer(manager, UniquePortThreadLocal.get.intValue)
+ def startHotRodServer(manager: EmbeddedCacheManager, proxyHost: String, proxyPort: Int): HotRodServer =
+ startHotRodServer(manager, UniquePortThreadLocal.get.intValue, 0, proxyHost, proxyPort)
+
def startHotRodServer(manager: EmbeddedCacheManager, port: Int): HotRodServer =
startHotRodServer(manager, port, 0)
- def startHotRodServer(manager: EmbeddedCacheManager, port: Int, idleTimeout: Int): HotRodServer = {
+ def startHotRodServer(manager: EmbeddedCacheManager, port:Int, proxyHost: String, proxyPort: Int): HotRodServer =
+ startHotRodServer(manager, port, 0, proxyHost, proxyPort)
+
+ def startHotRodServer(manager: EmbeddedCacheManager, port: Int, idleTimeout: Int): HotRodServer =
+ startHotRodServer(manager, port, idleTimeout, host, port)
+
+ def startHotRodServer(manager: EmbeddedCacheManager, port: Int, idleTimeout: Int, proxyHost: String, proxyPort: Int): HotRodServer = {
val server = new HotRodServer {
import HotRodServer._
override protected def defineTopologyCacheConfig(cacheManager: EmbeddedCacheManager) {
cacheManager.defineConfiguration(TopologyCacheName, createTopologyCacheConfig)
}
}
- server.start(getProperties(host, port, idleTimeout), manager)
+ server.start(getProperties(host, port, idleTimeout, proxyHost, proxyPort), manager)
server
}
- private def getProperties(host: String, port: Int, idleTimeout: Int): Properties = {
+ private def getProperties(host: String, port: Int, idleTimeout: Int, proxyHost: String, proxyPort: Int): Properties = {
val properties = new Properties
properties.setProperty(PROP_KEY_HOST, host)
properties.setProperty(PROP_KEY_PORT, port.toString)
properties.setProperty(PROP_KEY_IDLE_TIMEOUT, idleTimeout.toString)
+ properties.setProperty(PROP_KEY_PROXY_HOST, proxyHost)
+ properties.setProperty(PROP_KEY_PROXY_PORT, proxyPort.toString)
properties
}
@@ -61,7 +72,7 @@
// but has been evicted from JGroups cluster.
}
}
- server.start(getProperties(host, port, 0), manager)
+ server.start(getProperties(host, port, 0, host, port), manager)
server
}
More information about the infinispan-commits
mailing list