JBoss Cache SVN: r5452 - amazon-s3/trunk/src/main/java/com/amazon/s3/emulator.
by jbosscache-commits@lists.jboss.org
Author: genman
Date: 2008-03-19 20:05:23 -0400 (Wed, 19 Mar 2008)
New Revision: 5452
Modified:
amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
Log:
Javadoc
Fix potential thread safety issues
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java 2008-03-20 00:00:45 UTC (rev 5451)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java 2008-03-20 00:05:23 UTC (rev 5452)
@@ -30,6 +30,16 @@
import com.amazon.s3.Owner;
import com.amazon.s3.S3Object;
+/**
+ * Amazon S3 emulator that stores data in an internal sorted map.
+ * Not highly scalable or reliable, but may be fine for testing your application.
+ *
+ * Some browse methods are not complete.
+ * Get Data/Put/Head are mostly complete.
+ * What's supported is in the test suite.
+ *
+ * @author Elias Ross
+ */
public class Server extends HttpServlet {
private static final long serialVersionUID = 1L;
@@ -37,7 +47,7 @@
private Log log = LogFactory.getLog(getClass());
private ServletServer ss;
private boolean bucket = false;
- private SortedMap<Entry, S3Object> map = new TreeMap<Entry, S3Object>();
+ private SortedMap<Entry, S3Object> map = Collections.synchronizedSortedMap(new TreeMap<Entry, S3Object>());
public Server() throws IOException {
ss = new ServletServer(this);
@@ -125,9 +135,9 @@
if (maxKeysStr != null)
maxKeys = Integer.parseInt(maxKeysStr);
Writer w = new Writer();
- Map<Entry, S3Object> submap = map;
+ SortedMap<Entry, S3Object> submap = new TreeMap<Entry, S3Object>(map);
if (prefix != null)
- submap = map.tailMap(new Entry(prefix));
+ submap = submap.tailMap(new Entry(prefix));
int keyCount = 0;
boolean truncated = false;
String nextMarker = null;
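For illustration, the change above does two things: the shared map is wrapped with Collections.synchronizedSortedMap() so individual put/get/remove calls are safe, and list() now copies the map into a fresh TreeMap before calling tailMap(), so iteration no longer races with concurrent writes. A minimal sketch of the same pattern outside the emulator, using plain String keys and values instead of Entry/S3Object, might look like this:

import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;

public class SnapshotListing {

    // Individual operations on the synchronized view are thread safe.
    private final SortedMap<String, String> map =
            Collections.synchronizedSortedMap(new TreeMap<String, String>());

    public void put(String key, String value) {
        map.put(key, value);
    }

    // Copy first, then iterate the snapshot; iterating the synchronized view
    // directly while another thread writes can throw
    // ConcurrentModificationException. A strictly correct copy would also
    // synchronize on 'map' around the constructor call, but for a test
    // emulator the plain copy is usually good enough.
    public Iterable<String> keysFrom(String prefix) {
        SortedMap<String, String> snapshot = new TreeMap<String, String>(map);
        return snapshot.tailMap(prefix).keySet();
    }
}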
JBoss Cache SVN: r5451 - in amazon-s3/trunk: src/main/java/com/amazon/s3/emulator and 1 other directories.
by jbosscache-commits@lists.jboss.org
Author: genman
Date: 2008-03-19 20:00:45 -0400 (Wed, 19 Mar 2008)
New Revision: 5451
Modified:
amazon-s3/trunk/pom.xml
amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
amazon-s3/trunk/src/test/java/com/amazon/s3/CanonicalStringTest.java
amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java
amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java
Log:
Emulator seems to work with CacheLoader now
Modified: amazon-s3/trunk/pom.xml
===================================================================
--- amazon-s3/trunk/pom.xml 2008-03-19 22:46:38 UTC (rev 5450)
+++ amazon-s3/trunk/pom.xml 2008-03-20 00:00:45 UTC (rev 5451)
@@ -65,5 +65,10 @@
<artifactId>commons-httpclient</artifactId>
<version>3.0.1</version>
</dependency>
+ <dependency>
+ <groupId>commons-logging</groupId>
+ <artifactId>commons-logging</artifactId>
+ <version>1.1.1</version>
+ </dependency>
</dependencies>
</project>
\ No newline at end of file
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java 2008-03-19 22:46:38 UTC (rev 5450)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java 2008-03-20 00:00:45 UTC (rev 5451)
@@ -20,6 +20,9 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
import net.noderunner.http.servlet.ServletServer;
import com.amazon.s3.Entry;
@@ -31,41 +34,41 @@
private static final long serialVersionUID = 1L;
+ private Log log = LogFactory.getLog(getClass());
private ServletServer ss;
private boolean bucket = false;
private SortedMap<Entry, S3Object> map = new TreeMap<Entry, S3Object>();
public Server() throws IOException {
ss = new ServletServer(this);
+ log.info("Server created " + this);
}
/**
* Closes socket, stops accepting requests.
*/
public void close() throws IOException {
+ log("close");
ss.close();
}
@Override
protected void doDelete(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
- URI uri;
- try {
- uri = new URI(req.getRequestURI());
- } catch (URISyntaxException e1) {
- throw new RuntimeException(e1);
- }
- String key = uri.getPath().substring(1);
- Entry e = new Entry(key);
+ Entry e = entry(req);
S3Object remove = map.remove(e);
if (remove == null) {
- resp.sendError(404, "Not found " + key);
+ resp.sendError(404, "Not found " + e);
} else {
resp.sendError(HttpURLConnection.HTTP_NO_CONTENT, "Deleted");
}
}
+ private Entry entry(HttpServletRequest req) {
+ return new Entry(key(uri(req)));
+ }
+
/**
* Listening port.
*/
@@ -83,26 +86,24 @@
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
- URI uri;
- try {
- uri = new URI(req.getRequestURI());
- } catch (URISyntaxException e1) {
- throw new RuntimeException(e1);
- }
+ URI uri = uri(req);
+ boolean debug = log.isDebugEnabled();
+ if (debug)
+ log("doGet " + uri);
if ("/".equals(uri.getPath())) {
list(req, resp);
} else {
String key = uri.getPath().substring(1);
Entry e = new Entry(key);
S3Object obj = map.get(e);
+ if (debug)
+ log("map.get(" + key + ") = " + obj);
if (obj == null) {
resp.sendError(404, "Not here: " + e);
return;
}
Headers h = new Headers();
h = h.mergeMetadata(obj.getMetadata());
- log("Headers " + obj);
- log("Headers " + h);
for (Map.Entry<String, List<String>> me : h.getHeaders().entrySet()) {
for (String v : me.getValue()) {
resp.setHeader(me.getKey(), v);
@@ -118,6 +119,8 @@
String marker = req.getParameter("marker");
String delimiter = req.getParameter("delimiter");
String maxKeysStr = req.getParameter("max-keys");
+ if (log.isDebugEnabled())
+ log("list prefix=" + prefix + " delimiter=" + delimiter);
int maxKeys = Integer.MAX_VALUE;
if (maxKeysStr != null)
maxKeys = Integer.parseInt(maxKeysStr);
@@ -127,16 +130,24 @@
submap = map.tailMap(new Entry(prefix));
int keyCount = 0;
boolean truncated = false;
+ String nextMarker = null;
for (Entry e : submap.keySet()) {
if (++keyCount > maxKeys) {
truncated = true;
break;
}
String key = e.getKey();
- if (prefix != null && !key.startsWith(prefix))
- break;
- if (delimiter != null && key.indexOf(delimiter) != -1)
+ String remain = key;
+ nextMarker = key;
+ if (prefix != null) {
+ if (!key.startsWith(prefix))
+ break;
+ remain = key.substring(prefix.length());
+ }
+ if (delimiter != null && remain.indexOf(delimiter) != -1)
continue;
+ if (log.isDebugEnabled())
+ log("include key=" + key);
w.start("Contents");
w.start("Key").write(key).end();
w.start("LastModified").write(e.getLastModified()).end();
@@ -154,8 +165,12 @@
hw.start("Name").write("localhost").end();
hw.start("Prefix").write(s(prefix)).end();
hw.start("Marker").write(s(marker)).end();
- if (delimiter != null)
+ if (delimiter != null) {
hw.start("Delimiter").write(delimiter).end();
+ if (truncated) {
+ hw.start("NextMarker").write(nextMarker).end();
+ }
+ }
hw.start("IsTruncated").write(String.valueOf(truncated)).end();
if (maxKeysStr != null)
hw.start("MaxKeys").write(maxKeysStr).end();
@@ -164,7 +179,6 @@
PrintWriter pw = resp.getWriter();
pw.write(hw.toString());
- log(hw.toString());
pw.flush();
bucket = true;
}
@@ -178,25 +192,40 @@
@Override
protected void doHead(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
- System.out.println(req);
+ URI uri = uri(req);
+ if (log.isDebugEnabled())
+ log("doHead " + uri);
+ if (map.containsKey(entry(req))) {
+ log("found");
+ resp.sendError(HttpURLConnection.HTTP_OK, "Found URI");
+ } else {
+ log("not found");
+ resp.sendError(404, "Not found");
+ }
}
+
+ private URI uri(HttpServletRequest req) {
+ try {
+ return new URI(req.getRequestURI());
+ } catch (URISyntaxException e1) {
+ throw new RuntimeException(e1);
+ }
+ }
+
+ private String key(URI uri) {
+ return uri.getPath().substring(1);
+ }
@Override
protected void doPut(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
- URI uri;
- try {
- uri = new URI(req.getRequestURI());
- } catch (URISyntaxException e1) {
- throw new RuntimeException(e1);
- }
+ URI uri = uri(req);
+ log("doPut " + uri);
if ("/".equals(uri.getPath())) {
log("create bucket");
bucket = true;
} else {
- log("URI " + uri.getPath());
- String key = uri.getPath().substring(1);
- Entry e = new Entry(key);
+ Entry e = new Entry(key(uri));
e.setLastModified(new Date());
e.setSize(req.getContentLength());
e.setOwner(new Owner("id", "name"));
@@ -217,13 +246,25 @@
for (String n : Collections.list(names))
h.put(n, req.getHeader(n));
s3.setMetadata(h.extractMetadata());
- log("PUT '" + e + "' as: " + s3);
+ log("put '" + e + "' as: " + s3);
}
- System.out.println(req);
}
- public void log(String log) {
- System.err.println(log);
+ public void log(String s) {
+ log.debug(s);
}
+ /**
+ * Returns a debug <code>String</code>.
+ */
+ @Override
+ public String toString()
+ {
+ return super.toString() +
+ " bucket=" + this.bucket +
+ " ss=" + this.ss +
+ " map=" + this.map +
+ "";
+ }
+
}
Modified: amazon-s3/trunk/src/test/java/com/amazon/s3/CanonicalStringTest.java
===================================================================
--- amazon-s3/trunk/src/test/java/com/amazon/s3/CanonicalStringTest.java 2008-03-19 22:46:38 UTC (rev 5450)
+++ amazon-s3/trunk/src/test/java/com/amazon/s3/CanonicalStringTest.java 2008-03-20 00:00:45 UTC (rev 5451)
@@ -25,7 +25,7 @@
String canon = "XXYasfajkjaslkfdjalksjflkasjflkajskfjasjflksadjflksajfdkljsadlkfjaslkfd";
Key key = CanonicalString.key(access);
String encode = CanonicalString.encode(key, canon);
- assertEquals("p5p5Y89Qhhaitcesa/l03whnQhw=", encode);
+ assertEquals("hAu+ibd/CZIw6/5OR69i2+40bfc=", encode);
}
@Test
Modified: amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java
===================================================================
--- amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java 2008-03-19 22:46:38 UTC (rev 5450)
+++ amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java 2008-03-20 00:00:45 UTC (rev 5451)
@@ -8,18 +8,11 @@
// this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
// affiliates.
-import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
-import java.net.MalformedURLException;
import java.util.Arrays;
import java.util.List;
-import org.apache.commons.httpclient.HttpClient;
-import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.URI;
-import org.apache.commons.httpclient.methods.EntityEnclosingMethod;
-import org.apache.commons.httpclient.methods.StringRequestEntity;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -104,7 +97,7 @@
assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
// TODO
- // verifyBucketResponseParameters(listBucketResponse, bucket, "", "", 1, "/", true, "example.txt");
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", 1, "/", true, "example.txt");
// root "directory" with a max-keys of "2"
listBucketResponse = conn.list(bucket, null, null, 2, "/", null);
@@ -125,8 +118,7 @@
// test "directory"
listBucketResponse = conn.list(bucket, "test/", null, null, "/", null);
listBucketResponse.assertOk();
- // TODO
- // assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
+ assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
verifyBucketResponseParameters(listBucketResponse, bucket, "test/", "", UnspecifiedMaxKeys, "/", false, null);
Modified: amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java
===================================================================
--- amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java 2008-03-19 22:46:38 UTC (rev 5450)
+++ amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java 2008-03-20 00:00:45 UTC (rev 5451)
@@ -34,6 +34,9 @@
@Before
public void setUp() {
+ if (emulated)
+ return;
+
if (awsAccessKeyId == null)
throw new IllegalStateException("accessKey system propery null");
if (awsSecretAccessKey == null)
JBoss Cache SVN: r5450 - in amazon-s3/trunk: src/main/java/com/amazon/s3 and 3 other directories.
by jbosscache-commits@lists.jboss.org
Author: genman
Date: 2008-03-19 18:46:38 -0400 (Wed, 19 Mar 2008)
New Revision: 5450
Added:
amazon-s3/trunk/src/main/java/com/amazon/s3/ISO801DateFormat.java
amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/
amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Writer.java
amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java
Modified:
amazon-s3/trunk/pom.xml
amazon-s3/trunk/src/main/java/com/amazon/s3/CallingFormat.java
amazon-s3/trunk/src/main/java/com/amazon/s3/Connection.java
amazon-s3/trunk/src/main/java/com/amazon/s3/Entry.java
amazon-s3/trunk/src/main/java/com/amazon/s3/ListAllBucketsResponse.java
amazon-s3/trunk/src/main/java/com/amazon/s3/ListResponse.java
amazon-s3/trunk/src/main/java/com/amazon/s3/Owner.java
amazon-s3/trunk/src/main/java/com/amazon/s3/S3Object.java
amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java
amazon-s3/trunk/src/test/resources/log4j.xml
Log:
Initial emulator; minor changes in client to support emulator
Modified: amazon-s3/trunk/pom.xml
===================================================================
--- amazon-s3/trunk/pom.xml 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/pom.xml 2008-03-19 22:46:38 UTC (rev 5450)
@@ -49,6 +49,18 @@
<scope>test</scope>
</dependency>
<dependency>
+ <groupId>net.noderunner</groupId>
+ <artifactId>http</artifactId>
+ <version>1.0</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>2.3</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.0.1</version>
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/CallingFormat.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/CallingFormat.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/CallingFormat.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -139,6 +139,7 @@
}
static private class VanityCallingFormat extends SubdomainCallingFormat {
+ @Override
public String getServer(String server, Bucket bucket) {
return bucket.getName();
}
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/Connection.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/Connection.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/Connection.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -128,7 +128,7 @@
String server, CallingFormat format) {
this(awsAccessKeyId, awsSecretAccessKey, isSecure, server,
isSecure ? SECURE_PORT : INSECURE_PORT,
- CallingFormat.SUBDOMAIN);
+ format);
}
/**
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/Entry.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/Entry.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/Entry.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -14,7 +14,7 @@
/**
* A structure representing a single object stored in S3.
*/
-public class Entry {
+public class Entry implements Comparable<Entry> {
private String key;
@@ -35,9 +35,16 @@
}
/**
+ * Constructs a new Entry.
+ */
+ public Entry(String key) {
+ setKey(key);
+ }
+
+ /**
* Sets lastModified.
*/
- void setLastModified(Date lastModified) {
+ public void setLastModified(Date lastModified) {
this.lastModified = lastModified;
}
@@ -87,7 +94,7 @@
/**
* Sets owner.
*/
- void setOwner(Owner owner) {
+ public void setOwner(Owner owner) {
this.owner = owner;
}
@@ -101,7 +108,7 @@
/**
* Sets size.
*/
- void setSize(long size) {
+ public void setSize(long size) {
this.size = size;
}
@@ -125,4 +132,28 @@
public String getStorageClass() {
return storageClass;
}
+
+ /**
+ * Returns true if other is an entry with the same key.
+ */
+ @Override
+ public boolean equals(Object other) {
+ Entry entry = (Entry)other;
+ return key.equals(entry.key);
+ }
+
+ /**
+ * Calculates hash using the key.
+ */
+ @Override
+ public int hashCode() {
+ return key.hashCode();
+ }
+
+ /**
+ * Compares by key name.
+ */
+ public int compareTo(Entry other) {
+ return key.compareTo(other.key);
+ }
}
Added: amazon-s3/trunk/src/main/java/com/amazon/s3/ISO801DateFormat.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/ISO801DateFormat.java (rev 0)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/ISO801DateFormat.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -0,0 +1,18 @@
+package com.amazon.s3;
+
+import java.text.SimpleDateFormat;
+import java.util.TimeZone;
+
+/**
+ * Date format used by Amazon S3.
+ * @author Elias Ross
+ */
+@SuppressWarnings("serial")
+public class ISO801DateFormat extends SimpleDateFormat {
+
+ public ISO801DateFormat() {
+ super("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
+ setTimeZone(TimeZone.getTimeZone("UTC"));
+ }
+
+}
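A short usage sketch of the formatter as committed; since it extends SimpleDateFormat it is not thread safe, which is why the emulator's Writer (below) creates a new instance per call:

import java.util.Date;

import com.amazon.s3.ISO801DateFormat;

public class DateFormatExample {
    public static void main(String[] args) {
        // Produces the UTC ISO 8601 style used in S3 listings,
        // e.g. 2008-03-19T22:46:38.000Z
        String stamp = new ISO801DateFormat().format(new Date());
        System.out.println(stamp);
    }
}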
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/ListAllBucketsResponse.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/ListAllBucketsResponse.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/ListAllBucketsResponse.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -15,7 +15,6 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-import java.util.TimeZone;
import org.apache.commons.httpclient.HttpMethod;
import org.xml.sax.Attributes;
@@ -51,9 +50,7 @@
public ListAllMyBucketsHandler() {
super();
- this.iso8601Parser = new SimpleDateFormat(
- "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
- this.iso8601Parser.setTimeZone(TimeZone.getTimeZone("UTC"));
+ this.iso8601Parser = new ISO801DateFormat();
this.currText = new StringBuilder();
}
@@ -68,6 +65,8 @@
if (name.equals("Bucket")) {
entries.add(this.currBucket);
} else if (name.equals("Name")) {
+ if (currBucket == null)
+ throw new IllegalStateException();
this.currBucket.setName(this.currText.toString());
} else if (name.equals("CreationDate")) {
try {
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/ListResponse.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/ListResponse.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/ListResponse.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -14,7 +14,6 @@
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.List;
-import java.util.TimeZone;
import org.apache.commons.httpclient.HttpMethod;
import org.xml.sax.Attributes;
@@ -113,8 +112,7 @@
super();
entries = new ArrayList<Entry>();
commonPrefixEntries = new ArrayList<CommonPrefixEntry>();
- this.iso8601Parser = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");
- this.iso8601Parser.setTimeZone(TimeZone.getTimeZone("UTC"));
+ this.iso8601Parser = new ISO801DateFormat();
this.currText = new StringBuilder();
}
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/Owner.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/Owner.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/Owner.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -21,6 +21,14 @@
}
/**
+ * Constructs a new Owner.
+ */
+ public Owner(String id, String displayName) {
+ this.id = id;
+ this.displayName = displayName;
+ }
+
+ /**
* Sets displayName.
*/
void setDisplayName(String displayName) {
Modified: amazon-s3/trunk/src/main/java/com/amazon/s3/S3Object.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/S3Object.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/S3Object.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -70,6 +70,13 @@
}
/**
+ * Sets the metadata.
+ */
+ public void setMetadata(Headers metadata) {
+ this.metadata = metadata;
+ }
+
+ /**
* Returns a debug string.
*/
@Override
Added: amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java (rev 0)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Server.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -0,0 +1,229 @@
+package com.amazon.s3.emulator;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.Date;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import javax.servlet.ServletException;
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import net.noderunner.http.servlet.ServletServer;
+
+import com.amazon.s3.Entry;
+import com.amazon.s3.Headers;
+import com.amazon.s3.Owner;
+import com.amazon.s3.S3Object;
+
+public class Server extends HttpServlet {
+
+ private static final long serialVersionUID = 1L;
+
+ private ServletServer ss;
+ private boolean bucket = false;
+ private SortedMap<Entry, S3Object> map = new TreeMap<Entry, S3Object>();
+
+ public Server() throws IOException {
+ ss = new ServletServer(this);
+ }
+
+ /**
+ * Closes socket, stops accepting requests.
+ */
+ public void close() throws IOException {
+ ss.close();
+ }
+
+ @Override
+ protected void doDelete(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ URI uri;
+ try {
+ uri = new URI(req.getRequestURI());
+ } catch (URISyntaxException e1) {
+ throw new RuntimeException(e1);
+ }
+ String key = uri.getPath().substring(1);
+ Entry e = new Entry(key);
+ S3Object remove = map.remove(e);
+ if (remove == null) {
+ resp.sendError(404, "Not found " + key);
+ } else {
+ resp.sendError(HttpURLConnection.HTTP_NO_CONTENT, "Deleted");
+ }
+
+ }
+
+ /**
+ * Listening port.
+ */
+ public int getPort() {
+ return ss.getPort();
+ }
+
+ /**
+ * Starts accepting requests.
+ */
+ public void start() {
+ ss.start();
+ }
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ URI uri;
+ try {
+ uri = new URI(req.getRequestURI());
+ } catch (URISyntaxException e1) {
+ throw new RuntimeException(e1);
+ }
+ if ("/".equals(uri.getPath())) {
+ list(req, resp);
+ } else {
+ String key = uri.getPath().substring(1);
+ Entry e = new Entry(key);
+ S3Object obj = map.get(e);
+ if (obj == null) {
+ resp.sendError(404, "Not here: " + e);
+ return;
+ }
+ Headers h = new Headers();
+ h = h.mergeMetadata(obj.getMetadata());
+ log("Headers " + obj);
+ log("Headers " + h);
+ for (Map.Entry<String, List<String>> me : h.getHeaders().entrySet()) {
+ for (String v : me.getValue()) {
+ resp.setHeader(me.getKey(), v);
+ }
+ }
+ resp.getOutputStream().write(obj.getData());
+ }
+
+ }
+
+ private void list(HttpServletRequest req, HttpServletResponse resp) throws IOException {
+ String prefix = req.getParameter("prefix");
+ String marker = req.getParameter("marker");
+ String delimiter = req.getParameter("delimiter");
+ String maxKeysStr = req.getParameter("max-keys");
+ int maxKeys = Integer.MAX_VALUE;
+ if (maxKeysStr != null)
+ maxKeys = Integer.parseInt(maxKeysStr);
+ Writer w = new Writer();
+ Map<Entry, S3Object> submap = map;
+ if (prefix != null)
+ submap = map.tailMap(new Entry(prefix));
+ int keyCount = 0;
+ boolean truncated = false;
+ for (Entry e : submap.keySet()) {
+ if (++keyCount > maxKeys) {
+ truncated = true;
+ break;
+ }
+ String key = e.getKey();
+ if (prefix != null && !key.startsWith(prefix))
+ break;
+ if (delimiter != null && key.indexOf(delimiter) != -1)
+ continue;
+ w.start("Contents");
+ w.start("Key").write(key).end();
+ w.start("LastModified").write(e.getLastModified()).end();
+ w.start("Size").write(e.getSize()).end();
+ w.start("Owner");
+ w.start("ID").write(e.getOwner().getId()).end()
+ .start("DisplayName").write(e.getOwner().getDisplayName())
+ .end();
+ w.end();
+ w.end();
+ }
+
+ Writer hw = new Writer();
+ hw.start("ListBucketResult");
+ hw.start("Name").write("localhost").end();
+ hw.start("Prefix").write(s(prefix)).end();
+ hw.start("Marker").write(s(marker)).end();
+ if (delimiter != null)
+ hw.start("Delimiter").write(delimiter).end();
+ hw.start("IsTruncated").write(String.valueOf(truncated)).end();
+ if (maxKeysStr != null)
+ hw.start("MaxKeys").write(maxKeysStr).end();
+ hw.write(w);
+ hw.end();
+
+ PrintWriter pw = resp.getWriter();
+ pw.write(hw.toString());
+ log(hw.toString());
+ pw.flush();
+ bucket = true;
+ }
+
+ private String s(String s) {
+ if (s == null)
+ return "";
+ return s;
+ }
+
+ @Override
+ protected void doHead(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ System.out.println(req);
+ }
+
+ @Override
+ protected void doPut(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ URI uri;
+ try {
+ uri = new URI(req.getRequestURI());
+ } catch (URISyntaxException e1) {
+ throw new RuntimeException(e1);
+ }
+ if ("/".equals(uri.getPath())) {
+ log("create bucket");
+ bucket = true;
+ } else {
+ log("URI " + uri.getPath());
+ String key = uri.getPath().substring(1);
+ Entry e = new Entry(key);
+ e.setLastModified(new Date());
+ e.setSize(req.getContentLength());
+ e.setOwner(new Owner("id", "name"));
+ ByteArrayOutputStream os = new ByteArrayOutputStream();
+ ServletInputStream is = req.getInputStream();
+ byte b[] = new byte[128];
+ while (true) {
+ int len = is.read(b);
+ if (len == -1)
+ break;
+ os.write(b, 0, len);
+ }
+ S3Object s3 = new S3Object(os.toByteArray());
+ map.put(e, s3);
+ Headers h = new Headers();
+ @SuppressWarnings("unchecked")
+ Enumeration<String> names = req.getHeaderNames();
+ for (String n : Collections.list(names))
+ h.put(n, req.getHeader(n));
+ s3.setMetadata(h.extractMetadata());
+ log("PUT '" + e + "' as: " + s3);
+ }
+ System.out.println(req);
+ }
+
+ public void log(String log) {
+ System.err.println(log);
+ }
+
+}
Added: amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Writer.java
===================================================================
--- amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Writer.java (rev 0)
+++ amazon-s3/trunk/src/main/java/com/amazon/s3/emulator/Writer.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -0,0 +1,50 @@
+package com.amazon.s3.emulator;
+
+import java.util.Date;
+import java.util.Stack;
+
+import com.amazon.s3.ISO801DateFormat;
+
+public class Writer {
+
+ Stack<String> tags = new Stack<String>();
+
+ StringBuilder sb = new StringBuilder();
+
+ public Writer start(String tag) {
+ sb.append("<").append(tag).append(">");
+ tags.push(tag);
+ return this;
+ }
+
+ public Writer end() {
+ String tag = tags.pop();
+ sb.append("</").append(tag).append(">");
+ return this;
+ }
+
+ public Writer write(String s) {
+ s = s.replaceAll("&", "&amp;");
+ s = s.replaceAll("<", "&lt;");
+ sb.append(s);
+ return this;
+ }
+
+ public Writer write(Writer w) {
+ sb.append(w.toString());
+ return this;
+ }
+
+ public String toString() {
+ return sb.toString();
+ }
+
+ public Writer write(Date lastModified) {
+ return write(new ISO801DateFormat().format(lastModified));
+ }
+
+ public Writer write(long size) {
+ return write(String.valueOf(size));
+ }
+
+}
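A minimal usage sketch of this Writer: end() closes whichever tag was opened most recently, so calls can be chained the way Server.list() does above.

import com.amazon.s3.emulator.Writer;

public class WriterExample {
    public static void main(String[] args) {
        Writer w = new Writer();
        w.start("Contents");
        w.start("Key").write("example.txt").end();
        w.start("Size").write(14L).end();
        w.end();
        // Prints: <Contents><Key>example.txt</Key><Size>14</Size></Contents>
        System.out.println(w);
    }
}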
Added: amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java
===================================================================
--- amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java (rev 0)
+++ amazon-s3/trunk/src/test/java/com/amazon/s3/S3EmulatorTest.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -0,0 +1,363 @@
+package com.amazon.s3;
+// This software code is made available "AS IS" without warranties of any
+// kind. You may copy, display, modify and redistribute the software
+// code either by itself or as incorporated into your code; provided that
+// you do not remove any proprietary notices. Your use of this software
+// code is at your own risk and you waive any claim against Amazon
+// Digital Services, Inc. or its affiliates with respect to your use of
+// this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
+// affiliates.
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.HttpMethod;
+import org.apache.commons.httpclient.URI;
+import org.apache.commons.httpclient.methods.EntityEnclosingMethod;
+import org.apache.commons.httpclient.methods.StringRequestEntity;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.amazon.s3.emulator.Server;
+
+public class S3EmulatorTest {
+
+ private static final int UnspecifiedMaxKeys = -1;
+
+ private Server s;
+
+ private Bucket bucket;
+
+ private int port;
+
+ @Before
+ public void setUp() throws Exception {
+ s = new Server();
+ s.start();
+ bucket = new Bucket("localhost");
+ port = s.getPort();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ s.close();
+ }
+
+ @Test
+ public void testMe() throws Exception {
+ String awsAccessKeyId = "512341324124";
+ String awsSecretAccessKey = "512351234123";
+ CallingFormat format = CallingFormat.VANITY;
+ String location = null;
+ Connection conn = new Connection(awsAccessKeyId, awsSecretAccessKey, false, "localhost", port, format);
+
+ Response response = conn.create(bucket, location, null);
+ response.assertOk();
+
+ ListResponse listBucketResponse = conn.list(bucket);
+ listBucketResponse.assertOk();
+ assertEquals("list wasn't empty " + listBucketResponse, 0, listBucketResponse.getEntries().size());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", UnspecifiedMaxKeys, null, false, null);
+
+ // start delimiter tests
+
+ final String text = "this is a test";
+ final String key = "example.txt";
+ final String innerKey = "test/inner.txt";
+ final String lastKey = "z-last-key.txt";
+
+ response = conn.put(bucket, key, new S3Object(text));
+ response.assertOk();
+
+ response = conn.put(bucket, innerKey, new S3Object(text));
+ response.assertOk();
+
+ response = conn.put(bucket, lastKey, new S3Object(text));
+ response.assertOk();
+
+ // plain list
+ listBucketResponse = conn.list(bucket);
+ listBucketResponse.assertOk();
+ assertEquals("Unexpected list size", 3, listBucketResponse.getEntries().size());
+ assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", UnspecifiedMaxKeys, null, false, null);
+
+ System.out.println("LIST " + listBucketResponse.getEntries());
+
+ // root "directory"
+ listBucketResponse = conn.list(bucket, null, null, null, "/", null);
+ listBucketResponse.assertOk();
+ assertEquals("Unexpected list size " + listBucketResponse, 2, listBucketResponse.getEntries().size());
+ // TODO
+ // assertEquals("Unexpected common prefix size", 1, listBucketResponse.getCommonPrefixEntries().size());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", UnspecifiedMaxKeys, "/", false, null);
+
+ // root "directory" with a max-keys of "1"
+ listBucketResponse = conn.list(bucket, null, null, 1, "/", null);
+ listBucketResponse.assertOk();
+ assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
+ assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
+ // TODO
+ // verifyBucketResponseParameters(listBucketResponse, bucket, "", "", 1, "/", true, "example.txt");
+
+ // root "directory" with a max-keys of "2"
+ listBucketResponse = conn.list(bucket, null, null, 2, "/", null);
+ listBucketResponse.assertOk();
+ assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
+ // TODO
+ // assertEquals("Unexpected common prefix size", 1, listBucketResponse.getCommonPrefixEntries().size());
+ // TODO
+ // verifyBucketResponseParameters(listBucketResponse, bucket, "", "", 2, "/", true, "test/");
+ String marker = listBucketResponse.getNextMarker();
+ listBucketResponse = conn.list(bucket, null, marker, 2, "/", null);
+ listBucketResponse.assertOk();
+ assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
+ assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
+ // TODO
+ // verifyBucketResponseParameters(listBucketResponse, bucket, "", marker, 2, "/", false, null);
+
+ // test "directory"
+ listBucketResponse = conn.list(bucket, "test/", null, null, "/", null);
+ listBucketResponse.assertOk();
+ // TODO
+ // assertEquals("Unexpected list size", 1, listBucketResponse.getEntries().size());
+ assertEquals("Unexpected common prefix size", 0, listBucketResponse.getCommonPrefixEntries().size());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "test/", "", UnspecifiedMaxKeys, "/", false, null);
+
+ // remove innerkey
+ response = conn.delete(bucket, innerKey, null);
+ assertEquals(
+ "couldn't delete entry",
+ HttpURLConnection.HTTP_NO_CONTENT,
+ response.getResponseCode());
+
+ // remove last key
+ response = conn.delete(bucket, lastKey, null);
+ assertEquals(
+ "couldn't delete entry",
+ HttpURLConnection.HTTP_NO_CONTENT,
+ response.getResponseCode());
+
+
+ // end delimiter tests
+
+ response = conn.put(bucket, key, new S3Object(text.getBytes(), null), null);
+ response.assertOk();
+
+ Headers metadata = new Headers();
+ metadata.put("title", "title");
+ response = conn.put(bucket, key, new S3Object(text.getBytes(), metadata), null);
+ response.assertOk();
+
+ GetResponse getResponse = conn.get(bucket, key, null);
+ getResponse.assertOk();
+ assertEquals("didn't get the right data back", text.getBytes(), getResponse.getObject().getData());
+ assertEquals("didn't get the right metadata back", 1, getResponse.getObject().getMetadata().size());
+ assertEquals(
+ "didn't get the right metadata back",
+ "title",
+ getResponse.getObject().getMetadata().getValue("title"));
+ assertEquals(
+ "didn't get the right content-length",
+ ""+text.length(),
+ getResponse.getHeaderField("Content-Length"));
+
+ GetStreamResponse streamResponse = conn.getStream(bucket, key);
+ InputStream is = streamResponse.getInputStream();
+ byte b[] = new byte[text.length()];
+ int len = is.read(b);
+ assertEquals("didn't get the right data back " + len, text.getBytes(), b);
+ streamResponse.release();
+
+ String titleWithSpaces = " \t title with leading and trailing spaces ";
+ Headers h = new Headers();
+ h.put("title", titleWithSpaces);
+ response = conn.put(bucket, key, new S3Object(text.getBytes(), h), null);
+ assertEquals(
+ "couldn't put metadata with leading and trailing spaces",
+ HttpURLConnection.HTTP_OK,
+ response.getResponseCode());
+
+ getResponse = conn.get(bucket, key, null);
+ assertEquals(
+ "couldn't get object",
+ HttpURLConnection.HTTP_OK,
+ getResponse.getResponseCode());
+ assertEquals("didn't get the right metadata back", getResponse.getObject().getMetadata().size(), 1);
+ assertEquals(
+ "didn't get the right metadata back",
+ titleWithSpaces.trim(),
+ getResponse.getObject().getMetadata().getValue("title"));
+
+ String weirdKey = "&weird+%";
+ response = conn.put(bucket, weirdKey, new S3Object(text.getBytes()));
+ assertEquals(
+ "couldn't put weird key",
+ HttpURLConnection.HTTP_OK,
+ response.getResponseCode());
+
+ getResponse = conn.get(bucket, weirdKey, null);
+ assertEquals(
+ "couldn't get weird key",
+ HttpURLConnection.HTTP_OK,
+ getResponse.getResponseCode());
+
+ // start acl test
+
+ getResponse = conn.getACL(bucket, key, null);
+ assertEquals(
+ "couldn't get acl",
+ HttpURLConnection.HTTP_OK,
+ getResponse.getResponseCode());
+
+ byte[] acl = getResponse.getObject().getData();
+
+ response = conn.putACL(bucket, key, new String(acl), null);
+ assertEquals(
+ "couldn't put acl",
+ HttpURLConnection.HTTP_OK,
+ response.getResponseCode());
+
+ getResponse = conn.getACL(bucket, null);
+ assertEquals(
+ "couldn't get bucket acl",
+ HttpURLConnection.HTTP_OK,
+ getResponse.getResponseCode());
+
+ byte[] bucketACL = getResponse.getObject().getData();
+
+ response = conn.putACL(bucket, new String(bucketACL), null);
+ assertEquals(
+ "couldn't put bucket acl",
+ HttpURLConnection.HTTP_OK,
+ response.getResponseCode());
+
+ // end acl test
+
+ // bucket logging tests
+ getResponse = conn.getBucketLogging(bucket, null);
+ assertEquals(
+ "couldn't get bucket logging config",
+ HttpURLConnection.HTTP_OK,
+ getResponse.getResponseCode());
+
+ byte[] bucketLogging = getResponse.getObject().getData();
+
+ response = conn.putBucketLogging(bucket, new String(bucketLogging), null);
+ assertEquals(
+ "couldn't put bucket logging config",
+ HttpURLConnection.HTTP_OK,
+ response.getResponseCode());
+
+ // end bucket logging tests
+
+ listBucketResponse = conn.list(bucket, null, null, null, null);
+ assertEquals(
+ "couldn't list bucket",
+ HttpURLConnection.HTTP_OK,
+ listBucketResponse.getResponseCode());
+ List<Entry> entries = listBucketResponse.getEntries();
+ assertEquals("didn't get back the right number of entries", 2, entries.size());
+ // depends on weirdKey < $key
+ assertEquals("first key isn't right", weirdKey, ((Entry)entries.get(0)).getKey());
+ assertEquals("second key isn't right", key, ((Entry)entries.get(1)).getKey());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", UnspecifiedMaxKeys, null, false, null);
+
+ listBucketResponse = conn.list(bucket, null, null, new Integer(1), null);
+ assertEquals(
+ "couldn't list bucket",
+ HttpURLConnection.HTTP_OK,
+ listBucketResponse.getResponseCode());
+ assertEquals(
+ "didn't get back the right number of entries",
+ 1,
+ listBucketResponse.getEntries().size());
+ verifyBucketResponseParameters(listBucketResponse, bucket, "", "", 1, null, true, null);
+
+ for (Entry entry : entries) {
+ response = conn.delete(bucket, entry.getKey(), null);
+ assertEquals(
+ "couldn't delete entry",
+ HttpURLConnection.HTTP_NO_CONTENT,
+ response.getResponseCode());
+ }
+
+ /* TODO
+ ListAllBucketsResponse listAllMyBucketsResponse = conn.listAllBuckets();
+ assertEquals(
+ "couldn't list all my buckets",
+ HttpURLConnection.HTTP_OK,
+ listAllMyBucketsResponse.getResponseCode());
+ List<Bucket> buckets = listAllMyBucketsResponse.getEntries();
+
+ response = conn.delete(bucket);
+ assertEquals(
+ "couldn't delete bucket",
+ HttpURLConnection.HTTP_NO_CONTENT,
+ response.getResponseCode());
+
+ listAllMyBucketsResponse = conn.listAllBuckets();
+ assertEquals(
+ "couldn't list all my buckets",
+ HttpURLConnection.HTTP_OK,
+ listAllMyBucketsResponse.getResponseCode());
+ assertEquals(
+ "bucket count is incorrect",
+ buckets.size() - 1,
+ listAllMyBucketsResponse.getEntries().size());
+ */
+
+ }
+
+ private static void verifyBucketResponseParameters( ListResponse listBucketResponse,
+ Bucket bucket, String prefix, String marker,
+ int maxKeys, String delimiter, boolean isTruncated,
+ String nextMarker ) {
+ assertEquals("Bucket name should match.", bucket.getName(), listBucketResponse.getName());
+ assertEquals("Bucket prefix should match.", prefix, listBucketResponse.getPrefix());
+ assertEquals("Bucket marker should match.", marker, listBucketResponse.getMarker());
+ assertEquals("Bucket delimiter should match.", delimiter, listBucketResponse.getDelimiter());
+ if ( UnspecifiedMaxKeys != maxKeys ) {
+ assertEquals("Bucket max-keys should match.", maxKeys, listBucketResponse.getMaxKeys());
+ }
+ assertEquals("Bucket should not be truncated.", isTruncated, listBucketResponse.isTruncated());
+ assertEquals("Bucket nextMarker should match.", nextMarker, listBucketResponse.getNextMarker());
+ }
+
+
+ private static void assertEquals(String message, int expected, int actual) {
+ if (expected != actual) {
+ throw new RuntimeException(message + ": expected " + expected + " but got " + actual);
+ }
+ }
+
+ private static void assertEquals(String message, byte[] expected, byte[] actual) {
+ if (! Arrays.equals(expected, actual)) {
+ throw new RuntimeException(
+ message +
+ ": expected " +
+ new String(expected) +
+ " but got " +
+ new String(actual));
+ }
+ }
+
+ private static void assertEquals(String message, Object expected, Object actual) {
+ if (expected != actual && (actual == null || ! actual.equals(expected))) {
+ throw new RuntimeException(message + ": expected " + expected + " but got " + actual);
+ }
+ }
+
+ private static void assertEquals(String message, boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new RuntimeException(message + ": expected " + expected + " but got " + actual);
+ }
+ }
+
+}
Modified: amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java
===================================================================
--- amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/test/java/com/amazon/s3/S3Test.java 2008-03-19 22:46:38 UTC (rev 5450)
@@ -27,6 +27,7 @@
String awsAccessKeyId = System.getProperty("accessKey");
String awsSecretAccessKey = System.getProperty("secretKey");
+ boolean emulated = Boolean.valueOf(System.getProperty("emulated", "true"));
Bucket bucket;
static final int UnspecifiedMaxKeys = -1;
@@ -45,6 +46,8 @@
@Test
public void testMe() throws Exception {
+ if (emulated)
+ return;
// test all operation for both regular and vanity domains
// regular: http://s3.amazonaws.com/key
// subdomain: http://bucket.s3.amazonaws.com/key
@@ -466,6 +469,8 @@
@Test
public void testDriver() throws Exception {
+ if (emulated)
+ return;
Bucket bucket = new Bucket(awsAccessKeyId.toLowerCase() + "-test-bucket");
String keyName = "KEY";
Modified: amazon-s3/trunk/src/test/resources/log4j.xml
===================================================================
--- amazon-s3/trunk/src/test/resources/log4j.xml 2008-03-19 10:24:18 UTC (rev 5449)
+++ amazon-s3/trunk/src/test/resources/log4j.xml 2008-03-19 22:46:38 UTC (rev 5450)
@@ -28,7 +28,7 @@
<level value="INFO"/>
</logger>
<logger name="httpclient.wire">
- <level value="INFO"/>
+ <level value="ALL"/>
</logger>
<logger name="org.apache.commons.httpclient">
<level value="INFO"/>
JBoss Cache SVN: r5449 - cacheloader-migration/trunk.
by jbosscache-commits@lists.jboss.org
Author: galder.zamarreno(a)jboss.com
Date: 2008-03-19 06:24:18 -0400 (Wed, 19 Mar 2008)
New Revision: 5449
Modified:
cacheloader-migration/trunk/
Log:
Added svn ignore for eclipse project files.
Property changes on: cacheloader-migration/trunk
___________________________________________________________________
Name: svn:ignore
- *.iml
*.ipr
*.iws
jbossdb*
derby.log
target
+ *.iml
*.ipr
*.iws
jbossdb*
derby.log
target
.classpath
.project
.settings
Build failed in Hudson: jboss-cache-core-jdk1.5 » JBoss Cache - Core Edition #305
by jboss-qa-internal@redhat.com
See https://hudson.jboss.org/hudson/job/jboss-cache-core-jdk1.5/org.jboss.cac...
Changes:
[manik.surtani(a)jboss.com] JBCACHE-1310 - Marshalling issue with optimistic prepare and an empty modification list
------------------------------------------
started
Building remotely on dev17-rhel4-x86_64
Reusing existing maven process
[INFO] Scanning for projects...
WAGON_VERSION: 1.0-beta-2
[INFO] ----------------------------------------------------------------------------
[INFO] Building JBoss Cache - Core Edition
[INFO] task-segment: [package]
[INFO] ----------------------------------------------------------------------------
[INFO] artifact org.apache.maven.plugins:maven-resources-plugin: checking for updates from Main Maven Repo
[INFO] artifact org.apache.maven.plugins:maven-resources-plugin: checking for updates from snapshots.repository.codehaus.org
[INFO] artifact org.apache.maven.plugins:maven-jar-plugin: checking for updates from Main Maven Repo
[INFO] artifact org.apache.maven.plugins:maven-jar-plugin: checking for updates from snapshots.repository.codehaus.org
[INFO] snapshot org.codehaus.mojo:emma-maven-plugin:1.0-SNAPSHOT: checking for updates from snapshots.jboss.org
[INFO] snapshot org.codehaus.mojo:emma-maven-plugin:1.0-SNAPSHOT: checking for updates from Main Maven Repo
[INFO] snapshot org.codehaus.mojo:emma-maven-plugin:1.0-SNAPSHOT: checking for updates from snapshots.repository.codehaus.org
[HUDSON] Archiving /home/hudson/hudson_workspace/workspace/jboss-cache-core-jdk1.5/./pom.xml
[INFO] ------------------------------------------------------------------------
[ERROR] BUILD ERROR
[INFO] ------------------------------------------------------------------------
[INFO] Error resolving version for 'org.codehaus.mojo:emma-maven-plugin': Plugin requires Maven version 2.0.8
[INFO] ------------------------------------------------------------------------
[INFO] For more information, run Maven with the -e switch
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 8 seconds
[INFO] Finished at: Wed Mar 19 06:16:53 EDT 2008
[INFO] Final Memory: 6M/32M
[INFO] ------------------------------------------------------------------------
Waiting for Hudson to finish collecting data
Sending e-mails to: dpospisi(a)redhat.com manik.surtani(a)jboss.com
Build was marked for publishing on https://hudson.jboss.org/hudson/
finished: FAILURE
JBoss Cache SVN: r5448 - core/trunk/src/main/java/org/jboss/cache/interceptors.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-03-18 21:05:11 -0400 (Tue, 18 Mar 2008)
New Revision: 5448
Modified:
core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticReplicationInterceptor.java
core/trunk/src/main/java/org/jboss/cache/interceptors/ReplicationInterceptor.java
Log:
JBCACHE-1310 - Marshalling issue with optimistic prepare and an empty modification list
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java 2008-03-19 00:31:19 UTC (rev 5447)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java 2008-03-19 01:05:11 UTC (rev 5448)
@@ -160,6 +160,32 @@
}
}
+ /**
+ * It does not make sense replicating a transaction method(commit, rollback, prepare) if one of the following:
+ * <pre>
+ * - call was not initiated here, but on other member of the cluster
+ * - there is no transaction. Why calling a commit if no transaction going on?
+ * - the current transaction did not modufy any data, so other members are not aware of it
+ * </pre>
+ */
+ protected boolean skipReplicationOfTransactionMethod(InvocationContext ctx)
+ {
+ GlobalTransaction gtx = ctx.getGlobalTransaction();
+ boolean isInitiatedHere = gtx != null && !gtx.isRemote();
+ if (trace) log.trace("isInitiatedHere? " + isInitiatedHere + "; gtx = " + gtx);
+ return !isTransactionalAndLocal(ctx) || !containsModifications(ctx);
+ }
+
+ /**
+ * The call runs in a transaction and it was initiated on this node of the cluster.
+ */
+ protected boolean isTransactionalAndLocal(InvocationContext ctx)
+ {
+ GlobalTransaction gtx = ctx.getGlobalTransaction();
+ boolean isInitiatedHere = gtx != null && !gtx.isRemote();
+ return isInitiatedHere && (ctx.getTransaction() != null);
+ }
+
protected boolean isSynchronous(Option option)
{
if (option != null)
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticReplicationInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticReplicationInterceptor.java 2008-03-19 00:31:19 UTC (rev 5447)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/OptimisticReplicationInterceptor.java 2008-03-19 01:05:11 UTC (rev 5448)
@@ -72,12 +72,15 @@
{
// pass up the chain.
Object retval = nextInterceptor(ctx);
- gtx = getGlobalTransaction(ctx);
+ if (!skipReplicationOfTransactionMethod(ctx))
+ {
+ gtx = getGlobalTransaction(ctx);
- if (!gtx.isRemote() && ctx.isOriginLocal())
- {
- // replicate the prepare call.
- broadcastPrepare(ctx.getMethodCall(), gtx, ctx);
+ if (!gtx.isRemote() && ctx.isOriginLocal())
+ {
+ // replicate the prepare call.
+ broadcastPrepare(ctx.getMethodCall(), gtx, ctx);
+ }
}
return retval;
}
@@ -93,7 +96,7 @@
//we dont do anything
try
{
- broadcastCommit(gtx, ctx);
+ if (!skipReplicationOfTransactionMethod(ctx)) broadcastCommit(gtx, ctx);
}
catch (Throwable t)
{
@@ -121,7 +124,7 @@
//we dont do anything
try
{
- broadcastRollback(gtx, ctx);
+ if (!skipReplicationOfTransactionMethod(ctx)) broadcastRollback(gtx, ctx);
}
catch (Throwable t)
{
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/ReplicationInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/ReplicationInterceptor.java 2008-03-19 00:31:19 UTC (rev 5447)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/ReplicationInterceptor.java 2008-03-19 01:05:11 UTC (rev 5448)
@@ -41,33 +41,22 @@
protected Object handleCommitMethod(InvocationContext ctx, GlobalTransaction gtx) throws Throwable
{
- if (skipReplciationOfTransactionMethod(ctx))
- {
- return nextInterceptor(ctx);
- }
- replicateCall(ctx, ctx.getMethodCall(), configuration.isSyncCommitPhase(), ctx.getOptionOverrides(), true);
+ if (!skipReplicationOfTransactionMethod(ctx))
+ replicateCall(ctx, ctx.getMethodCall(), configuration.isSyncCommitPhase(), ctx.getOptionOverrides(), true);
return nextInterceptor(ctx);
}
protected Object handlePrepareMethod(InvocationContext ctx, GlobalTransaction gtx, List modification, Address coordinator, boolean onePhaseCommit) throws Throwable
{
- if (skipReplciationOfTransactionMethod(ctx))
- {
- return nextInterceptor(ctx);
- }
Object retVal = nextInterceptor(ctx);
- runPreparePhase(ctx.getMethodCall(), gtx, ctx);
+ if (!skipReplicationOfTransactionMethod(ctx)) runPreparePhase(ctx.getMethodCall(), gtx, ctx);
return retVal;
}
protected Object handleRollbackMethod(InvocationContext ctx, GlobalTransaction gtx) throws Throwable
{
- if (skipReplciationOfTransactionMethod(ctx))
+ if (!skipReplicationOfTransactionMethod(ctx) && !ctx.isLocalRollbackOnly())
{
- return nextInterceptor(ctx);
- }
- if (!ctx.isLocalRollbackOnly())
- {
replicateCall(ctx, ctx.getMethodCall(), configuration.isSyncRollbackPhase(), ctx.getOptionOverrides());
}
return nextInterceptor(ctx);
@@ -88,32 +77,6 @@
}
}
- /**
- * It does not make sense replicating a transaction method(commit, rollback, prepare) if one of the following:
- * <pre>
- * - call was not initiated here, but on other member of the cluster
- * - there is no transaction. Why calling a commit if no transaction going on?
- * - the current transaction did not modufy any data, so other members are not aware of it
- * </pre>
- */
- private boolean skipReplciationOfTransactionMethod(InvocationContext ctx)
- {
- GlobalTransaction gtx = ctx.getGlobalTransaction();
- boolean isInitiatedHere = gtx != null && !gtx.isRemote();
- if (trace) log.trace("isInitiatedHere? " + isInitiatedHere + "; gtx = " + gtx);
- return !isTransactionalAndLocal(ctx) || !containsModifications(ctx);
- }
-
- /**
- * The call runs in a transaction and it was initiated on this node of the cluster.
- */
- private boolean isTransactionalAndLocal(InvocationContext ctx)
- {
- GlobalTransaction gtx = ctx.getGlobalTransaction();
- boolean isInitiatedHere = gtx != null && !gtx.isRemote();
- return isInitiatedHere && (ctx.getTransaction() != null);
- }
-
protected Object handlePutDataMethod(InvocationContext ctx, GlobalTransaction tx, Fqn fqn, Map data, boolean createUndoOps) throws Throwable
{
return handleCrudMethod(ctx);
Build failed in Hudson: jboss-cache-core-jdk1.5 » JBoss Cache - Core Edition #304
by jboss-qa-internal@redhat.com
See https://hudson.jboss.org/hudson/job/jboss-cache-core-jdk1.5/org.jboss.cac...
Changes:
[manik.surtani(a)jboss.com] JBCACHE-295 - Add more unit tests for ReplicationQueue
[manik.surtani(a)jboss.com] Updated to use parent 1.3-SNAPSHOT
------------------------------------------
started
Building remotely on dev17-rhel4-x86_64
$ /qa/tools/opt/jdk1.5.0_12/bin/java -Xmx256m -cp /home/hudson/hudson_workspace/maven-agent.jar:/qa/tools/opt/maven-2.0.6/boot/classworlds-1.1.jar hudson.maven.agent.Main /qa/tools/opt/maven-2.0.6 /qa/services/hudson/hudson_release/WEB-INF/slave.jar /home/hudson/hudson_workspace/maven-interceptor.jar
channel started
[INFO] Scanning for projects...
WAGON_VERSION: 1.0-beta-2
[INFO] ----------------------------------------------------------------------------
[INFO] Building JBoss Cache - Core Edition
[INFO] task-segment: [package]
[INFO] ----------------------------------------------------------------------------
[HUDSON] Archiving /home/hudson/hudson_workspace/workspace/jboss-cache-core-jdk1.5/./pom.xml
[INFO] ------------------------------------------------------------------------
[ERROR] BUILD ERROR
[INFO] ------------------------------------------------------------------------
[INFO] Error resolving version for 'org.codehaus.mojo:emma-maven-plugin': Plugin requires Maven version 2.0.8
[INFO] ------------------------------------------------------------------------
[INFO] For more information, run Maven with the -e switch
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 32 seconds
[INFO] Finished at: Tue Mar 18 21:04:30 EDT 2008
[INFO] Final Memory: 6M/32M
[INFO] ------------------------------------------------------------------------
Waiting for Hudson to finish collecting data
Sending e-mails to: dpospisi(a)redhat.com manik.surtani(a)jboss.com
Build was marked for publishing on https://hudson.jboss.org/hudson/
finished: FAILURE
JBoss Cache SVN: r5447 - in core/trunk/src: main/java/org/jboss/cache/cluster and 4 other directories.
by jbosscache-commits@lists.jboss.org
Author: manik.surtani(a)jboss.com
Date: 2008-03-18 20:31:19 -0400 (Tue, 18 Mar 2008)
New Revision: 5447
Added:
core/trunk/src/main/java/org/jboss/cache/cluster/
core/trunk/src/main/java/org/jboss/cache/cluster/ReplicationQueue.java
core/trunk/src/test/java/org/jboss/cache/cluster/
core/trunk/src/test/java/org/jboss/cache/cluster/ReplicationQueueTest.java
Removed:
core/trunk/src/main/java/org/jboss/cache/ReplicationQueue.java
Modified:
core/trunk/src/main/java/org/jboss/cache/factories/ReplicationQueueFactory.java
core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java
Log:
JBCACHE-295 - Add more unit tests for ReplicationQueue
Deleted: core/trunk/src/main/java/org/jboss/cache/ReplicationQueue.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/ReplicationQueue.java 2008-03-19 00:21:05 UTC (rev 5446)
+++ core/trunk/src/main/java/org/jboss/cache/ReplicationQueue.java 2008-03-19 00:31:19 UTC (rev 5447)
@@ -1,177 +0,0 @@
-/*
- * JBoss, the OpenSource J2EE webOS
- *
- * Distributable under LGPL license.
- * See terms of license at gnu.org.
- */
-package org.jboss.cache;
-
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.jboss.cache.config.Configuration;
-import org.jboss.cache.factories.annotations.Inject;
-import org.jboss.cache.factories.annotations.Start;
-import org.jboss.cache.factories.annotations.Stop;
-import org.jboss.cache.marshall.MethodCall;
-import org.jboss.cache.marshall.MethodCallFactory;
-import org.jboss.cache.marshall.MethodDeclarations;
-
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Timer;
-import java.util.TimerTask;
-
-
-/**
- * Periodically (or when certain size is exceeded) takes elements and replicates them.
- *
- * @author <a href="mailto:bela@jboss.org">Bela Ban</a> May 24, 2003
- * @version $Revision$
- */
-public class ReplicationQueue
-{
-
- private static Log log = LogFactory.getLog(ReplicationQueue.class);
-
- /**
- * We flush every 5 seconds. Inactive if -1 or 0
- */
- private long interval = 5000;
-
- /**
- * Max elements before we flush
- */
- private long max_elements = 500;
-
- /**
- * Holds the replication jobs: LinkedList<MethodCall>
- */
- private final List<MethodCall> elements = new LinkedList<MethodCall>();
-
- /**
- * For periodical replication
- */
- private Timer timer = null;
-
- /**
- * The timer task, only calls flush() when executed by Timer
- */
- private MyTask task = null;
- private RPCManager rpcManager;
- private Configuration configuration;
- private boolean enabled;
-
-
- public boolean isEnabled()
- {
- return enabled;
- }
-
- @Inject
- private void injectDependencies(RPCManager rpcManager, Configuration configuration)
- {
- this.rpcManager = rpcManager;
- this.configuration = configuration;
- enabled = configuration.isUseReplQueue() && (configuration.getBuddyReplicationConfig() == null || !configuration.getBuddyReplicationConfig().isEnabled());
- }
-
- /**
- * Starts the asynchronous flush queue.
- */
- @Start
- public synchronized void start()
- {
- this.interval = configuration.getReplQueueInterval();
- this.max_elements = configuration.getReplQueueMaxElements();
- // check again
- enabled = configuration.isUseReplQueue() && (configuration.getBuddyReplicationConfig() == null || !configuration.getBuddyReplicationConfig().isEnabled());
- if (enabled)
- {
- if (interval > 0)
- {
- if (task == null)
- task = new MyTask();
- if (timer == null)
- {
- timer = new Timer(true);
- timer.schedule(task,
- 500, // delay before initial flush
- interval); // interval between flushes
- }
- }
- }
- }
-
- /**
- * Stops the asynchronous flush queue.
- */
- @Stop
- public synchronized void stop()
- {
- if (task != null)
- {
- task.cancel();
- task = null;
- }
- if (timer != null)
- {
- timer.cancel();
- timer = null;
- }
- }
-
-
- /**
- * Adds a new method call.
- */
- public void add(MethodCall job)
- {
- if (job == null)
- throw new NullPointerException("job is null");
- synchronized (elements)
- {
- elements.add(job);
- if (elements.size() >= max_elements)
- flush();
- }
- }
-
- /**
- * Flushes existing method calls.
- */
- public void flush()
- {
- List<MethodCall> l;
- synchronized (elements)
- {
- if (log.isTraceEnabled())
- log.trace("flush(): flushing repl queue (num elements=" + elements.size() + ")");
- l = new ArrayList<MethodCall>(elements);
- elements.clear();
- }
-
- if (l.size() > 0)
- {
- try
- {
- // send to all live nodes in the cluster
- rpcManager.callRemoteMethods(null, MethodCallFactory.create(MethodDeclarations.replicateAllMethod_id, l), false, true, 5000, false);
- }
- catch (Throwable t)
- {
- log.error("failed replicating " + l.size() + " elements in replication queue", t);
- }
- }
- }
-
- class MyTask extends TimerTask
- {
- public void run()
- {
- flush();
- }
- }
-
-}
Added: core/trunk/src/main/java/org/jboss/cache/cluster/ReplicationQueue.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/cluster/ReplicationQueue.java (rev 0)
+++ core/trunk/src/main/java/org/jboss/cache/cluster/ReplicationQueue.java 2008-03-19 00:31:19 UTC (rev 5447)
@@ -0,0 +1,169 @@
+package org.jboss.cache.cluster;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.jboss.cache.RPCManager;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.factories.annotations.Inject;
+import org.jboss.cache.factories.annotations.Start;
+import org.jboss.cache.factories.annotations.Stop;
+import org.jboss.cache.marshall.MethodCall;
+import org.jboss.cache.marshall.MethodCallFactory;
+import org.jboss.cache.marshall.MethodDeclarations;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Timer;
+import java.util.TimerTask;
+
+/**
+ * Periodically (or when certain size is exceeded) takes elements and replicates them.
+ *
+ * @author <a href="mailto:bela@jboss.org">Bela Ban</a> May 24, 2003
+ * @version $Revision: 5197 $
+ */
+public class ReplicationQueue
+{
+
+ private static Log log = LogFactory.getLog(ReplicationQueue.class);
+
+ /**
+ * We flush every 5 seconds. Inactive if -1 or 0
+ */
+ private long interval = 5000;
+
+ /**
+ * Max elements before we flush
+ */
+ private long max_elements = 500;
+
+ /**
+ * Holds the replication jobs: LinkedList<MethodCall>
+ */
+ final List<MethodCall> elements = new LinkedList<MethodCall>();
+
+ /**
+ * For periodical replication
+ */
+ private Timer timer = null;
+
+ /**
+ * The timer task, only calls flush() when executed by Timer
+ */
+ private ReplicationQueue.MyTask task = null;
+ private RPCManager rpcManager;
+ private Configuration configuration;
+ private boolean enabled;
+
+
+ public boolean isEnabled()
+ {
+ return enabled;
+ }
+
+ @Inject
+ private void injectDependencies(RPCManager rpcManager, Configuration configuration)
+ {
+ this.rpcManager = rpcManager;
+ this.configuration = configuration;
+ enabled = configuration.isUseReplQueue() && (configuration.getBuddyReplicationConfig() == null || !configuration.getBuddyReplicationConfig().isEnabled());
+ }
+
+ /**
+ * Starts the asynchronous flush queue.
+ */
+ @Start
+ public synchronized void start()
+ {
+ this.interval = configuration.getReplQueueInterval();
+ this.max_elements = configuration.getReplQueueMaxElements();
+ // check again
+ enabled = configuration.isUseReplQueue() && (configuration.getBuddyReplicationConfig() == null || !configuration.getBuddyReplicationConfig().isEnabled());
+ if (enabled)
+ {
+ if (interval > 0)
+ {
+ if (task == null)
+ task = new ReplicationQueue.MyTask();
+ if (timer == null)
+ {
+ timer = new Timer(true);
+ timer.schedule(task,
+ 500, // delay before initial flush
+ interval); // interval between flushes
+ }
+ }
+ }
+ }
+
+ /**
+ * Stops the asynchronous flush queue.
+ */
+ @Stop
+ public synchronized void stop()
+ {
+ if (task != null)
+ {
+ task.cancel();
+ task = null;
+ }
+ if (timer != null)
+ {
+ timer.cancel();
+ timer = null;
+ }
+ }
+
+
+ /**
+ * Adds a new method call.
+ */
+ public void add(MethodCall job)
+ {
+ if (job == null)
+ throw new NullPointerException("job is null");
+ synchronized (elements)
+ {
+ elements.add(job);
+ if (elements.size() >= max_elements)
+ flush();
+ }
+ }
+
+ /**
+ * Flushes existing method calls.
+ */
+ public void flush()
+ {
+ List<MethodCall> l;
+ synchronized (elements)
+ {
+ if (log.isTraceEnabled())
+ log.trace("flush(): flushing repl queue (num elements=" + elements.size() + ")");
+ l = new ArrayList<MethodCall>(elements);
+ elements.clear();
+ }
+
+ if (l.size() > 0)
+ {
+ try
+ {
+ // send to all live nodes in the cluster
+ rpcManager.callRemoteMethods(null, MethodCallFactory.create(MethodDeclarations.replicateAllMethod_id, l), false, true, 5000, false);
+ }
+ catch (Throwable t)
+ {
+ log.error("failed replicating " + l.size() + " elements in replication queue", t);
+ }
+ }
+ }
+
+ class MyTask extends TimerTask
+ {
+ public void run()
+ {
+ flush();
+ }
+ }
+}
\ No newline at end of file
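The class above batches MethodCalls and flushes them either from the Timer or when add() crosses the max_elements threshold; flush() snapshots and clears the list under the lock before replicating. A minimal standalone sketch of that copy-and-clear pattern follows (this is not JBoss Cache code; Replicator is a hypothetical stand-in for RPCManager):

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

/** Minimal sketch of the copy-and-clear flush pattern used by ReplicationQueue. */
public class FlushQueueSketch
{
   /** Hypothetical stand-in for RPCManager.callRemoteMethods(...). */
   public interface Replicator
   {
      void replicate(List<String> batch);
   }

   private final List<String> elements = new LinkedList<String>();
   private final Replicator replicator;
   private final int maxElements;

   public FlushQueueSketch(Replicator replicator, int maxElements)
   {
      this.replicator = replicator;
      this.maxElements = maxElements;
   }

   public void add(String job)
   {
      synchronized (elements)
      {
         elements.add(job);
         // threshold reached: flush on the caller's thread, still inside the lock
         if (elements.size() >= maxElements)
            flush();
      }
   }

   public void flush()
   {
      List<String> snapshot;
      synchronized (elements)
      {
         // snapshot and clear under the lock so a concurrent add() never sees a half-flushed list
         snapshot = new ArrayList<String>(elements);
         elements.clear();
      }
      if (!snapshot.isEmpty())
         replicator.replicate(snapshot);
   }
}

If the periodic flush and a threshold-driven flush race, each works from its own snapshot; at worst one of them finds the list already empty and sends nothing.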
Modified: core/trunk/src/main/java/org/jboss/cache/factories/ReplicationQueueFactory.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/factories/ReplicationQueueFactory.java 2008-03-19 00:21:05 UTC (rev 5446)
+++ core/trunk/src/main/java/org/jboss/cache/factories/ReplicationQueueFactory.java 2008-03-19 00:31:19 UTC (rev 5447)
@@ -1,6 +1,6 @@
package org.jboss.cache.factories;
-import org.jboss.cache.ReplicationQueue;
+import org.jboss.cache.cluster.ReplicationQueue;
import org.jboss.cache.config.Configuration;
import org.jboss.cache.factories.annotations.DefaultFactoryFor;
Modified: core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java
===================================================================
--- core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java 2008-03-19 00:21:05 UTC (rev 5446)
+++ core/trunk/src/main/java/org/jboss/cache/interceptors/BaseRpcInterceptor.java 2008-03-19 00:31:19 UTC (rev 5447)
@@ -6,8 +6,8 @@
import org.jboss.cache.CacheSPI;
import org.jboss.cache.InvocationContext;
import org.jboss.cache.RPCManager;
-import org.jboss.cache.ReplicationQueue;
import org.jboss.cache.buddyreplication.BuddyManager;
+import org.jboss.cache.cluster.ReplicationQueue;
import org.jboss.cache.config.Configuration.CacheMode;
import org.jboss.cache.config.Option;
import org.jboss.cache.factories.annotations.Inject;
Copied: core/trunk/src/test/java/org/jboss/cache/cluster/ReplicationQueueTest.java (from rev 5441, core/trunk/src/test/java/org/jboss/cache/replicated/ReplicationQueueTest.java)
===================================================================
--- core/trunk/src/test/java/org/jboss/cache/cluster/ReplicationQueueTest.java (rev 0)
+++ core/trunk/src/test/java/org/jboss/cache/cluster/ReplicationQueueTest.java 2008-03-19 00:31:19 UTC (rev 5447)
@@ -0,0 +1,165 @@
+package org.jboss.cache.cluster;
+
+import org.easymock.EasyMock;
+import static org.easymock.EasyMock.*;
+import org.jboss.cache.Cache;
+import org.jboss.cache.DefaultCacheFactory;
+import org.jboss.cache.RPCManager;
+import org.jboss.cache.config.Configuration;
+import org.jboss.cache.factories.ComponentRegistry;
+import org.jboss.cache.marshall.MethodCall;
+import org.jboss.cache.misc.TestingUtil;
+import org.jgroups.Address;
+import static org.testng.AssertJUnit.assertNotNull;
+import org.testng.annotations.AfterMethod;
+import org.testng.annotations.BeforeMethod;
+import org.testng.annotations.Test;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
+@Test(groups = "functional")
+public class ReplicationQueueTest
+{
+ private static final int COUNT = 10;
+ Cache cache, cache2;
+ ReplicationQueue replQ;
+ ComponentRegistry registry;
+ RPCManager originalRpcManager;
+
+ @BeforeMethod
+ public void setUp() throws CloneNotSupportedException
+ {
+ cache = new DefaultCacheFactory().createCache(false);
+ cache.getConfiguration().setCacheMode(Configuration.CacheMode.REPL_ASYNC);
+ cache.getConfiguration().setUseReplQueue(true);
+ cache.getConfiguration().setReplQueueMaxElements(COUNT);
+ cache.getConfiguration().setReplQueueInterval(-1);
+ cache.start();
+ registry = TestingUtil.extractComponentRegistry(cache);
+ replQ = registry.getComponent(ReplicationQueue.class);
+ originalRpcManager = cache.getConfiguration().getRuntimeConfig().getRPCManager();
+ cache2 = new DefaultCacheFactory().createCache(cache.getConfiguration().clone());
+
+ TestingUtil.blockUntilViewsReceived(60000, cache, cache2);
+ }
+
+ @AfterMethod
+ public void tearDown()
+ {
+ // reset the original RPCManager
+ injectRpcManager(originalRpcManager);
+ TestingUtil.killCaches(cache, cache2);
+ }
+
+ private void injectRpcManager(RPCManager manager)
+ {
+ registry.registerComponent(RPCManager.class.getName(), manager, RPCManager.class);
+ }
+
+ public void testQueueHoldAndFlush() throws Exception
+ {
+ assert replQ != null;
+
+ // mock the RPCManager used in the cache
+ RPCManager mockRpcManager = EasyMock.createStrictMock(RPCManager.class);
+ injectRpcManager(mockRpcManager);
+
+ // expect basic cluster related calls
+ expect(mockRpcManager.getMembers()).andReturn(originalRpcManager.getMembers()).anyTimes();
+ replay(mockRpcManager);
+
+ // check that nothing on the RPCManager will be called until we hit the replication queue threshold.
+ for (int i = 0; i < COUNT - 1; i++) cache.put("/a/b/c/" + i, "k", "v");
+ assert replQ.elements.size() == COUNT - 1;
+
+ // verify that no calls have been made on the mockRpcManager
+ verify(mockRpcManager);
+
+ // reset the mock
+ reset(mockRpcManager);
+
+ // now try the last PUT which should result in the queue being flushed.
+ expect(mockRpcManager.getMembers()).andReturn(originalRpcManager.getMembers()).anyTimes();
+ expect(mockRpcManager.callRemoteMethods((List<Address>) anyObject(), (MethodCall) anyObject(), anyBoolean(), anyBoolean(), anyInt(), anyBoolean())).andReturn(Collections.emptyList()).once();
+ replay(mockRpcManager);
+
+ cache.put("/a/b/c/LAST", "k", "v");
+ assert replQ.elements.size() == 0;
+
+ // verify that the rpc call was only made once.
+ verify(mockRpcManager);
+ }
+
+ public void testFlushConcurrency() throws Exception
+ {
+ // will create multiple threads to constantly perform a cache update, and measure the number of expected invocations on the RPC manager.
+ final int numThreads = 25;
+ final int numLoopsPerThread = 1000;
+
+ int totalInvocations = numThreads * numLoopsPerThread;
+
+ assert totalInvocations % COUNT == 0 : "NumThreads and NumLoopsPerThread must multiply to be a multiple of COUNT";
+
+ int expectedReplications = totalInvocations / COUNT;
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ // mock the RPCManager used in the cache
+ RPCManager mockRpcManager = EasyMock.createStrictMock(RPCManager.class);
+ injectRpcManager(mockRpcManager);
+
+ // expect basic cluster related calls
+ expect(mockRpcManager.getMembers()).andReturn(originalRpcManager.getMembers()).anyTimes();
+ expect(mockRpcManager.callRemoteMethods((List<Address>) anyObject(), (MethodCall) anyObject(), anyBoolean(), anyBoolean(), anyInt(), anyBoolean())).andReturn(Collections.emptyList()).times(expectedReplications);
+ replay(mockRpcManager);
+
+ Thread[] threads = new Thread[numThreads];
+
+ for (int i = 0; i < numThreads; i++)
+ {
+ threads[i] = new Thread()
+ {
+ public void run()
+ {
+ try
+ {
+ latch.await();
+ }
+ catch (InterruptedException e)
+ {
+ // do nothing
+ }
+ for (int j = 0; j < numLoopsPerThread; j++)
+ {
+ cache.put("/a/b/c/" + getName() + "/" + j, "k", "v");
+ }
+ }
+ };
+ threads[i].start();
+ }
+
+ // start the threads
+ latch.countDown();
+
+ // wait for threads to join
+ for (Thread t : threads) t.join();
+
+ // now test results
+ verify(mockRpcManager);
+
+ assert replQ.elements.size() == 0;
+ }
+
+ public void testFailure() throws InterruptedException
+ {
+ for (int i = 0; i < COUNT; i++)
+ {
+ System.out.println("on put i = " + i);
+ cache.put("/a/b/c" + i, "key", "value");
+ assertNotNull(cache.get("/a/b/c" + i, "key"));
+ }
+ TestingUtil.sleepThread(500);
+ for (int i = 0; i < COUNT; i++) assertNotNull("on get i = " + i, cache2.get("/a/b/c" + i, "key"));
+ }
+}
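ReplicationQueueTest above exercises the queue against a strict EasyMock RPCManager and counts the replication calls. The same record/replay/verify cycle, reduced to a self-contained sketch that reuses the hypothetical FlushQueueSketch shown earlier (an illustration of the EasyMock pattern, not the actual test):

import static org.easymock.EasyMock.*;

import java.util.List;

public class ReplicationQueueMockSketch
{
   @SuppressWarnings("unchecked")
   public static void main(String[] args)
   {
      // record phase: a strict mock fails on any call that was not expected
      FlushQueueSketch.Replicator mock = createStrictMock(FlushQueueSketch.Replicator.class);
      mock.replicate((List<String>) anyObject());
      expectLastCall().once();
      replay(mock);

      // exercise phase: ten adds with a threshold of ten should flush exactly once
      FlushQueueSketch queue = new FlushQueueSketch(mock, 10);
      for (int i = 0; i < 10; i++)
         queue.add("job-" + i);

      // verify phase: fails if replicate() was called more or fewer times than expected
      verify(mock);
   }
}

The real test splits this into two phases, calling reset(mockRpcManager) in between, so it can first assert that nothing is replicated below the threshold and then that exactly one remote call is made once the threshold is reached.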
JBoss Cache SVN: r5446 - pojo/trunk/assembly.
by jbosscache-commits@lists.jboss.org
Author: jason.greene@jboss.com
Date: 2008-03-18 20:21:05 -0400 (Tue, 18 Mar 2008)
New Revision: 5446
Modified:
pojo/trunk/assembly/bin.xml
Log:
merge assembly fix
Modified: pojo/trunk/assembly/bin.xml
===================================================================
--- pojo/trunk/assembly/bin.xml 2008-03-19 00:20:39 UTC (rev 5445)
+++ pojo/trunk/assembly/bin.xml 2008-03-19 00:21:05 UTC (rev 5446)
@@ -23,6 +23,18 @@
<outputDirectory>etc</outputDirectory>
</fileSet>
+ <!-- tests -->
+ <fileSet>
+ <directory>src/test/java</directory>
+ <outputDirectory>test</outputDirectory>
+ </fileSet>
+
+ <!-- test resources -->
+ <fileSet>
+ <directory>src/test/resources</directory>
+ <outputDirectory>test</outputDirectory>
+ </fileSet>
+
<!-- EULAs and license files -->
<fileSet>
<directory>src/main/release</directory>
JBoss Cache SVN: r5445 - pojo/branches/2.1/assembly.
by jbosscache-commits@lists.jboss.org
Author: jason.greene@jboss.com
Date: 2008-03-18 20:20:39 -0400 (Tue, 18 Mar 2008)
New Revision: 5445
Modified:
pojo/branches/2.1/assembly/bin.xml
Log:
merge assembly fix
Modified: pojo/branches/2.1/assembly/bin.xml
===================================================================
--- pojo/branches/2.1/assembly/bin.xml 2008-03-19 00:19:58 UTC (rev 5444)
+++ pojo/branches/2.1/assembly/bin.xml 2008-03-19 00:20:39 UTC (rev 5445)
@@ -23,6 +23,18 @@
<outputDirectory>etc</outputDirectory>
</fileSet>
+ <!-- tests -->
+ <fileSet>
+ <directory>src/test/java</directory>
+ <outputDirectory>test</outputDirectory>
+ </fileSet>
+
+ <!-- test resources -->
+ <fileSet>
+ <directory>src/test/resources</directory>
+ <outputDirectory>test</outputDirectory>
+ </fileSet>
+
<!-- EULAs and license files -->
<fileSet>
<directory>src/main/release</directory>