Author: shawkins
Date: 2009-09-14 14:09:28 -0400 (Mon, 14 Sep 2009)
New Revision: 1347
Modified:
trunk/build/kit-runtime/deploy.properties
trunk/client/src/main/java/com/metamatrix/dqp/embedded/DQPEmbeddedProperties.java
trunk/engine/src/main/java/org/teiid/dqp/internal/process/CodeTableCache.java
trunk/engine/src/main/java/org/teiid/dqp/internal/process/DQPCore.java
trunk/engine/src/main/java/org/teiid/dqp/internal/process/DataTierManagerImpl.java
trunk/engine/src/main/resources/com/metamatrix/dqp/i18n.properties
trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestCodeTableCache.java
trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestDataTierManager.java
Log:
TEIID-828 update of code table caching to allow for more tables.
Modified: trunk/build/kit-runtime/deploy.properties
===================================================================
--- trunk/build/kit-runtime/deploy.properties 2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/build/kit-runtime/deploy.properties 2009-09-14 18:09:28 UTC (rev 1347)
@@ -63,12 +63,15 @@
#The maximum number of query plans that are cached. Note: this is a memory based cache. (default 250)
PreparedPlanCache.maxCount=250
-#Maximum number of cached lookup tables. Note: this is a memory based cache. (default 20)
-CodeTables.maxCount=20
+#Maximum number of cached lookup tables. Note: this is a memory based cache and should be set to a value of at least 10 to accommodate system usage. (default 200)
+CodeTables.maxCount=200
#Maximum number of records in a single lookup table (default 10000)
-CodeTables.maxRows=10000
+CodeTables.maxRowsPerTable=10000
+#Maximum number of records in all lookup tables (default 200000)
+CodeTables.maxRows=200000
+
#Denotes whether or not result set caching is enabled. (default false)
ResultSetCache.enabled=false
@@ -85,10 +88,10 @@
# Session Service Settings
#
-#Maximum number of sessions allowed by the system
+#Maximum number of sessions allowed by the system (default 5000)
session.maxSessions=5000
-#Max allowed time before the session is terminated by the system (default unlimited, below value is 24hrs)
+#Max allowed time before the session is terminated by the system (default unlimited; the value below, 86400000, is 24hrs)
#session.expirationTimeInMilli=86400000
#
@@ -120,13 +123,22 @@
server.portNumber=31000
server.bindAddress=localhost
+
+#Max number of threads dedicated to Admin and initial request processing (default 15)
server.maxSocketThreads=15
+
+#SO_RCVBUF size, 0 indicates that system default should be used (default 0)
server.inputBufferSize=0
+
+#SO_SNDBUF size, 0 indicates that system default should be used (default 0)
server.outputBufferSize=0
# SSL Settings
+#Setting to enable the use of SSL for socket connections. Note: all clients must use the mms protocol when enabled. (default false)
ssl.enabled=false
#ssl.protocol=SSLv3
+
+#SSL Authentication Mode, may be one of 1-way, 2-way, or anonymous (default 1-way)
#ssl.authenticationMode=1-way
#ssl.keymanagementalgorithm=
#ssl.keystore.filename=ssl.keystore
@@ -142,9 +154,12 @@
# Setting to enable the use of transactions for XA, local,
# and request scope transactions (default true)
xa.enabled=true
+
# default transaction time out in seconds (default 120)
xa.max_timeout=120
+
# Setting to enable recovery scans (default true)
xa.enable_recovery=true
+
# JBoss transactions status port (default 0 - selects an available port)
xa.txnstatus_port=0
\ No newline at end of file
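
The three CodeTables keys above work together: CodeTables.maxCount caps how many lookup tables may be cached, CodeTables.maxRowsPerTable caps the rows in any single table, and CodeTables.maxRows (which previously held the per-table limit) now caps the rows across all cached tables. The standalone sketch below is illustrative only and not part of this commit; the class and variable names are hypothetical. It simply reads the three keys with the defaults shown above, whereas Teiid itself reads them through PropertiesUtils in DQPCore (see that hunk later in this change).

import java.io.FileInputStream;
import java.util.Properties;

// Illustrative only: not part of this commit. Reads the three code table limits
// using the property keys and defaults from deploy.properties above.
public class CodeTableLimits {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        FileInputStream in = new FileInputStream("deploy.properties");
        try {
            props.load(in);
        } finally {
            in.close();
        }
        int maxTables = Integer.parseInt(props.getProperty("CodeTables.maxCount", "200"));
        int maxRowsPerTable = Integer.parseInt(props.getProperty("CodeTables.maxRowsPerTable", "10000"));
        int maxTotalRows = Integer.parseInt(props.getProperty("CodeTables.maxRows", "200000"));
        System.out.println(maxTables + " tables, " + maxRowsPerTable + " rows per table, " + maxTotalRows + " rows in total");
    }
}
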
Modified: trunk/client/src/main/java/com/metamatrix/dqp/embedded/DQPEmbeddedProperties.java
===================================================================
--- trunk/client/src/main/java/com/metamatrix/dqp/embedded/DQPEmbeddedProperties.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/client/src/main/java/com/metamatrix/dqp/embedded/DQPEmbeddedProperties.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -45,6 +45,7 @@
	public static final String MAX_RESULTSET_CACHE_SIZE = "ResultSetCache.maxSizeInMB"; //$NON-NLS-1$
	public static final String MAX_RESULTSET_CACHE_AGE = "ResultSetCache.maxAgeInSeconds"; //$NON-NLS-1$
	public static final String RESULTSET_CACHE_SCOPE = "ResultSetCache.scope"; //$NON-NLS-1$
+	public static final String MAX_CODE_TABLE_RECORDS_PER_TABLE = "CodeTables.maxRowsPerTable"; //$NON-NLS-1$
	public static final String MAX_CODE_TABLE_RECORDS = "CodeTables.maxRows"; //$NON-NLS-1$
	public static final String MAX_CODE_TABLES = "CodeTables.maxCount"; //$NON-NLS-1$
	public static final String MAX_PLAN_CACHE_SIZE = "PreparedPlanCache.maxCount"; //$NON-NLS-1$
Modified: trunk/engine/src/main/java/org/teiid/dqp/internal/process/CodeTableCache.java
===================================================================
--- trunk/engine/src/main/java/org/teiid/dqp/internal/process/CodeTableCache.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/main/java/org/teiid/dqp/internal/process/CodeTableCache.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -22,43 +22,45 @@
package org.teiid.dqp.internal.process;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.concurrent.atomic.AtomicInteger;
import com.metamatrix.api.exception.MetaMatrixComponentException;
import com.metamatrix.api.exception.MetaMatrixProcessingException;
import com.metamatrix.common.log.LogManager;
import com.metamatrix.core.util.HashCodeUtil;
import com.metamatrix.dqp.DQPPlugin;
+import com.metamatrix.dqp.embedded.DQPEmbeddedProperties;
import com.metamatrix.dqp.util.LogConstants;
import com.metamatrix.query.util.CommandContext;
/**
- * Code table cache.
+ * Code table cache. Heavily synchronized in-memory cache of code tables. There is no purging policy for this cache. Once the limits have been reached, exceptions will occur.
*/
class CodeTableCache {
- // Max number of code tables that can be loaded
+ private static class CodeTable {
+ Map<Object, Object> codeMap;
+ Set<Object> waitingRequests = new HashSet<Object>();
+ }
+
+ // Max number of code tables that can be loaded
private int maxCodeTables;
- // Caches being loaded - key is CacheKey, value is WaitingRequests
- private Map loadingCaches = new HashMap();
-
- // Map of RequestID/nodeID -> CacheKey
- private Map requestToCacheKeyMap = Collections.synchronizedMap(new HashMap());
-
+ // Max number of code records that can be loaded
+ private int maxCodeRecords;
+
+ private int maxCodeTableRecords;
+
+ private int rowCount;
+
	// Cache itself - key is CacheKey, value is Map (which is the key value -> return value for the code table)
-	private Map codeTableCache = new HashMap();
+	private Map<CacheKey, CodeTable> codeTableCache = new HashMap<CacheKey, CodeTable>();
- // Cache keys for stuff already in the cache
- private Set cacheKeyDone = new HashSet();
-
public enum CacheState {
CACHE_EXISTS,
CACHE_LOADING,
@@ -66,13 +68,13 @@
CACHE_OVERLOAD
}
- private AtomicInteger requestSequence = new AtomicInteger();
-
/**
* Construct a code table cache
*/
- public CodeTableCache(int maxCodeTables) {
- this.maxCodeTables = maxCodeTables;
+	public CodeTableCache(int maxCodeTables, int maxCodeRecords, int maxCodeTableRecords) {
+ this.maxCodeRecords = maxCodeRecords;
+ this.maxCodeTables = maxCodeTables;
+ this.maxCodeTableRecords = maxCodeTableRecords;
}
/**
@@ -95,31 +97,26 @@
// Create a CacheKey
		CacheKey cacheKey = new CacheKey(codeTable, returnElement, keyElement, context.getVdbName(), context.getVdbVersion());
-
- if (cacheKeyDone.contains(cacheKey)) { // CacheKey exists in codeTableCache
+ CodeTable table = this.codeTableCache.get(cacheKey);
+ if (table == null) {
+ if(codeTableCache.size() >= maxCodeTables) {
+ // In this case we already have some number of existing + loading caches
+ // that are >= the max number we are allowed to have. Thus, we cannot load
+ // another cache.
+ return CacheState.CACHE_OVERLOAD;
+ }
+ table = new CodeTable();
+ table.waitingRequests.add(context.getProcessorID());
+ this.codeTableCache.put(cacheKey, table);
+ return CacheState.CACHE_NOT_EXIST;
+ }
+ if (table.waitingRequests == null) { // CacheKey exists in codeTableCache
return CacheState.CACHE_EXISTS;
-
}
- if (loadingCaches.containsKey(cacheKey)) { // CacheKey exists in loadingCache
- // Add context to additional contexts
- WaitingRequests wqr = (WaitingRequests) loadingCaches.get(cacheKey);
- wqr.addRequestID(context.getProcessorID());
- loadingCaches.put(cacheKey, wqr);
- return CacheState.CACHE_LOADING;
-
- } else if(codeTableCache.size() + loadingCaches.size() >= maxCodeTables) {
- // In this case we already have some number of existing + loading caches
- // that are >= the max number we are allowed to have. Thus, we cannot load
- // another cache.
- return CacheState.CACHE_OVERLOAD;
-
- } else { // CacheKey not exists in loadingCache
- // Add to loadingCaches as primary context
- WaitingRequests wqr = new WaitingRequests(context.getProcessorID());
- loadingCaches.put(cacheKey, wqr);
- return CacheState.CACHE_NOT_EXIST;
- }
- }
+ // Add context to additional contexts
+ table.waitingRequests.add(context.getProcessorID());
+ return CacheState.CACHE_LOADING;
+ }
/**
* Set request ID for request key to cache key mapping.
@@ -130,14 +127,8 @@
* @param requestID Request ID
* @param nodeID Plan Node ID
*/
-	public Integer createCacheRequest(String codeTable, String returnElement, String keyElement, CommandContext context) {
-		// Create a cache key
-		CacheKey cacheKey = new CacheKey(codeTable, returnElement, keyElement, context.getVdbName(), context.getVdbVersion());
-		Integer result = this.requestSequence.getAndIncrement();
-
-		// Add requestID/nodeID pair to map for later lookup
-		requestToCacheKeyMap.put(result, cacheKey);
-		return result;
+	public CacheKey createCacheRequest(String codeTable, String returnElement, String keyElement, CommandContext context) {
+		return new CacheKey(codeTable, returnElement, keyElement, context.getVdbName(), context.getVdbVersion());
}
/**
@@ -147,25 +138,32 @@
* @param results QueryResults of <List<List<keyValue, returnValue>>
* @throws MetaMatrixProcessingException
*/
-	public synchronized void loadTable(Integer requestKey, List[] records) throws MetaMatrixProcessingException {
-		// Look up cache key by requestID/nodeID pair
-		CacheKey cacheKey = (CacheKey) requestToCacheKeyMap.get(requestKey);
-
+	public synchronized void loadTable(CacheKey cacheKey, List[] records) throws MetaMatrixProcessingException {
// Lookup the existing data
// Map of data: keyValue --> returnValue;
- Map existingMap = (Map) codeTableCache.get(cacheKey);
- if(existingMap == null) {
- existingMap = new HashMap();
- codeTableCache.put(cacheKey, existingMap);
+ CodeTable table = codeTableCache.get(cacheKey);
+ if(table.codeMap == null) {
+ table.codeMap = new HashMap<Object, Object>();
}
-
+
+ // Determine whether the results should be added to code table cache
+ // Depends on size of results and available memory and system parameters
+ int potentialSize = table.codeMap.size() + records.length;
+ if (potentialSize > maxCodeTableRecords) {
+			throw new MetaMatrixProcessingException("ERR.018.005.0100", DQPPlugin.Util.getString("ERR.018.005.0100", DQPEmbeddedProperties.MAX_CODE_TABLE_RECORDS_PER_TABLE)); //$NON-NLS-1$ //$NON-NLS-2$
+		}
+
+		if (potentialSize + rowCount > maxCodeRecords) {
+			throw new MetaMatrixProcessingException("ERR.018.005.0100", DQPPlugin.Util.getString("ERR.018.005.0100", DQPEmbeddedProperties.MAX_CODE_TABLE_RECORDS)); //$NON-NLS-1$ //$NON-NLS-2$
+		}
+
		// Add data: <List<List<keyValue, returnValue>> from results to the code table cache
for ( int i = 0; i < records.length; i++ ) {
// each record or row
- List record = records[i];
+ List<Object> record = records[i];
Object keyValue = record.get(0);
Object returnValue = record.get(1);
- Object existing = existingMap.put(keyValue, returnValue);
+ Object existing = table.codeMap.put(keyValue, returnValue);
if (existing != null) {
				throw new MetaMatrixProcessingException(DQPPlugin.Util.getString("CodeTableCache.duplicate_key", cacheKey.getCodeTable(), cacheKey.getKeyElement(), keyValue)); //$NON-NLS-1$
}
@@ -181,21 +179,17 @@
* @return Object of return value in code table cache
*/
	public synchronized Object lookupValue(String codeTable, String returnElement, String keyElement, Object keyValue, CommandContext context) throws MetaMatrixComponentException {
-		Object returnValue = null;
-
		// Create CacheKey
		CacheKey cacheKey = new CacheKey(codeTable, returnElement, keyElement, context.getVdbName(), context.getVdbVersion());
		// Find the corresponding data map in cache for the cache key
-		Map dataMap = (Map) codeTableCache.get(cacheKey);
-		if(dataMap == null) {
-			Object[] params = new Object[] {codeTable,keyElement,returnElement};
-			throw new MetaMatrixComponentException(DQPPlugin.Util.getString("CodeTableCache.No_code_table", params)); //$NON-NLS-1$
+		CodeTable table = codeTableCache.get(cacheKey);
+		if(table == null || table.codeMap == null) {
+			throw new MetaMatrixComponentException(DQPPlugin.Util.getString("CodeTableCache.No_code_table", cacheKey.codeTable,cacheKey.keyElement,cacheKey.returnElement)); //$NON-NLS-1$
}
- returnValue = dataMap.get(keyValue);
- return returnValue;
+ return table.codeMap.get(keyValue);
}
-
+
/**
* Places the lookup results in the cache and marks the cache loaded
* @param requestID
@@ -203,7 +197,7 @@
* @return the set of waiting requests
* @since 4.2
*/
- public Set markCacheLoaded(Integer requestKey) {
+ public Set<Object> markCacheLoaded(CacheKey requestKey) {
return markCacheDone(requestKey, false);
}
@@ -215,26 +209,27 @@
* @return the set of waiting requests
* @since 4.2
*/
- public Set errorLoadingCache(Integer requestKey) {
+ public Set<Object> errorLoadingCache(CacheKey requestKey) {
return markCacheDone(requestKey, true);
}
- private synchronized Set markCacheDone(Integer requestKey, boolean errorOccurred) {
- // Remove request from requestToCacheKeyMap
- CacheKey cacheKey = (CacheKey) requestToCacheKeyMap.remove(requestKey);
+	private synchronized Set<Object> markCacheDone(CacheKey cacheKey, boolean errorOccurred) {
if (errorOccurred) {
// Remove any results already cached
- codeTableCache.remove(cacheKey);
- } else {
- cacheKeyDone.add(cacheKey);
+ CodeTable table = codeTableCache.remove(cacheKey);
+ if (table != null) {
+ return table.waitingRequests;
+ }
+ return null;
}
-
- // Remove cache key from loadingCaches
-		WaitingRequests waitingRequests = (WaitingRequests)loadingCaches.remove(cacheKey);
- if (waitingRequests != null) {
- return waitingRequests.getWaitingRequestIDs();
- }
- return null;
+ CodeTable table = codeTableCache.get(cacheKey);
+ if (table == null || table.codeMap == null) {
+ return null;
+ }
+ rowCount += table.codeMap.size();
+ Set<Object> waiting = table.waitingRequests;
+ table.waitingRequests = null;
+ return waiting;
}
public synchronized void clearAll() {
@@ -247,26 +242,25 @@
// Walk through every key in the done cache and remove it
int removedTables = 0;
- int removedRecords = 0;
- Iterator keyIter = cacheKeyDone.iterator();
- while(keyIter.hasNext()) {
- CacheKey cacheKey = (CacheKey) keyIter.next();
- Map codeTable = (Map) codeTableCache.remove(cacheKey);
- removedTables++;
- removedRecords += codeTable.size();
+ int removedRecords = this.rowCount;
+		for (Iterator<CodeTable> iter = codeTableCache.values().iterator(); iter.hasNext();) {
+ CodeTable table = iter.next();
+ if (table.waitingRequests == null) {
+ removedTables++;
+ iter.remove();
+ }
}
// Clear the cacheKeyDone
- cacheKeyDone.clear();
-
+ this.rowCount = 0;
// Log status
-		LogManager.logInfo(LogConstants.CTX_DQP, DQPPlugin.Util.getString("CodeTableCache.Cleared_code_tables", new Object[]{new Integer(removedTables), new Integer(removedRecords)})); //$NON-NLS-1$
+		LogManager.logInfo(LogConstants.CTX_DQP, DQPPlugin.Util.getString("CodeTableCache.Cleared_code_tables", removedTables, removedRecords)); //$NON-NLS-1$
}
/**
* Cache Key consists: codeTable, returnElement and keyElement.
*/
- private static class CacheKey {
+ static class CacheKey {
private String codeTable;
private String returnElement;
private String keyElement;
@@ -294,10 +288,6 @@
return this.codeTable;
}
- public String getReturnElement() {
- return this.returnElement;
- }
-
public String getKeyElement() {
return this.keyElement;
}
@@ -324,45 +314,4 @@
}
}
- /**
- * Waiting Requests consist: primary requestID and list of additional waiting requestIDs.
- */
- private static class WaitingRequests {
- Object primaryRequestID;
- Set additionalRequestIDs;
-
- public WaitingRequests(Object requestID) {
- this.primaryRequestID = requestID;
- }
-
- public void addRequestID(Object requestID) {
- if(additionalRequestIDs == null) {
- additionalRequestIDs = new HashSet(8, 0.9f);
- }
- additionalRequestIDs.add(requestID);
- }
-
- /**
- * Return the set of requestIDs for waiting requests.
- * @return Set of waiting requests' IDs
- */
- private Set getWaitingRequestIDs() {
- Set requestIDs = null;
-
- // Waiting Requests can contain both primary and additional context
- if (additionalRequestIDs != null) {
- requestIDs = new HashSet(additionalRequestIDs.size() + 1, 1.0f);
- requestIDs.addAll(additionalRequestIDs);
- } else {
- requestIDs = new HashSet(2, 1.0f);
- }
- if (primaryRequestID != null) {
- requestIDs.add(primaryRequestID);
- }
-
- return requestIDs;
- }
-
- }
-
}
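
The rewrite above collapses the old loadingCaches, cacheKeyDone, requestToCacheKeyMap, and WaitingRequests bookkeeping into a single Map<CacheKey, CodeTable>, where a table whose waitingRequests set is null is fully loaded and a non-null set means a load is in flight. The following standalone mini re-implementation (illustrative only, not the Teiid class; the names are hypothetical) shows just that state logic from cacheExists:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only: a stripped-down version of the cacheExists state machine.
public class MiniCodeTableCache {

    enum State { EXISTS, LOADING, NOT_EXIST, OVERLOAD }

    static class Table {
        Map<Object, Object> codeMap;                          // null until fully loaded
        Set<Object> waitingRequests = new HashSet<Object>();  // set to null once fully loaded
    }

    private final Map<String, Table> cache = new HashMap<String, Table>();
    private final int maxTables;

    public MiniCodeTableCache(int maxTables) {
        this.maxTables = maxTables;
    }

    public synchronized State cacheExists(String key, Object requestId) {
        Table table = cache.get(key);
        if (table == null) {
            if (cache.size() >= maxTables) {
                return State.OVERLOAD;            // loaded + loading tables already at the limit
            }
            table = new Table();
            table.waitingRequests.add(requestId); // this caller becomes the loader
            cache.put(key, table);
            return State.NOT_EXIST;
        }
        if (table.waitingRequests == null) {
            return State.EXISTS;                  // fully loaded, safe to look up values
        }
        table.waitingRequests.add(requestId);     // another request is already loading it
        return State.LOADING;
    }

    public static void main(String[] args) {
        MiniCodeTableCache cache = new MiniCodeTableCache(1);
        System.out.println(cache.cacheExists("countrycode", "req1")); // NOT_EXIST
        System.out.println(cache.cacheExists("countrycode", "req2")); // LOADING
        System.out.println(cache.cacheExists("statecode", "req3"));   // OVERLOAD
    }
}

As in the real class, the overload state is returned before a new entry is created, so the table count includes tables that are still loading.
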
Modified: trunk/engine/src/main/java/org/teiid/dqp/internal/process/DQPCore.java
===================================================================
--- trunk/engine/src/main/java/org/teiid/dqp/internal/process/DQPCore.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/main/java/org/teiid/dqp/internal/process/DQPCore.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -121,7 +121,8 @@
//Constants
private static final int DEFAULT_MAX_CODE_TABLE_RECORDS = 10000;
- private static final int DEFAULT_MAX_CODE_TABLES = 20;
+ private static final int DEFAULT_MAX_CODE_TABLES = 200;
+ private static final int DEFAULT_MAX_CODE_RECORDS = 200000;
private static final int DEFAULT_FETCH_SIZE = 2000;
private static final int DEFAULT_PROCESSOR_TIMESLICE = 2000;
    private static final String PROCESS_PLAN_QUEUE_NAME = "QueryProcessorQueue"; //$NON-NLS-1$
@@ -132,6 +133,7 @@
// System properties for Code Table
private int maxCodeTableRecords = DEFAULT_MAX_CODE_TABLE_RECORDS;
private int maxCodeTables = DEFAULT_MAX_CODE_TABLES;
+ private int maxCodeRecords = DEFAULT_MAX_CODE_RECORDS;
private int maxFetchSize = DEFAULT_FETCH_SIZE;
@@ -632,8 +634,9 @@
        this.processorTimeslice = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.PROCESS_TIMESLICE, DEFAULT_PROCESSOR_TIMESLICE);
        this.maxFetchSize = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.MAX_FETCH_SIZE, DEFAULT_FETCH_SIZE);
        this.processorDebugAllowed = PropertiesUtils.getBooleanProperty(props, DQPEmbeddedProperties.PROCESSOR_DEBUG_ALLOWED, true);
-        this.maxCodeTableRecords = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.MAX_CODE_TABLE_RECORDS, DEFAULT_MAX_CODE_TABLE_RECORDS);
+        this.maxCodeTableRecords = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.MAX_CODE_TABLE_RECORDS_PER_TABLE, DEFAULT_MAX_CODE_TABLE_RECORDS);
        this.maxCodeTables = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.MAX_CODE_TABLES, DEFAULT_MAX_CODE_TABLES);
+        this.maxCodeRecords = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.MAX_CODE_TABLE_RECORDS, DEFAULT_MAX_CODE_RECORDS);
        this.chunkSize = PropertiesUtils.getIntProperty(props, DQPEmbeddedProperties.STREAMING_BATCH_SIZE, 10) * 1024;
@@ -671,6 +674,7 @@
                                         (VDBService) env.findService(DQPServiceNames.VDB_SERVICE),
                                         (BufferService) env.findService(DQPServiceNames.BUFFER_SERVICE),
this.maxCodeTables,
+ this.maxCodeRecords,
this.maxCodeTableRecords);
}
Modified: trunk/engine/src/main/java/org/teiid/dqp/internal/process/DataTierManagerImpl.java
===================================================================
--- trunk/engine/src/main/java/org/teiid/dqp/internal/process/DataTierManagerImpl.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/main/java/org/teiid/dqp/internal/process/DataTierManagerImpl.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -26,6 +26,8 @@
import java.util.Iterator;
import java.util.List;
+import org.teiid.dqp.internal.process.CodeTableCache.CacheKey;
+
import com.metamatrix.api.exception.MetaMatrixComponentException;
import com.metamatrix.api.exception.MetaMatrixProcessingException;
import com.metamatrix.common.buffer.BlockedException;
@@ -35,6 +37,7 @@
import com.metamatrix.common.log.LogManager;
import com.metamatrix.core.util.Assertion;
import com.metamatrix.dqp.DQPPlugin;
+import com.metamatrix.dqp.embedded.DQPEmbeddedProperties;
import com.metamatrix.dqp.internal.datamgr.ConnectorID;
import com.metamatrix.dqp.message.AtomicRequestID;
import com.metamatrix.dqp.message.AtomicRequestMessage;
@@ -59,23 +62,19 @@
private VDBService vdbService;
private BufferService bufferService;
- // Code table limits
- private int maxCodeTableRecords;
-
// Processor state
private CodeTableCache codeTableCache;
public DataTierManagerImpl(DQPCore requestMgr,
DataService dataService, VDBService vdbService, BufferService bufferService,
- int maxCodeTables, int maxCodeTableRecords) {
+ int maxCodeTables, int maxCodeRecords, int maxCodeTableRecords) {
this.requestMgr = requestMgr;
this.dataService = dataService;
this.vdbService = vdbService;
- this.maxCodeTableRecords = maxCodeTableRecords;
this.bufferService = bufferService;
- this.codeTableCache = new CodeTableCache(maxCodeTables);
+        this.codeTableCache = new CodeTableCache(maxCodeTables, maxCodeRecords, maxCodeTableRecords);
}
public TupleSource registerRequest(Object processorId, Command command,
@@ -181,7 +180,7 @@
case CACHE_EXISTS:
            return this.codeTableCache.lookupValue(codeTableName, returnElementName, keyElementName, keyValue, context);
        case CACHE_OVERLOAD:
-            throw new MetaMatrixProcessingException("ERR.018.005.0099", DQPPlugin.Util.getString("ERR.018.005.0099")); //$NON-NLS-1$ //$NON-NLS-2$
+            throw new MetaMatrixProcessingException("ERR.018.005.0100", DQPPlugin.Util.getString("ERR.018.005.0100", DQPEmbeddedProperties.MAX_CODE_TABLES)); //$NON-NLS-1$ //$NON-NLS-2$
default:
throw BlockedException.INSTANCE;
}
@@ -196,7 +195,7 @@
        String query = ReservedWords.SELECT + ' ' + keyElementName + " ," + returnElementName + ' ' + ReservedWords.FROM + ' ' + codeTableName; //$NON-NLS-1$
-        final Integer codeRequestId = this.codeTableCache.createCacheRequest(codeTableName, returnElementName, keyElementName, context);
+        final CacheKey codeRequestId = this.codeTableCache.createCacheRequest(codeTableName, returnElementName, keyElementName, context);
boolean success = false;
QueryProcessor processor = null;
@@ -206,12 +205,6 @@
processor.setBatchHandler(new QueryProcessor.BatchHandler() {
@Override
                public void batchProduced(TupleBatch batch) throws MetaMatrixProcessingException {
-                    // Determine whether the results should be added to code table cache
-                    // Depends on size of results and available memory and system parameters
-
-                    if (batch.getEndRow() > maxCodeTableRecords) {
-                        throw new MetaMatrixProcessingException("ERR.018.005.0100", DQPPlugin.Util.getString("ERR.018.005.0100", context.getProcessorID(), codeRequestId)); //$NON-NLS-1$ //$NON-NLS-2$
- }
codeTableCache.loadTable(codeRequestId, batch.getAllTuples());
}
});
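
With the per-batch getEndRow() check removed from the BatchHandler, CodeTableCache.loadTable is now the single place that enforces the row limits as batches accumulate: one check against the table being built and one against the running total of all cached rows. A standalone sketch of that accumulation logic follows (illustrative only, not the Teiid code; the names are hypothetical):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: mirrors the two limit checks now performed in loadTable.
public class LoadLimitSketch {
    private final int maxRowsPerTable;  // CodeTables.maxRowsPerTable
    private final int maxTotalRows;     // CodeTables.maxRows
    private int rowCount;               // rows across all completed tables
    private Map<Object, Object> codeMap = new HashMap<Object, Object>();

    public LoadLimitSketch(int maxRowsPerTable, int maxTotalRows) {
        this.maxRowsPerTable = maxRowsPerTable;
        this.maxTotalRows = maxTotalRows;
    }

    public void loadBatch(List<Object[]> records) {
        int potentialSize = codeMap.size() + records.size();
        if (potentialSize > maxRowsPerTable) {
            throw new IllegalStateException("exceeds CodeTables.maxRowsPerTable");
        }
        if (potentialSize + rowCount > maxTotalRows) {
            throw new IllegalStateException("exceeds CodeTables.maxRows");
        }
        for (Object[] record : records) {
            codeMap.put(record[0], record[1]);  // key value -> return value
        }
    }

    public void tableLoaded() {                 // analogous to markCacheLoaded
        rowCount += codeMap.size();
        codeMap = new HashMap<Object, Object>();
    }

    public static void main(String[] args) {
        LoadLimitSketch sketch = new LoadLimitSketch(2, 3);
        sketch.loadBatch(Arrays.<Object[]>asList(new Object[] {1, "a"}, new Object[] {2, "b"}));
        sketch.tableLoaded();
        try {
            sketch.loadBatch(Arrays.<Object[]>asList(new Object[] {3, "c"}, new Object[] {4, "d"}));
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // exceeds CodeTables.maxRows
        }
    }
}

The new testMaxRecords and testMaxRecordsPerTable cases in TestCodeTableCache below exercise both limits against the real class.
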
Modified: trunk/engine/src/main/resources/com/metamatrix/dqp/i18n.properties
===================================================================
--- trunk/engine/src/main/resources/com/metamatrix/dqp/i18n.properties	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/main/resources/com/metamatrix/dqp/i18n.properties	2009-09-14 18:09:28 UTC (rev 1347)
@@ -362,8 +362,7 @@
ERR.018.005.0096 = There was an error in the response.
ERR.018.005.0097 = Exception trying to determine maximum number of code tables.
ERR.018.005.0098 = Exception trying to determine maximum record size of a code table.
-ERR.018.005.0099 = Unable to load code table because code table entries exceeds the allowed parameter - MaxCodeTables.
-ERR.018.005.0100 = Unable to load code table for requestID {0} of and nodeID of {1} because result sizes exceeds the allowed parameter - MaxCodeTableRecords.
+ERR.018.005.0100 = Unable to load code table because the result size exceeds the allowed parameter - {0}.
# services (003)
ERR.022.003.0001=
Modified: trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestCodeTableCache.java
===================================================================
--- trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestCodeTableCache.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestCodeTableCache.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -26,6 +26,7 @@
import java.util.List;
import org.teiid.dqp.internal.process.CodeTableCache;
+import org.teiid.dqp.internal.process.CodeTableCache.CacheKey;
import org.teiid.dqp.internal.process.CodeTableCache.CacheState;
import junit.framework.TestCase;
@@ -57,10 +58,10 @@
}
private CodeTableCache setUpSampleCodeTable(boolean setDone) {
- CodeTableCache ctc = new CodeTableCache(10);
-
+ CodeTableCache ctc = new CodeTableCache(10, 10, 10);
+		assertEquals(CacheState.CACHE_NOT_EXIST, ctc.cacheExists("countrycode", "code", "country", TEST_CONTEXT)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
		// must set the requestToCacheKeyMap first
-		int nodeId = ctc.createCacheRequest("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+		CacheKey nodeId = ctc.createCacheRequest("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
List[] results = exampleResultObject();
// table/countrycode (keyElem/country, returnElem/code);
@@ -74,16 +75,18 @@
}
if(setDone) {
ctc.markCacheLoaded(nodeId);
+ } else {
+ ctc.errorLoadingCache(nodeId);
}
return ctc;
}
// Max = 1 and 1 table is set up
private CodeTableCache setUpSampleCodeTable2() {
- CodeTableCache ctc = new CodeTableCache(1);
-
+ CodeTableCache ctc = new CodeTableCache(1, 10, 10);
+		assertEquals(CacheState.CACHE_NOT_EXIST, ctc.cacheExists("countrycode", "code", "country", TEST_CONTEXT)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
		// must set the requestToCacheKeyMap first
-		int nodeId = ctc.createCacheRequest("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+		CacheKey nodeId = ctc.createCacheRequest("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
List[] results = exampleResultObject();
// table/countrycode (keyElem/country, returnElem/code);
@@ -100,7 +103,7 @@
}
public void testLookupValue() throws Exception {
- CodeTableCache ctc = setUpSampleCodeTable(false);
+ CodeTableCache ctc = setUpSampleCodeTable(true);
		String code = (String) ctc.lookupValue("countrycode", "code", "country", "Germany", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$ //$NON-NLS-4$
		assertEquals("Actual lookup value doesn't match with expected: ", code, "GM"); //$NON-NLS-1$ //$NON-NLS-2$
}
@@ -116,7 +119,7 @@
/** state = 1; loading state */
public void testCacheExists2() {
- CodeTableCache ctc = new CodeTableCache(10);
+ CodeTableCache ctc = new CodeTableCache(10, 10, 10);
		ctc.cacheExists("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
		CacheState actualState = ctc.cacheExists("countrycode", "code", "country", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
@@ -190,10 +193,10 @@
}
public void testDuplicateKeyException() {
- CodeTableCache ctc = new CodeTableCache(1);
-
+ CodeTableCache ctc = new CodeTableCache(1, 10, 10);
+		assertEquals(CacheState.CACHE_NOT_EXIST, ctc.cacheExists("table", "key", "value", TEST_CONTEXT)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
		// must set the requestToCacheKeyMap first
-		int nodeId = ctc.createCacheRequest("table", "key", "value", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+		CacheKey nodeId = ctc.createCacheRequest("table", "key", "value", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
List[] results = new List[] {
Arrays.asList(1, 2),
Arrays.asList(1, 3),
@@ -206,5 +209,41 @@
			assertEquals("Duplicate code table 'table' key 'value' value '1'", e.getMessage()); //$NON-NLS-1$
}
}
+
+ public void testMaxRecords() {
+ CodeTableCache ctc = new CodeTableCache(1, 1, 10);
+		assertEquals(CacheState.CACHE_NOT_EXIST, ctc.cacheExists("table", "key", "value", TEST_CONTEXT)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+		// must set the requestToCacheKeyMap first
+		CacheKey nodeId = ctc.createCacheRequest("table", "key", "value", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ List[] results = new List[] {
+ Arrays.asList(1, 2),
+ Arrays.asList(2, 3),
+ };
+
+ try {
+ ctc.loadTable(nodeId, results);
+ fail("expected exception"); //$NON-NLS-1$
+ } catch (MetaMatrixProcessingException e) {
+			assertEquals("Error Code:ERR.018.005.0100 Message:Unable to load code table because the result size exceeds the allowed parameter - CodeTables.maxRows.", e.getMessage()); //$NON-NLS-1$
+ }
+ }
+
+ public void testMaxRecordsPerTable() {
+ CodeTableCache ctc = new CodeTableCache(10, 10, 1);
+		assertEquals(CacheState.CACHE_NOT_EXIST, ctc.cacheExists("table", "key", "value", TEST_CONTEXT)); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+		// must set the requestToCacheKeyMap first
+		CacheKey nodeId = ctc.createCacheRequest("table", "key", "value", TEST_CONTEXT); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+ List[] results = new List[] {
+ Arrays.asList(1, 2),
+ Arrays.asList(2, 3),
+ };
+
+ try {
+ ctc.loadTable(nodeId, results);
+ fail("expected exception"); //$NON-NLS-1$
+ } catch (MetaMatrixProcessingException e) {
+			assertEquals("Error Code:ERR.018.005.0100 Message:Unable to load code table because the result size exceeds the allowed parameter - CodeTables.maxRowsPerTable.", e.getMessage()); //$NON-NLS-1$
+ }
+ }
}
Modified: trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestDataTierManager.java
===================================================================
--- trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestDataTierManager.java	2009-09-14 15:59:00 UTC (rev 1346)
+++ trunk/engine/src/test/java/org/teiid/dqp/internal/process/TestDataTierManager.java	2009-09-14 18:09:28 UTC (rev 1347)
@@ -111,6 +111,7 @@
dataService,
vdbService,
bs,
+ 20,
1000,
1000);
command = helpGetCommand(sql, metadata);