exo-jcr SVN: r5179 - jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene.
by do-not-reply@jboss.org
Author: nzamosenchuk
Date: 2011-11-11 09:04:45 -0500 (Fri, 11 Nov 2011)
New Revision: 5179
Modified:
jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java
Log:
EXOJCR-1628 : Index should be closed if replaceIndexes() is not invoked.
Modified: jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java 2011-11-10 14:27:38 UTC (rev 5178)
+++ jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/IndexMerger.java 2011-11-11 14:04:45 UTC (rev 5179)
@@ -366,6 +366,8 @@
if (!indexReplacement.attempt(0))
{
log.debug("index merging canceled");
+ // if index not passed to multiIndex, then it will never be closed
+ index.close();
break;
}
try
14 years, 5 months
exo-jcr SVN: r5178 - jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene.
by do-not-reply@jboss.org
Author: nzamosenchuk
Date: 2011-11-10 09:27:38 -0500 (Thu, 10 Nov 2011)
New Revision: 5178
Modified:
jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
Log:
EXOJCR-1625 : fixed
Modified: jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2011-11-10 09:13:24 UTC (rev 5177)
+++ jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/MultiIndex.java 2011-11-10 14:27:38 UTC (rev 5178)
@@ -269,7 +269,7 @@
stopped = true;
}
};
-
+
/**
* The unique id of the workspace corresponding to this multi index
*/
@@ -315,12 +315,6 @@
// as of 1.5 deletable file is not used anymore
removeDeletable();
- // initialize IndexMerger
- merger = new IndexMerger(this);
- merger.setMaxMergeDocs(handler.getMaxMergeDocs());
- merger.setMergeFactor(handler.getMergeFactor());
- merger.setMinMergeDocs(handler.getMinMergeDocs());
-
// copy current index names
Set<String> currentNames = new HashSet<String>(indexNames.getNames());
@@ -344,7 +338,6 @@
index.setUseCompoundFile(handler.getUseCompoundFile());
index.setTermInfosIndexDivisor(handler.getTermInfosIndexDivisor());
indexes.add(index);
- merger.indexAdded(index.getName(), index.getNumDocuments());
}
// this method is run in privileged mode internally
@@ -370,6 +363,7 @@
indexingQueue.initialize(this);
if (modeHandler.getMode() == IndexerIoMode.READ_WRITE)
{
+ // will also initialize IndexMerger
setReadWrite();
}
this.indexNames.setMultiIndex(this);
@@ -388,7 +382,7 @@
// can't register shutdown hook because
// jvm shutdown sequence has already begun,
// silently ignore...
- }
+ }
return null;
}
});
@@ -1357,7 +1351,11 @@
// stop index merger
// when calling this method we must not lock this MultiIndex, otherwise
// a deadlock might occur
- merger.dispose();
+ if (merger != null)
+ {
+ merger.dispose();
+ merger = null;
+ }
synchronized (this)
{
@@ -1625,8 +1623,12 @@
/**
* Initialize IndexMerger.
*/
- private void doInitIndexMerger() throws IOException
+ private void initMerger() throws IOException
{
+ if (merger != null)
+ {
+ log.info("IndexMerger initialization called twice.");
+ }
merger = new IndexMerger(this);
merger.setMaxMergeDocs(handler.getMaxMergeDocs());
merger.setMergeFactor(handler.getMergeFactor());
@@ -1636,6 +1638,7 @@
{
merger.indexAdded(((PersistentIndex)index).getName(), ((PersistentIndex)index).getNumDocuments());
}
+ merger.start();
}
/**
@@ -2512,7 +2515,10 @@
index.indexNames.addName(indexName);
// now that the index is in the active list let the merger know
// about it
- index.merger.indexAdded(indexName, idx.getNumDocuments());
+ if (index.merger != null)
+ {
+ index.merger.indexAdded(indexName, idx.getNumDocuments());
+ }
}
}
@@ -3251,7 +3257,11 @@
protected void setReadOny()
{
// try to stop merger in safe way
- merger.dispose();
+ if (merger != null)
+ {
+ merger.dispose();
+ merger = null;
+ }
flushTask.cancel();
FLUSH_TIMER.purge();
@@ -3284,8 +3294,7 @@
attemptDelete();
// now that we are ready, start index merger
- doInitIndexMerger();
- merger.start();
+ initMerger();
if (redoLogApplied)
{
@@ -3430,8 +3439,7 @@
//invoking offline index
invokeOfflineIndex();
staleIndexes.clear();
- doInitIndexMerger();
- merger.start();
+ initMerger();
}
else
{
@@ -3443,7 +3451,11 @@
else
{
log.info("Setting index OFFLINE ({})", handler.getContext().getWorkspacePath(true));
- merger.dispose();
+ if (merger != null)
+ {
+ merger.dispose();
+ merger = null;
+ }
offlineIndex =
new OfflinePersistentIndex(handler.getTextAnalyzer(), handler.getSimilarity(), cache, indexingQueue,
directoryManager);
14 years, 5 months
exo-jcr SVN: r5177 - jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene.
by do-not-reply@jboss.org
Author: nzamosenchuk
Date: 2011-11-10 04:13:24 -0500 (Thu, 10 Nov 2011)
New Revision: 5177
Modified:
jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java
Log:
EXOJCR-1538: Avoid setting Value to null in SingletonTokenStream, because this causes serialization in a clustered environment with an empty value, thus making indexing unreliable. Adding custom readExternal/writeExternal skipping the serialization of the hasNext field.
Modified: jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java 2011-11-10 07:10:17 UTC (rev 5176)
+++ jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/core/query/lucene/SingletonTokenStream.java 2011-11-10 09:13:24 UTC (rev 5177)
@@ -19,16 +19,19 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.Payload;
+import org.exoplatform.services.jcr.impl.Constants;
+import java.io.Externalizable;
import java.io.IOException;
-import java.io.Serializable;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
/**
* <code>SingletonTokenStream</code> implements a token stream that wraps a
* single value with a given property type. The property type is stored as a
* payload on the single returned token.
*/
-public final class SingletonTokenStream extends TokenStream implements Serializable
+public final class SingletonTokenStream extends TokenStream implements Externalizable
{
/**
@@ -41,12 +44,13 @@
*/
private Payload payload;
+ private boolean hasNext = true;
+
/**
* for serialization
*/
public SingletonTokenStream()
{
- // TODO Auto-generated constructor stub
}
/**
@@ -78,16 +82,40 @@
*/
public Token next(Token reusableToken) throws IOException
{
- if (value == null)
+ if (hasNext)
{
- return null;
+ reusableToken.clear();
+ reusableToken.setTermBuffer(value);
+ reusableToken.setPayload(payload);
+ reusableToken.setStartOffset(0);
+ reusableToken.setEndOffset(value.length());
+ hasNext = false;
+ return reusableToken;
}
- reusableToken.clear();
- reusableToken.setTermBuffer(value);
- reusableToken.setPayload(payload);
- reusableToken.setStartOffset(0);
- reusableToken.setEndOffset(value.length());
- value = null;
- return reusableToken;
+ return null;
}
+
+ /**
+ * {@inheritDoc}
+ */
+ public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException
+ {
+ payload = (Payload)in.readObject();
+ int length = in.readInt();
+ byte[] binValue = new byte[length];
+ in.read(binValue);
+ value = new String(binValue, Constants.DEFAULT_ENCODING);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ public void writeExternal(ObjectOutput out) throws IOException
+ {
+ // skip writing hasNext field
+ out.writeObject(payload);
+ byte[] binValue = value.getBytes(Constants.DEFAULT_ENCODING);
+ out.writeInt(binValue.length);
+ out.write(binValue);
+ }
}
14 years, 5 months
exo-jcr SVN: r5176 - jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase.
by do-not-reply@jboss.org
Author: areshetnyak
Date: 2011-11-10 02:10:17 -0500 (Thu, 10 Nov 2011)
New Revision: 5176
Modified:
jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java
Log:
EXOJCR-1611 : Use cases for testing all restore jobs were commented out. They will be uncommented in the next sprint.
Modified: jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java
===================================================================
--- jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java 2011-11-09 14:14:52 UTC (rev 5175)
+++ jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java 2011-11-10 07:10:17 UTC (rev 5176)
@@ -86,7 +86,7 @@
workspaceBackupRestore("db2", 8);
}
- public void testJobExistingRepositorySameConfigRestoreSingleDB() throws Exception
+ /*public void testJobExistingRepositorySameConfigRestoreSingleDB() throws Exception
{
repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db1", 9);
repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db1", 10);
@@ -156,7 +156,7 @@
{
workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db2", 31);
workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db2", 32);
- }
+ }*/
protected void repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore(String repositoryName,
int number) throws Exception
14 years, 5 months
exo-jcr SVN: r5175 - jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase.
by do-not-reply@jboss.org
Author: areshetnyak
Date: 2011-11-09 09:14:52 -0500 (Wed, 09 Nov 2011)
New Revision: 5175
Modified:
jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java
Log:
EXOJCR-1611 : Use cases for testing all restore jobs were added.
Modified: jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java
===================================================================
--- jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java 2011-11-09 14:14:49 UTC (rev 5174)
+++ jcr/trunk/exo.jcr.component.ext/src/test/java/org/exoplatform/services/jcr/ext/backup/usecase/TestBackupRestore.java 2011-11-09 14:14:52 UTC (rev 5175)
@@ -16,10 +16,13 @@
*/
package org.exoplatform.services.jcr.ext.backup.usecase;
+import org.apache.commons.collections.map.HashedMap;
import org.exoplatform.container.xml.InitParams;
import org.exoplatform.container.xml.PropertiesParam;
+import org.exoplatform.services.jcr.config.RepositoryConfigurationException;
import org.exoplatform.services.jcr.config.WorkspaceEntry;
import org.exoplatform.services.jcr.core.ManageableRepository;
+import org.exoplatform.services.jcr.core.WorkspaceContainerFacade;
import org.exoplatform.services.jcr.ext.backup.BackupChain;
import org.exoplatform.services.jcr.ext.backup.BackupChainLog;
import org.exoplatform.services.jcr.ext.backup.BackupConfig;
@@ -30,13 +33,23 @@
import org.exoplatform.services.jcr.ext.backup.RepositoryBackupChainLog;
import org.exoplatform.services.jcr.ext.backup.RepositoryBackupConfig;
import org.exoplatform.services.jcr.ext.backup.impl.BackupManagerImpl;
+import org.exoplatform.services.jcr.ext.backup.impl.JobExistingRepositoryRestore;
+import org.exoplatform.services.jcr.ext.backup.impl.JobExistingRepositorySameConfigRestore;
+import org.exoplatform.services.jcr.ext.backup.impl.JobExistingWorkspaceRestore;
+import org.exoplatform.services.jcr.ext.backup.impl.JobExistingWorkspaceSameConfigRestore;
import org.exoplatform.services.jcr.ext.backup.impl.JobRepositoryRestore;
import org.exoplatform.services.jcr.ext.backup.impl.JobWorkspaceRestore;
+import org.exoplatform.services.jcr.impl.backup.Backupable;
import org.exoplatform.services.jcr.impl.core.SessionImpl;
+import org.exoplatform.services.jcr.impl.core.SessionRegistry;
import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
import javax.jcr.Node;
+import javax.jcr.RepositoryException;
/**
* Created by The eXo Platform SAS.
@@ -48,6 +61,7 @@
*/
public class TestBackupRestore extends BaseStandaloneBackupRestoreTest
{
+
public void testBackupRestoreExistingRepositorySingleDB() throws Exception
{
repositoryBackupRestore("db1", 1);
@@ -72,6 +86,310 @@
workspaceBackupRestore("db2", 8);
}
+ public void testJobExistingRepositorySameConfigRestoreSingleDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db1", 9);
+ repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db1", 10);
+ }
+
+ public void testJobExistingRepositorySameConfigRestoreMultiDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db2", 10);
+ repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore("db2", 11);
+ }
+
+ public void testJobExistingRepositoryRestoreSingleDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobExistingRepositoryRestore("db1", 12);
+ repositoryBackupRestoreDirectlyOverJobExistingRepositoryRestore("db1", 13);
+ }
+
+ public void testJobExistingRepositoryRestoreMultiDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobExistingRepositoryRestore("db2", 14);
+ repositoryBackupRestoreDirectlyOverJobExistingRepositoryRestore("db2", 15);
+ }
+
+ public void testJobRepositoryRestoreSingleDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobRepositoryRestore("db1", 17);
+ repositoryBackupRestoreDirectlyOverJobRepositoryRestore("db1", 18);
+ }
+
+ public void testJobRepositoryRestoreMultiDB() throws Exception
+ {
+ repositoryBackupRestoreDirectlyOverJobRepositoryRestore("db2", 19);
+ repositoryBackupRestoreDirectlyOverJobRepositoryRestore("db2", 20);
+ }
+
+ public void testJobExistingWorkspaceSameConfigRestoreSingleDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceSameConfigRestore("db1", 21);
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceSameConfigRestore("db1", 22);
+ }
+
+ public void testJobExistingWorkspaceSameConfigRestoreMultiDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceSameConfigRestore("db2", 23);
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceSameConfigRestore("db2", 24);
+ }
+
+ public void testJobExistingWorkspaceRestoreSingleDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceRestore("db1", 25);
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceRestore("db1", 26);
+ }
+
+ public void testJobExistingWorkspaceRestoreMultiDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceRestore("db2", 27);
+ workspaceBackupRestoreDirectlyOverJobExistingWorkspaceRestore("db2", 28);
+ }
+
+ public void testJobWorkspaceRestoreSingleDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db1", 29);
+ workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db1", 30);
+ }
+
+ public void testJobWorkspaceRestoreMultiDB() throws Exception
+ {
+ workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db2", 31);
+ workspaceBackupRestoreDirectlyOverJobWorkspaceRestore("db2", 32);
+ }
+
+ protected void repositoryBackupRestoreDirectlyOverJobExistingRepositorySameConfigRestore(String repositoryName,
+ int number) throws Exception
+ {
+ addConent(repositoryName, number);
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ RepositoryBackupConfig config = new RepositoryBackupConfig();
+ config.setRepository(repositoryName);
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ RepositoryBackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getState() != BackupJob.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // restore
+ RepositoryBackupChainLog rblog = new RepositoryBackupChainLog(new File(bch.getLogFilePath()));
+
+ Map<String, BackupChainLog> workspacesMapping = new HashedMap();
+ Map<String, BackupChainLog> backups = new HashedMap();
+
+ for (String path : rblog.getWorkspaceBackupsInfo())
+ {
+ BackupChainLog bLog = new BackupChainLog(new File(path));
+ backups.put(bLog.getBackupConfig().getWorkspace(), bLog);
+ }
+
+ for (WorkspaceEntry wsEntry : rblog.getOriginalRepositoryEntry().getWorkspaceEntries())
+ {
+ workspacesMapping.put(wsEntry.getName(), backups.get(wsEntry.getName()));
+ }
+
+ JobExistingRepositorySameConfigRestore job =
+ new JobExistingRepositorySameConfigRestore(repositoryService, backupManagerImpl, rblog
+ .getOriginalRepositoryEntry(), workspacesMapping, rblog);
+
+ job.run();
+ assertEquals(JobRepositoryRestore.REPOSITORY_RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
+ protected void repositoryBackupRestoreDirectlyOverJobExistingRepositoryRestore(String repositoryName, int number)
+ throws Exception
+ {
+ addConent(repositoryName, number);
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ RepositoryBackupConfig config = new RepositoryBackupConfig();
+ config.setRepository(repositoryName);
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ RepositoryBackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getState() != BackupJob.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // restore
+ RepositoryBackupChainLog rblog = new RepositoryBackupChainLog(new File(bch.getLogFilePath()));
+
+ Map<String, BackupChainLog> workspacesMapping = new HashedMap();
+ Map<String, BackupChainLog> backups = new HashedMap();
+
+ for (String path : rblog.getWorkspaceBackupsInfo())
+ {
+ BackupChainLog bLog = new BackupChainLog(new File(path));
+ backups.put(bLog.getBackupConfig().getWorkspace(), bLog);
+ }
+
+ for (WorkspaceEntry wsEntry : rblog.getOriginalRepositoryEntry().getWorkspaceEntries())
+ {
+ workspacesMapping.put(wsEntry.getName(), backups.get(wsEntry.getName()));
+ }
+
+ //TODO
+ /*List<WorkspaceContainerFacade> workspacesWaits4Resume = new ArrayList<WorkspaceContainerFacade>();
+ ManageableRepository repository = repositoryService.getRepository(repositoryName);
+ for (String wsName : repository.getWorkspaceNames())
+ {
+ WorkspaceContainerFacade wsContainer = repository.getWorkspaceContainer(wsName);
+ wsContainer.setState(ManageableRepository.SUSPENDED);
+
+ workspacesWaits4Resume.add(wsContainer);
+ }*/
+ //
+
+ JobExistingRepositoryRestore job =
+ new JobExistingRepositoryRestore(repositoryService, backupManagerImpl, rblog.getOriginalRepositoryEntry(),
+ workspacesMapping, rblog);
+
+ job.run();
+
+ //TODO resume components
+ /*for (WorkspaceContainerFacade wsContainer : workspacesWaits4Resume)
+ {
+ wsContainer.setState(ManageableRepository.ONLINE);
+ }
+ //
+ */
+ assertEquals(JobRepositoryRestore.REPOSITORY_RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
+ private int forceCloseSession(String repositoryName, String workspaceName) throws RepositoryException,
+ RepositoryConfigurationException
+ {
+ ManageableRepository mr = repositoryService.getRepository(repositoryName);
+ WorkspaceContainerFacade wc = mr.getWorkspaceContainer(workspaceName);
+
+ SessionRegistry sessionRegistry = (SessionRegistry)wc.getComponent(SessionRegistry.class);
+
+ return sessionRegistry.closeSessions(workspaceName);
+ }
+
+ protected void repositoryBackupRestoreDirectlyOverJobRepositoryRestore(String repositoryName, int number)
+ throws Exception
+ {
+ addConent(repositoryName, number);
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ RepositoryBackupConfig config = new RepositoryBackupConfig();
+ config.setRepository(repositoryName);
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ RepositoryBackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getState() != BackupJob.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // restore
+ RepositoryBackupChainLog rblog = new RepositoryBackupChainLog(new File(bch.getLogFilePath()));
+
+ // clean existing repository
+ // list of components to clean
+ List<Backupable> backupable = new ArrayList<Backupable>();
+
+ //Create local copy of WorkspaceEntry for all workspaces
+ ArrayList<WorkspaceEntry> workspaceList = new ArrayList<WorkspaceEntry>();
+ workspaceList.addAll(rblog.getOriginalRepositoryEntry().getWorkspaceEntries());
+
+ // get all backupable components
+ for (WorkspaceEntry wEntry : workspaceList)
+ {
+ backupable.addAll(repositoryService.getRepository(rblog.getOriginalRepositoryEntry().getName())
+ .getWorkspaceContainer(wEntry.getName()).getComponentInstancesOfType(Backupable.class));
+ }
+
+ //close all session
+ for (WorkspaceEntry wEntry : workspaceList)
+ {
+ forceCloseSession(rblog.getOriginalRepositoryEntry().getName(), wEntry.getName());
+ }
+
+ //remove repository
+ repositoryService.removeRepository(rblog.getOriginalRepositoryEntry().getName());
+
+ // clean
+ for (Backupable component : backupable)
+ {
+ component.clean();
+ }
+
+ Map<String, BackupChainLog> workspacesMapping = new HashedMap();
+ Map<String, BackupChainLog> backups = new HashedMap();
+
+ for (String path : rblog.getWorkspaceBackupsInfo())
+ {
+ BackupChainLog bLog = new BackupChainLog(new File(path));
+ backups.put(bLog.getBackupConfig().getWorkspace(), bLog);
+ }
+
+ for (WorkspaceEntry wsEntry : rblog.getOriginalRepositoryEntry().getWorkspaceEntries())
+ {
+ workspacesMapping.put(wsEntry.getName(), backups.get(wsEntry.getName()));
+ }
+
+ JobRepositoryRestore job =
+ new JobRepositoryRestore(repositoryService, backupManagerImpl, rblog.getOriginalRepositoryEntry(),
+ workspacesMapping, rblog);
+
+ job.run();
+ assertEquals(JobRepositoryRestore.REPOSITORY_RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
protected void workspaceBackupRestore(String repositoryName, int number) throws Exception
{
addConent(repositoryName, number);
@@ -144,6 +462,202 @@
checkConent(repositoryName, number);
}
+ protected void workspaceBackupRestoreDirectlyOverJobExistingWorkspaceSameConfigRestore(String repositoryName,
+ int number) throws Exception
+ {
+ addConent(repositoryName, number);
+ String workspaceName =
+ repositoryService.getRepository(repositoryName).getConfiguration().getSystemWorkspaceName();
+
+ WorkspaceEntry wsEntry = null;
+ for (WorkspaceEntry entry : repositoryService.getRepository(repositoryName).getConfiguration()
+ .getWorkspaceEntries())
+ {
+ if (entry.getName().equals(workspaceName))
+ {
+ wsEntry = entry;
+ break;
+ }
+ }
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ BackupConfig config = new BackupConfig();
+ config.setRepository(repositoryName);
+ config.setWorkspace(workspaceName);
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ BackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getFullBackupState() != BackupChain.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // restore
+ BackupChainLog bclog = new BackupChainLog(new File(bch.getLogFilePath()));
+
+ JobExistingWorkspaceSameConfigRestore job =
+ new JobExistingWorkspaceSameConfigRestore(repositoryService, backupManagerImpl, repositoryName, bclog, bclog
+ .getOriginalWorkspaceEntry());
+
+ job.run();
+ assertEquals(JobWorkspaceRestore.RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
+ /**
+ * JobExistingWorkspaseRrestore is not support restore system workspace,
+ * because repository is not allowed removing system workspace.
+ */
+ protected void workspaceBackupRestoreDirectlyOverJobExistingWorkspaceRestore(String repositoryName,
+ int number) throws Exception
+ {
+ addConent(repositoryName, number);
+
+ WorkspaceEntry wsEntry = null;
+ for (WorkspaceEntry entry : repositoryService.getRepository(repositoryName).getConfiguration()
+ .getWorkspaceEntries())
+ {
+ if (!entry.getName().equals(
+ repositoryService.getRepository(repositoryName).getConfiguration().getSystemWorkspaceName()))
+ {
+ wsEntry = entry;
+ break;
+ }
+ }
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ BackupConfig config = new BackupConfig();
+ config.setRepository(repositoryName);
+ config.setWorkspace(wsEntry.getName());
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ BackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getFullBackupState() != BackupChain.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // restore
+ BackupChainLog bclog = new BackupChainLog(new File(bch.getLogFilePath()));
+
+ JobExistingWorkspaceRestore job =
+ new JobExistingWorkspaceRestore(repositoryService, backupManagerImpl, repositoryName, bclog, bclog
+ .getOriginalWorkspaceEntry());
+
+ job.run();
+ assertEquals(JobWorkspaceRestore.RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
+ /**
+ * JobWorkspaseRrestore is not support restore system workspace,
+ * because repository is not allowed removing system workspace.
+ */
+ protected void workspaceBackupRestoreDirectlyOverJobWorkspaceRestore(String repositoryName, int number)
+ throws Exception
+ {
+ addConent(repositoryName, number);
+
+ WorkspaceEntry wsEntry = null;
+ for (WorkspaceEntry entry : repositoryService.getRepository(repositoryName).getConfiguration()
+ .getWorkspaceEntries())
+ {
+ if (!entry.getName().equals(
+ repositoryService.getRepository(repositoryName).getConfiguration().getSystemWorkspaceName()))
+ {
+ wsEntry = entry;
+ break;
+ }
+ }
+
+ String workspaceName = wsEntry.getName();
+
+ BackupManagerImpl backupManagerImpl = (BackupManagerImpl)getBackupManager();
+ backupManagerImpl.start();
+
+ File backDir = new File("target/backup/" + repositoryName);
+ backDir.mkdirs();
+
+ BackupConfig config = new BackupConfig();
+ config.setRepository(repositoryName);
+ config.setWorkspace(workspaceName);
+ config.setBackupType(BackupManager.FULL_BACKUP_ONLY);
+ config.setBackupDir(backDir);
+
+ BackupChain bch = backupManagerImpl.startBackup(config);
+
+ // wait till full backup will stop
+ while (bch.getFullBackupState() != BackupChain.FINISHED)
+ {
+ Thread.yield();
+ Thread.sleep(30);
+ }
+
+ if (bch != null)
+ {
+ backupManagerImpl.stopBackup(bch);
+ }
+
+ // clean existed workspace
+ // get all backupable components
+ List<Backupable> backupable =
+ repositoryService.getRepository(repositoryName).getWorkspaceContainer(wsEntry.getName())
+ .getComponentInstancesOfType(Backupable.class);
+
+ // close all session
+ forceCloseSession(repositoryName, wsEntry.getName());
+
+ repositoryService.getRepository(repositoryName).removeWorkspace(wsEntry.getName());
+
+ // clean
+ for (Backupable component : backupable)
+ {
+ component.clean();
+ }
+
+ // restore
+ BackupChainLog bclog = new BackupChainLog(new File(bch.getLogFilePath()));
+
+ JobWorkspaceRestore job =
+ new JobWorkspaceRestore(repositoryService, backupManagerImpl, repositoryName, bclog, bclog
+ .getOriginalWorkspaceEntry());
+
+ job.run();
+ assertEquals(JobWorkspaceRestore.RESTORE_SUCCESSFUL, job.getStateRestore());
+
+ checkConent(repositoryName, number);
+ }
+
protected void repositoryBackupRestore(String repositoryName, int number) throws Exception
{
addConent(repositoryName, number);
14 years, 5 months
exo-jcr SVN: r5174 - kernel/trunk/exo.kernel.commons/src/main/java/org/exoplatform/services/log/impl.
by do-not-reply@jboss.org
Author: nfilotto
Date: 2011-11-09 09:14:49 -0500 (Wed, 09 Nov 2011)
New Revision: 5174
Modified:
kernel/trunk/exo.kernel.commons/src/main/java/org/exoplatform/services/log/impl/DynamicLocationAwareLogger.java
Log:
EXOJCR-1623: Logs are polluted by logger warning
Modified: kernel/trunk/exo.kernel.commons/src/main/java/org/exoplatform/services/log/impl/DynamicLocationAwareLogger.java
===================================================================
--- kernel/trunk/exo.kernel.commons/src/main/java/org/exoplatform/services/log/impl/DynamicLocationAwareLogger.java 2011-11-08 13:27:10 UTC (rev 5173)
+++ kernel/trunk/exo.kernel.commons/src/main/java/org/exoplatform/services/log/impl/DynamicLocationAwareLogger.java 2011-11-09 14:14:49 UTC (rev 5174)
@@ -69,7 +69,7 @@
// using java reflection library
// also we're determining number of parameters of 'log' method to know
// what slf4j library version we're dealing with
- for (Method m : logger.getClass().getDeclaredMethods())
+ for (Method m : LocationAwareLogger.class.getDeclaredMethods())
{
if ("log".equals(m.getName()))
{
14 years, 5 months
exo-jcr SVN: r5173 - jcr/trunk/exo.jcr.component.ext/src/main/java/org/exoplatform/services/jcr/ext/hierarchy/impl.
by do-not-reply@jboss.org
Author: tolusha
Date: 2011-11-08 08:27:10 -0500 (Tue, 08 Nov 2011)
New Revision: 5173
Modified:
jcr/trunk/exo.jcr.component.ext/src/main/java/org/exoplatform/services/jcr/ext/hierarchy/impl/NodeHierarchyCreatorImpl.java
Log:
EXOJCR-1621: Migration tool and guideline for migration from 1.12.10-GA to 1.14.x
Modified: jcr/trunk/exo.jcr.component.ext/src/main/java/org/exoplatform/services/jcr/ext/hierarchy/impl/NodeHierarchyCreatorImpl.java
===================================================================
--- jcr/trunk/exo.jcr.component.ext/src/main/java/org/exoplatform/services/jcr/ext/hierarchy/impl/NodeHierarchyCreatorImpl.java 2011-11-08 12:57:43 UTC (rev 5172)
+++ jcr/trunk/exo.jcr.component.ext/src/main/java/org/exoplatform/services/jcr/ext/hierarchy/impl/NodeHierarchyCreatorImpl.java 2011-11-08 13:27:10 UTC (rev 5173)
@@ -379,10 +379,8 @@
{
if (!oldDistribution && autoMigrate)
{
-
ManageableRepository repo = jcrService_.getCurrentRepository();
- Session session = repo.login();
-
+ Session session = repo.getSystemSession(repo.getConfiguration().getDefaultWorkspaceName());
try
{
String userPath = getJcrPath(USERS_PATH);
14 years, 5 months
exo-jcr SVN: r5172 - jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage.
by do-not-reply@jboss.org
Author: tolusha
Date: 2011-11-08 07:57:43 -0500 (Tue, 08 Nov 2011)
New Revision: 5172
Modified:
jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-myisam-utf8.sql
jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-utf8.sql
Log:
EXOJCR-1612: fix mysql scripts
Modified: jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-myisam-utf8.sql
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-myisam-utf8.sql 2011-11-08 11:06:51 UTC (rev 5171)
+++ jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-myisam-utf8.sql 2011-11-08 12:57:43 UTC (rev 5172)
@@ -13,7 +13,7 @@
N_ORDER_NUM INTEGER,
P_TYPE INTEGER,
P_MULTIVALUED BOOLEAN,
- CONSTRAINT JCR_PK_SITEM PRIMARY KEY(ID)
+ CONSTRAINT JCR_PK_SITEM PRIMARY KEY(ID),
CONSTRAINT JCR_FK_SITEM_PARENT FOREIGN KEY(PARENT_ID) REFERENCES JCR_SITEM(ID)
) ENGINE=MyISAM;
CREATE UNIQUE INDEX JCR_IDX_SITEM_PARENT ON JCR_SITEM(CONTAINER_NAME, PARENT_ID, NAME(255), I_INDEX, I_CLASS, VERSION DESC);
@@ -26,7 +26,7 @@
ORDER_NUM INTEGER NOT NULL,
PROPERTY_ID VARCHAR(56) NOT NULL,
STORAGE_DESC VARCHAR(512),
- CONSTRAINT JCR_PK_SVALUE PRIMARY KEY(ID)
+ CONSTRAINT JCR_PK_SVALUE PRIMARY KEY(ID),
CONSTRAINT JCR_FK_SVALUE_PROPERTY FOREIGN KEY(PROPERTY_ID) REFERENCES JCR_SITEM(ID)
) ENGINE=MyISAM;
CREATE UNIQUE INDEX JCR_IDX_SVALUE_PROPERTY ON JCR_SVALUE(PROPERTY_ID, ORDER_NUM);
Modified: jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-utf8.sql
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-utf8.sql 2011-11-08 11:06:51 UTC (rev 5171)
+++ jcr/trunk/exo.jcr.component.core/src/main/resources/conf/storage/jcr-sjdbc.mysql-utf8.sql 2011-11-08 12:57:43 UTC (rev 5172)
@@ -13,7 +13,7 @@
N_ORDER_NUM INTEGER,
P_TYPE INTEGER,
P_MULTIVALUED BOOLEAN,
- CONSTRAINT JCR_PK_SITEM PRIMARY KEY(ID)
+ CONSTRAINT JCR_PK_SITEM PRIMARY KEY(ID),
CONSTRAINT JCR_FK_SITEM_PARENT FOREIGN KEY(PARENT_ID) REFERENCES JCR_SITEM(ID)
) ENGINE=InnoDB;
CREATE UNIQUE INDEX JCR_IDX_SITEM_PARENT ON JCR_SITEM(CONTAINER_NAME, PARENT_ID, NAME(255), I_INDEX, I_CLASS, VERSION DESC);
@@ -26,7 +26,7 @@
ORDER_NUM INTEGER NOT NULL,
PROPERTY_ID VARCHAR(56) NOT NULL,
STORAGE_DESC VARCHAR(512),
- CONSTRAINT JCR_PK_SVALUE PRIMARY KEY(ID)
+ CONSTRAINT JCR_PK_SVALUE PRIMARY KEY(ID),
CONSTRAINT JCR_FK_SVALUE_PROPERTY FOREIGN KEY(PROPERTY_ID) REFERENCES JCR_SITEM(ID)
) ENGINE=InnoDB;
CREATE UNIQUE INDEX JCR_IDX_SVALUE_PROPERTY ON JCR_SVALUE(PROPERTY_ID, ORDER_NUM);
14 years, 5 months
exo-jcr SVN: r5171 - jcr/trunk/applications/exo.jcr.ear.
by do-not-reply@jboss.org
Author: zavizionov
Date: 2011-11-08 06:06:51 -0500 (Tue, 08 Nov 2011)
New Revision: 5171
Modified:
jcr/trunk/applications/exo.jcr.ear/pom.xml
Log:
EXOJCR-1495 TESTING: Image does not upload
Modified: jcr/trunk/applications/exo.jcr.ear/pom.xml
===================================================================
--- jcr/trunk/applications/exo.jcr.ear/pom.xml 2011-11-08 08:45:44 UTC (rev 5170)
+++ jcr/trunk/applications/exo.jcr.ear/pom.xml 2011-11-08 11:06:51 UTC (rev 5171)
@@ -125,6 +125,11 @@
<groupId>org.infinispan</groupId>
<artifactId>infinispan-cachestore-jdbc</artifactId>
</dependency>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <scope>runtime</scope>
+ </dependency>
<!-- Excludes jboss-logging-spi since it is in conflict with the
latest version that is required by infinispan -->
<dependency>
14 years, 5 months
exo-jcr SVN: r5170 - in jcr/trunk/exo.jcr.component.core/src: test/java/org/exoplatform/services/jcr/impl and 1 other directory.
by do-not-reply@jboss.org
Author: zavizionov
Date: 2011-11-08 03:45:44 -0500 (Tue, 08 Nov 2011)
New Revision: 5170
Modified:
jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jdbc/JDBCWorkspaceDataContainerChecker.java
jcr/trunk/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/TestRepositoryCheckController.java
Log:
EXOJCR-1613 TestRepositoryCheckController failed on Oracle DB
Modified: jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jdbc/JDBCWorkspaceDataContainerChecker.java
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jdbc/JDBCWorkspaceDataContainerChecker.java 2011-11-08 07:47:11 UTC (rev 5169)
+++ jcr/trunk/exo.jcr.component.core/src/main/java/org/exoplatform/services/jcr/impl/storage/jdbc/JDBCWorkspaceDataContainerChecker.java 2011-11-08 08:45:44 UTC (rev 5170)
@@ -122,68 +122,79 @@
// preload queries
queries.add(new InspectionQuery(jdbcDataContainer.multiDb
- ? "select * from JCR_MITEM as I where NOT EXISTS(select * from JCR_MITEM AS P where P.ID = I.PARENT_ID)"
- : "select * from JCR_SITEM as I where I.CONTAINER_NAME='" + jdbcDataContainer.containerName
- + "' and NOT EXISTS(select * from JCR_SITEM AS P where P.ID = I.PARENT_ID)", new String[]{
+ ? "select * from JCR_MITEM I where NOT EXISTS(select * from JCR_MITEM P where P.ID = I.PARENT_ID)"
+ : "select * from JCR_SITEM I where I.CONTAINER_NAME='" + jdbcDataContainer.containerName
+ + "' and NOT EXISTS(select * from JCR_SITEM P where P.ID = I.PARENT_ID)", new String[]{
DBConstants.COLUMN_ID, DBConstants.COLUMN_PARENTID, DBConstants.COLUMN_NAME, DBConstants.COLUMN_CLASS},
"Items that do not have parent nodes", InspectionStatus.ERR));
queries
.add(new InspectionQuery(
jdbcDataContainer.multiDb
- ? "select * from JCR_MITEM as N where N.I_CLASS=1 and NOT EXISTS (select * from JCR_MITEM AS P where P.I_CLASS=2 and P.PARENT_ID=N.ID)"
- : "select * from JCR_SITEM as N where N.CONTAINER_NAME='"
+ ? "select * from JCR_MITEM N where N.I_CLASS=1 and NOT EXISTS (select * from JCR_MITEM P where P.I_CLASS=2 and P.PARENT_ID=N.ID)"
+ : "select * from JCR_SITEM N where N.CONTAINER_NAME='"
+ jdbcDataContainer.containerName
- + "' and N.I_CLASS=1 and NOT EXISTS (select * from JCR_SITEM AS P where P.I_CLASS=2 and P.PARENT_ID=N.ID and P.CONTAINER_NAME='"
+ + "' and N.I_CLASS=1 and NOT EXISTS (select * from JCR_SITEM P where P.I_CLASS=2 and P.PARENT_ID=N.ID and P.CONTAINER_NAME='"
+ jdbcDataContainer.containerName + "')",
new String[]{DBConstants.COLUMN_ID, DBConstants.COLUMN_PARENTID, DBConstants.COLUMN_NAME},
"Nodes that do not have at least one property", InspectionStatus.ERR));
queries
.add(new InspectionQuery(
jdbcDataContainer.multiDb
- ? "select * from JCR_MVALUE as V where NOT EXISTS(select * from JCR_MITEM as P where V.PROPERTY_ID = P.ID and P.I_CLASS=2)"
- : "select V.* from JCR_SVALUE as V, JCR_SITEM as I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ ? "select * from JCR_MVALUE V where NOT EXISTS(select * from JCR_MITEM P where V.PROPERTY_ID = P.ID and P.I_CLASS=2)"
+ : "select V.* from JCR_SVALUE V, JCR_SITEM I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ jdbcDataContainer.containerName
- + "' and NOT EXISTS(select * from JCR_SITEM as P where P.CONTAINER_NAME='"
+ + "' and NOT EXISTS(select * from JCR_SITEM P where P.CONTAINER_NAME='"
+ jdbcDataContainer.containerName + "' and V.PROPERTY_ID = P.ID and P.I_CLASS=2)", new String[]{
DBConstants.COLUMN_ID, DBConstants.COLUMN_VPROPERTY_ID},
"All value records that has not owner-property record", InspectionStatus.ERR));
queries
.add(new InspectionQuery(
jdbcDataContainer.multiDb
- ? "select * from JCR_MITEM as P where P.I_CLASS=2 and NOT EXISTS( select * from JCR_MVALUE as V where V.PROPERTY_ID=P.ID)"
- : "select * from JCR_SITEM as P where P.CONTAINER_NAME='" + jdbcDataContainer.containerName
- + "' and P.I_CLASS=2 and NOT EXISTS( select * from JCR_SVALUE as V where V.PROPERTY_ID=P.ID)",
+ ? "select * from JCR_MITEM P where P.I_CLASS=2 and NOT EXISTS( select * from JCR_MVALUE V where V.PROPERTY_ID=P.ID)"
+ : "select * from JCR_SITEM P where P.CONTAINER_NAME='" + jdbcDataContainer.containerName
+ + "' and P.I_CLASS=2 and NOT EXISTS( select * from JCR_SVALUE V where V.PROPERTY_ID=P.ID)",
new String[]{DBConstants.COLUMN_ID, DBConstants.COLUMN_PARENTID, DBConstants.COLUMN_NAME},
"All properties that have not value record.", InspectionStatus.WARN));
+
+ // The differences in the queries by DB dialect.
+ // Oracle doesn't work correct with default query because empty value stored as null value.
+ String statement;
+ if (jdbcDataContainer.dbDialect.equals(DBConstants.DB_DIALECT_SYBASE)) {
+ statement = jdbcDataContainer.multiDb
+ ? "select * from JCR_MVALUE where (STORAGE_DESC is null and DATA like null) or (STORAGE_DESC is not null and not DATA like null)"
+ : "select V.* from JCR_SVALUE V, JCR_SITEM I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ + jdbcDataContainer.containerName
+ + "' AND ((STORAGE_DESC is null and DATA like null) or (STORAGE_DESC is not null and not DATA like null))";
+ } else if (jdbcDataContainer.dbDialect.equals(DBConstants.DB_DIALECT_ORACLE) || jdbcDataContainer.dbDialect.equals(DBConstants.DB_DIALECT_ORACLEOCI)) {
+ statement = jdbcDataContainer.multiDb
+ ? "select * from JCR_MVALUE where (STORAGE_DESC is not null and DATA is not null)"
+ : "select V.* from JCR_SVALUE V, JCR_SITEM I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ + jdbcDataContainer.containerName
+ + "' AND (STORAGE_DESC is not null and DATA is not null)";
+ } else {
+ statement = jdbcDataContainer.multiDb
+ ? "select * from JCR_MVALUE where (STORAGE_DESC is null and DATA is null) or (STORAGE_DESC is not null and DATA is not null)"
+ : "select V.* from JCR_SVALUE V, JCR_SITEM I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ + jdbcDataContainer.containerName
+ + "' AND ((STORAGE_DESC is null and DATA is null) or (STORAGE_DESC is not null and DATA is not null))";
+ }
queries
- .add(new InspectionQuery(
- jdbcDataContainer.dbDialect.equals(DBConstants.DB_DIALECT_SYBASE)
- ? jdbcDataContainer.multiDb
- ? "select * from JCR_MVALUE where (STORAGE_DESC is null and DATA like null) or (STORAGE_DESC is not null and not DATA like null)"
- : "select V.* from JCR_SVALUE as V, JCR_SITEM as I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
- + jdbcDataContainer.containerName
- + "' AND ((STORAGE_DESC is null and DATA like null) or (STORAGE_DESC is not null and not DATA like null))"
- : jdbcDataContainer.multiDb
- ? "select * from JCR_MVALUE where (STORAGE_DESC is null and DATA is null) or (STORAGE_DESC is not null and DATA is not null)"
- : "select V.* from JCR_SVALUE as V, JCR_SITEM as I where V.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
- + jdbcDataContainer.containerName
- + "' AND ((STORAGE_DESC is null and DATA is null) or (STORAGE_DESC is not null and DATA is not null))",
- new String[]{DBConstants.COLUMN_ID}, "Incorrect JCR_VALUE records", InspectionStatus.ERR));
+ .add(new InspectionQuery(statement, new String[]{DBConstants.COLUMN_ID}, "Incorrect JCR_VALUE records", InspectionStatus.ERR));
queries
.add(new InspectionQuery(
jdbcDataContainer.multiDb
- ? "select * from JCR_MITEM AS P where P.P_TYPE=9 and NOT EXISTS( select * from JCR_MREF AS R where P.ID=R.PROPERTY_ID)"
- : "select * from JCR_SITEM AS P where P.CONTAINER_NAME='" + jdbcDataContainer.containerName
- + "' and P.P_TYPE=9 and NOT EXISTS( select * from JCR_SREF AS R where P.ID=R.PROPERTY_ID)",
+ ? "select * from JCR_MITEM P where P.P_TYPE=9 and NOT EXISTS( select * from JCR_MREF R where P.ID=R.PROPERTY_ID)"
+ : "select * from JCR_SITEM P where P.CONTAINER_NAME='" + jdbcDataContainer.containerName
+ + "' and P.P_TYPE=9 and NOT EXISTS( select * from JCR_SREF R where P.ID=R.PROPERTY_ID)",
new String[]{DBConstants.COLUMN_ID, DBConstants.COLUMN_PARENTID, DBConstants.COLUMN_NAME},
"Reference properties without reference records", InspectionStatus.ERR));
// properties can refer to missing node. It is possible to perform this usecase via JCR API with no exceptions
queries.add(new InspectionQuery(jdbcDataContainer.multiDb
- ? "select * from JCR_MREF AS R where NOT EXISTS(select * from JCR_MITEM AS N where R.NODE_ID=N.ID)"
- : "select * from JCR_SREF AS R, JCR_SITEM as I where R.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ ? "select * from JCR_MREF R where NOT EXISTS(select * from JCR_MITEM N where R.NODE_ID=N.ID)"
+ : "select * from JCR_SREF R, JCR_SITEM I where R.PROPERTY_ID = I.ID and I.CONTAINER_NAME='"
+ jdbcDataContainer.containerName
- + "' and NOT EXISTS(select * from JCR_SITEM AS N where N.CONTAINER_NAME='"
+ + "' and NOT EXISTS(select * from JCR_SITEM N where N.CONTAINER_NAME='"
+ jdbcDataContainer.containerName + "' and R.NODE_ID=N.ID)", new String[]{"NODE_ID", "PROPERTY_ID",
DBConstants.COLUMN_VORDERNUM},
"Reference records that linked to unexisted nodes. Can be normal for some usecases.", InspectionStatus.WARN));
@@ -311,7 +322,7 @@
connection
.prepareStatement(jdbcDataContainer.multiDb
? "SELECT PROPERTY_ID, ORDER_NUM, STORAGE_DESC from JCR_MVALUE where STORAGE_DESC is not null"
- : "SELECT V.PROPERTY_ID, V.ORDER_NUM, V.STORAGE_DESC from JCR_SVALUE as V, JCR_SITEM as I where I.CONTAINER_NAME='"
+ : "SELECT V.PROPERTY_ID, V.ORDER_NUM, V.STORAGE_DESC from JCR_SVALUE V, JCR_SITEM I where I.CONTAINER_NAME='"
+ jdbcDataContainer.containerName + "' and V.PROPERTY_ID = I.ID and STORAGE_DESC is not null");
resultSet = st.executeQuery();
Modified: jcr/trunk/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/TestRepositoryCheckController.java
===================================================================
--- jcr/trunk/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/TestRepositoryCheckController.java 2011-11-08 07:47:11 UTC (rev 5169)
+++ jcr/trunk/exo.jcr.component.core/src/test/java/org/exoplatform/services/jcr/impl/TestRepositoryCheckController.java 2011-11-08 08:45:44 UTC (rev 5170)
@@ -73,10 +73,9 @@
public void testDB()
{
String result = checkController.checkRepositoryDataConsistency(new DataStorage[]{DataStorage.DB});
- assertTrue(result.equals("Repository data is consistent. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath())
- || result.equals("Repository data is consistent, except some warnings. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath()));
+ assertNotNull(result);
+ assertTrue("Repository data is not consistent, result: " + result, result
+ .startsWith("Repository data is consistent"));
}
public void testValueStorage() throws Exception
@@ -91,8 +90,9 @@
root.save();
String result = checkController.checkRepositoryDataConsistency(new DataStorage[]{DataStorage.VALUE_STORAGE});
- assertEquals("Repository data is consistent. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath(), result);
+ assertNotNull(result);
+ assertTrue("Repository data is not consistent, result: " + result, result
+ .startsWith("Repository data is consistent"));
}
finally
{
@@ -104,8 +104,9 @@
public void testSearchIndex()
{
String result = checkController.checkRepositoryDataConsistency(new DataStorage[]{DataStorage.LUCENE_INDEX});
- assertEquals("Repository data is consistent. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath(), result);
+ assertNotNull(result);
+ assertTrue("Repository data is not consistent, result: " + result, result
+ .startsWith("Repository data is consistent"));
}
public void testAll()
@@ -113,9 +114,8 @@
String result =
checkController.checkRepositoryDataConsistency(new DataStorage[]{DataStorage.DB, DataStorage.VALUE_STORAGE,
DataStorage.LUCENE_INDEX});
- assertTrue(result.equals("Repository data is consistent. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath())
- || result.equals("Repository data is consistent, except some warnings. See full report by path "
- + checkController.getLastLogFile().getAbsolutePath()));
+ assertNotNull(result);
+ assertTrue("Repository data is not consistent, result: " + result, result
+ .startsWith("Repository data is consistent"));
}
}
14 years, 5 months