使用 JPA 备份数据库(设计)
我有一段大体可用的代码,但它并不总是正常工作。以下是我的做法:
创建备份
- 为源数据库创建实体管理器
- 为目标数据库(嵌入式 Derby 数据库)创建实体管理器
- 复制实体(选择表的所有条目(现在对表顺序进行硬编码)并复制基本上是全选并从源中分离实体并保留在目标上)
- 压缩嵌入式 Derby 数据库。
从备份加载
- 解压缩备份
- 执行备份
- 清理目标数据库(删除所有表)
- 复制实体
在某些时候,我会使用 JPA 2 元数据来获取要复制的表并选择它们需要复制的顺序(由于限制)。
由于某种原因,这种方法并不总是有效:我发现有“丢失”的条目没有被恢复。
代码如下:
package com.bluecubs.xinco.core.server;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
/**
 * This is a complex task and is heavily dependent on the architecture
 * of the database.
 *
 * Data needs to be stored in a particular order into the database to comply
 * with database constraints. This order can be observed in a dump file or
 * create script like the ones generated from MySQL Workbench; using one of
 * those should be enough. In case such a tool is not available, the logic is
 * basically to populate tables from the outside inwards: from the tables with
 * no relationships (or only one) toward the more complex ones. In summary,
 * before a table is populated, all of its related tables should already be
 * populated (if we have identifying relationships).
 *
 * @author Javier A. Ortiz Bultrón <[email protected]>
 */
public class XincoBackupManager {
//Lazily created singleton (see get()); everything else is static.
private static XincoBackupManager instance;
//Factory for the live (source) database, obtained from XincoDBManager.
private static EntityManagerFactory liveEMF;
//Factory for the embedded Derby database used as the backup target.
private static EntityManagerFactory backupEMF;
//Entity managers for the live database and the backup database respectively.
private static EntityManager live, backup;
//Entity names in the order rows must be copied to satisfy FK constraints.
private static final ArrayList<String> tables = new ArrayList<String>();
//Zip archive produced by the most recent backup (see getLast()).
private static XincoBackupFile last;
//Backup root folder, read from the "setting.backup.path" setting.
private static String backupPath;
//Per-table record counts gathered during the last backup/restore run.
public static HashMap<String, Integer> stats = new HashMap<String, Integer>();
static {
//Non-order-critical tables
tables.add("XincoCoreAceT");
tables.add("XincoCoreDataT");
tables.add("XincoCoreDataTypeAttributeT");
tables.add("XincoCoreGroupT");
tables.add("XincoCoreLanguageT");
tables.add("XincoCoreNodeT");
tables.add("XincoCoreUserHasXincoCoreGroupT");
tables.add("XincoCoreUserT");
tables.add("XincoSettingT");
tables.add("XincoDependencyTypeT");
tables.add("XincoCoreDataHasDependencyT");
tables.add("XincoSetting");
tables.add("XincoId");
//Order critical tables
tables.add("XincoCoreLanguage");
tables.add("XincoCoreNode");
tables.add("XincoCoreDataType");
tables.add("XincoCoreData");
tables.add("XincoDependencyType");
tables.add("XincoCoreDataHasDependency");
tables.add("XincoCoreUser");
tables.add("XincoCoreUserModifiedRecord");
tables.add("XincoCoreGroup");
tables.add("XincoCoreAce");
tables.add("XincoCoreUserHasXincoCoreGroup");
tables.add("XincoAddAttribute");
tables.add("XincoCoreDataTypeAttribute");
tables.add("XincoCoreLog");
}
/**
 * Returns the singleton instance, creating it on first use.
 * NOTE(review): lazy init is not synchronized — confirm single-threaded use.
 *
 * @return the shared XincoBackupManager
 */
public static XincoBackupManager get() {
if (instance == null) {
instance = new XincoBackupManager();
}
return instance;
}
/**
 * Points Derby at the given directory and loads the embedded driver.
 * Must be called before any embedded-Derby connection is opened.
 *
 * @param systemDir directory used as derby.system.home
 */
private static void setDBSystemDir(String systemDir) {
// Set the db system directory.
System.setProperty("derby.system.home", systemDir);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Derby home set at: {0}", systemDir);
try {
//Start the embeded DB
//Instantiating the driver registers it with DriverManager.
Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
/**
 * (Re)creates the two entity manager factories.
 * NOTE(review): failures are only logged, leaving liveEMF/backupEMF null;
 * callers will then fail later with a NullPointerException.
 */
private static void initConnections() {
try {
liveEMF = XincoDBManager.getEntityManagerFactory();
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
try {
backupEMF = Persistence.createEntityManagerFactory("XincoBackup");
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
/**
 * Backs up the live database into a fresh embedded Derby database under
 * <backup path>/<date> and zips the result.
 * Side effects: locks/unlocks the database via XincoDBManager.setLocked,
 * fills stats, and updates last with the produced archive.
 *
 * @return true on success, false on any logged failure
 */
protected static boolean backup() throws XincoException {
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Initializing connections...");
initConnections();
stats.clear();
backupPath = XincoSettingServer.getSetting("setting.backup.path").getString_value();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
live = liveEMF.createEntityManager();
//Prepare the backup repository. Create dirs if needed.
File backupDir = new File(backupPath);
backupDir.mkdirs();
//Create folder for this backup
SimpleDateFormat format = new SimpleDateFormat("MM-dd-yyyy");
File backupNewDir = new File(backupPath + System.getProperty("file.separator")
+ format.format(new Date()));
backupNewDir.mkdirs();
/*
* Make sure there's no derby database stuff in the folder.
* Any previous interrupted backup might left corrupted database files.
*/
File tempDir = new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "xinco");
if (tempDir.exists()) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.WARNING,
"Deleting potentially corrupted database files at: {0}", tempDir);
FileUtils.deleteDirectory(tempDir);
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Done!");
}
/**
* Prepare system to use derby
*/
setDBSystemDir(backupNewDir.getAbsolutePath());
backup = backupEMF.createEntityManager();
//Copy each table, in constraint-safe order, into the Derby database.
for (String s : tables) {
copyEntities(s, live, backup);
}
/**
* At this point we should have a <Backup Database name> folder in
* <Backup Path>/<Date>.
* Lets zip them for storage.
*/
format = new SimpleDateFormat("MM dd yyyy hh-mm-ss");
zipBackupFiles(backupNewDir, backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "Xinco Backup " + format.format(new Date()));
//Stop Derby database in order to delete
try {
DriverManager.getConnection("jdbc:derby:;shutdown=true");
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
}
//Delete backed up files
//The database folder name is the URL text between the last ':' and the first ';'.
String dbName = (String) backup.getProperties().get("javax.persistence.jdbc.url");
dbName = dbName.substring(dbName.lastIndexOf(":") + 1, dbName.indexOf(";"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting temp folder: {0}", dbName);
FileUtils.deleteDirectory(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + dbName));
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} finally {
//Always release the entity managers and the backup factory.
if (live != null && live.isOpen()) {
live.close();
}
if (backup != null && backup.isOpen()) {
backup.close();
}
if (backupEMF != null && backupEMF.isOpen()) {
backupEMF.close();
}
}
XincoDBManager.setLocked(false);
return true;
}
/**
 * Zips every non-zip file under path into zipName (".zip" appended if
 * missing) and records the archive in last.
 * NOTE(review): the ZipOutputStream and FileInputStream are not closed if an
 * IOException occurs mid-copy; also last is re-assigned on every loop pass
 * (and before out.close()), and is never set when the folder is empty.
 *
 * @param path folder whose contents are archived
 * @param zipName destination archive name
 * @throws XincoException wrapping any IOException
 */
private static void zipBackupFiles(File path, String zipName) throws XincoException {
if (!zipName.endsWith(".zip")) {
zipName += ".zip";
}
// These are the files to include in the ZIP file
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
if (file.isDirectory()) {
return true;
}
//Ignore other backup files
if (file.isFile() && !file.getName().endsWith(".zip")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
@SuppressWarnings("unchecked")
Collection<File> fileList = FileUtils.listFiles(path, filter, TrueFileFilter.INSTANCE);
Object[] files = fileList.toArray();
// Create a buffer for reading the files
byte[] buf = new byte[1024];
try {
// Create the ZIP file
ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipName));
// Compress the files
for (int i = 0; i < files.length; i++) {
FileInputStream in = new FileInputStream((File) files[i]);
String fileName = ((File) files[i]).getPath();
//Remove not needed folders
fileName = fileName.substring(fileName.indexOf(path.getAbsolutePath()) + path.getAbsolutePath().length() + 1);
// Add ZIP entry to output stream.
out.putNextEntry(new ZipEntry(fileName));
// Transfer bytes from the file to the ZIP file
int len;
while ((len = in.read(buf)) > 0) {
out.write(buf, 0, len);
}
// Complete the entry
out.closeEntry();
in.close();
last = new XincoBackupFile(new File(zipName));
}
// Complete the ZIP file
out.close();
} catch (IOException e) {
throw new XincoException("Error zipping backup: " + e.getLocalizedMessage());
}
}
/**
 * Copies every row of one table from source to dest, one transaction per
 * row, then verifies the destination row count matches the source.
 * NOTE(review): source.clear() detaches every loaded entity, so
 * dest.contains(...) below is always false for a fresh destination context
 * and persist() is then called on detached instances carrying IDs — this is
 * provider-dependent and a likely cause of the reported "lost" entries;
 * merge() would handle both cases. Also, clearing before iterating means any
 * lazily-loaded relationship not already fetched is unavailable.
 *
 * @param table entity name; "<table>.findAll" must exist as a named query
 * @param source entity manager to read from
 * @param dest entity manager to write to
 */
private static void copyEntities(String table, EntityManager source, EntityManager dest) {
List<Object> result, result2;
result = source.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying from table: {0}", table);
int i = 0;
source.clear();
for (Object o : result) {
i++;
Class<?> persistenceClass = null;
try {
persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
dest.getTransaction().begin();
if (dest.contains(persistenceClass.cast(o))) {
//If no exception do a merge because it exists already
dest.merge(persistenceClass.cast(o));
} else {
dest.persist(persistenceClass.cast(o));
}
dest.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("No persistence enitiy defined for table: " + table);
}catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("Exception copying: " + o);
}
}
stats.put(table, i);
result2 = dest.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying for table: {0} completed! Amount of records: {1}",
new Object[]{table, i});
//Make sure the copy is accurate.
//TODO: For some reason XincoId always return twice the amount of records during this routine.
if (result2.size() != result.size() && !table.equals("XincoId")) {
throw new XincoException("Error copying records for table " + table + ". Got " + result2.size() + " instead of " + result.size());
}
result2.clear();
}
/**
 * Lists the "Xinco Backup*.zip" archives under backupPath, newest first.
 * NOTE(review): relies on backupPath having been set by a prior backup().
 *
 * @return backup archives sorted newest-first
 */
@SuppressWarnings({"unchecked"})
public static ArrayList<XincoBackupFile> getBackupFiles() throws XincoException {
// These are the files to include in the ZIP file
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
//Only zip files
if (file.isFile() && file.getName().endsWith(".zip")
&& file.getName().startsWith("Xinco Backup")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
Collection<File> files = FileUtils.listFiles(
new File(backupPath), filter, TrueFileFilter.INSTANCE);
ArrayList<XincoBackupFile> backupFiles = new ArrayList<XincoBackupFile>();
for (File f : files) {
backupFiles.add(new XincoBackupFile(f));
}
//Sort
Collections.sort(backupFiles, new XincoBackupComparator());
//Sorted from oldest to newer so we need to invert the list.
Collections.reverse(backupFiles);
return backupFiles;
}
/**
 * Restores the database from the given archive. A restore point of the
 * current database is taken first; on failure the restore point is loaded
 * back and an XincoException is thrown.
 *
 * @param backupFile archive to restore from
 * @return true on success
 * @throws XincoException if the load fails (after reverting)
 */
protected static boolean restoreFromBackup(XincoBackupFile backupFile) throws XincoException {
try {
stats.clear();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restoring database from: {0}", backupFile.getName());
//First make a backup of current database just in case
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Creating a restore point for your current database...");
backup();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
//Load database from the provided backup
loadDatabaseFromBackup(backupFile);
XincoDBManager.setLocked(false);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restore complete!");
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting restore point...");
FileUtils.forceDelete(last);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Done!");
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
return true;
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
//Recover from last backup
//NOTE(review): if this recovery load also fails, its exception replaces
//the original one and the lock is left set — verify this is acceptable.
loadDatabaseFromBackup(getLast());
XincoDBManager.setLocked(false);
throw new XincoException("Unable to load backup! Database reverted to original state. \n" + ex.getMessage());
}
}
/**
 * Clears the live database (reverse table order), unzips the archive into
 * <backupPath>Temp, then copies every table from the Derby copy back into
 * the live database.
 * NOTE(review): unzipBackup() extracts to backupPath + separator + "Temp",
 * but Derby is pointed at backupPath + "Temp" + separator here; unless
 * backupPath already ends with a separator these are different folders —
 * verify, this could explain restores that silently load nothing.
 * NOTE(review): the outer catch swallows SQLException from the Derby
 * shutdown, but it would also swallow one thrown earlier.
 *
 * @param backupFile archive to load
 */
protected static void loadDatabaseFromBackup(XincoBackupFile backupFile) throws XincoException {
EntityManager backupEM = null;
try {
initConnections();
live = liveEMF.createEntityManager();
//Unzip backup
unzipBackup(backupFile);
//Delete current database (inverse order than writing)
Collections.reverse(tables);
for (String s : tables) {
clearTable(s, live);
}
//Get back to original order
Collections.reverse(tables);
//Make derby start where the backup is
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Connecting to backup data...");
setDBSystemDir(backupPath + "Temp"
+ System.getProperty("file.separator"));
//Connect to backup database
backupEM = Persistence.createEntityManagerFactory("XincoBackup").createEntityManager();
//Start copying
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Starting loading entities...");
for (String s : tables) {
//Copy values from backup
copyEntities(s, backupEM, live);
}
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Load complete!");
//Stop Derby database in order to delete
DriverManager.getConnection("jdbc:derby:;shutdown=true");
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Delete temp folder!");
try {
FileUtils.deleteDirectory(new File(System.getProperty("derby.system.home")));
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
} finally {
if (live != null && live.isOpen()) {
live.close();
}
if (backupEM != null && backupEM.isOpen()) {
backupEM.close();
}
}
}
/**
 * Extracts the given archive into <backupPath>/Temp, recreating any
 * sub-folders. Errors are logged, not rethrown.
 * NOTE(review): entry names are split on the platform file separator, but
 * zip entries conventionally use '/'; this assumes the archive was produced
 * on the same platform — verify for cross-platform restores.
 *
 * @param backup archive to extract
 */
private static void unzipBackup(XincoBackupFile backup) {
try {
//Make sure that the temp directory is empty before unzipping
FileUtils.deleteDirectory(new File(backupPath
+ System.getProperty("file.separator") + "Temp"));
byte[] buf = new byte[1024];
ZipInputStream zipinputstream = null;
ZipEntry zipentry;
zipinputstream = new ZipInputStream(
new FileInputStream(backup.getBackupFile()));
zipentry = zipinputstream.getNextEntry();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping backup file: {0}", backup.getName());
while (zipentry != null) {
//for each entry to be extracted
String entryName = zipentry.getName();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Extracting file: {0}", entryName);
int n;
FileOutputStream fileoutputstream;
File newFile = new File(entryName);
String directory = newFile.getParent();
if (directory == null) {
if (newFile.isDirectory()) {
break;
}
}
if (entryName.contains(System.getProperty("file.separator"))) {
//Create any internal folders required
new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName.substring(
0, entryName.lastIndexOf(
System.getProperty("file.separator")))).mkdirs();
} else {
File tempDir = new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator"));
tempDir.mkdirs();
}
fileoutputstream = new FileOutputStream(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName);
while ((n = zipinputstream.read(buf, 0, 1024)) > -1) {
fileoutputstream.write(buf, 0, n);
}
fileoutputstream.close();
zipinputstream.closeEntry();
zipentry = zipinputstream.getNextEntry();
}//while
zipinputstream.close();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping complete!");
} catch (Exception e) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE,
"Error unzipping file!", e);
}
}
/**
 * Deletes every row of one table, either via a table-specific
 * XincoCRUDSpecialCase server class (looked up reflectively in the core and
 * add packages) or row-by-row through the entity manager, then verifies the
 * table is empty.
 *
 * @param table entity name; "<table>.findAll" must exist as a named query
 * @param target entity manager for the database being cleared
 * @throws XincoException declared for callers; row-count mismatch raises
 *         IllegalStateException
 */
private static void clearTable(String table, EntityManager target) throws XincoException {
try {
List<Object> result;
result = target.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0}", table);
int i = 0;
Class<?> serverClass = null;
boolean special = false;
try {
serverClass = Class.forName("com.bluecubs.xinco.core.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex) {
try {
//Class doesn't exist, try in the add folder
serverClass = Class.forName("com.bluecubs.xinco.add.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex1) {
} catch (InstantiationException ex1) {
} catch (NoClassDefFoundError ex1) {
}
} catch (InstantiationException ex) {
} catch (NoClassDefFoundError ex) {
}
if (serverClass != null && special) {
((XincoCRUDSpecialCase) serverClass.newInstance()).clearTable();
special = false;
} else {
for (Object o : result) {
i++;
try {
Class<?> persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
target.getTransaction().begin();
target.remove(persistenceClass.cast(o));
target.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
}
//Verify the table really is empty before moving on.
result = target.createNamedQuery(table + ".findAll").getResultList();
if (!result.isEmpty()) {
throw new IllegalStateException("Unable to delete entities: " + result.size());
}
stats.put(table, i);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0} completed! Amount of records removed: {1}", new Object[]{table, i});
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
/**
 * @return the last
 */
public static XincoBackupFile getLast() {
return last;
}
}
设计有什么缺陷吗? 有更好的方法吗? 任何评论都非常受欢迎!
I have code that works most of the time, but not always. Here's my approach:
Creating the backup
- Create Entity Manager for source database
- Create Entity Manager for destination database (embedded Derby Database)
- Copy entities (select all entries of a table — the table order is hard-coded right now — and copy them to the destination database; basically a select-all, then detach the entities from the source and persist them on the destination)
- Zip the embedded Derby database.
Loading from backup
- Unzip backup
- Perform a backup
- Clean destination database (delete all tables)
- Copy entities
At some point I would use JPA 2 Metadata to fetch the tables to be copied and select the order they need to be copied (due to constraints).
For some reason this approach doesn't always work: I see "lost" entries that are not restored.
Here's the code:
package com.bluecubs.xinco.core.server;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
/**
 * This is a complex task and is heavily dependent on the architecture
 * of the database.
 *
 * Data needs to be stored in a particular order into the database to comply
 * with database constraints. This order can be observed in a dump file or
 * create script like the ones generated from MySQL Workbench; using one of
 * those should be enough. In case such a tool is not available, the logic is
 * basically to populate tables from the outside inwards: from the tables with
 * no relationships (or only one) toward the more complex ones. In summary,
 * before a table is populated, all of its related tables should already be
 * populated (if we have identifying relationships).
 *
 * @author Javier A. Ortiz Bultrón <[email protected]>
 */
public class XincoBackupManager {
//Lazily created singleton (see get()); everything else is static.
private static XincoBackupManager instance;
//Factory for the live (source) database, obtained from XincoDBManager.
private static EntityManagerFactory liveEMF;
//Factory for the embedded Derby database used as the backup target.
private static EntityManagerFactory backupEMF;
//Entity managers for the live database and the backup database respectively.
private static EntityManager live, backup;
//Entity names in the order rows must be copied to satisfy FK constraints.
private static final ArrayList<String> tables = new ArrayList<String>();
//Zip archive produced by the most recent backup (see getLast()).
private static XincoBackupFile last;
//Backup root folder, read from the "setting.backup.path" setting.
private static String backupPath;
//Per-table record counts gathered during the last backup/restore run.
public static HashMap<String, Integer> stats = new HashMap<String, Integer>();
static {
//Non-order-critical tables
tables.add("XincoCoreAceT");
tables.add("XincoCoreDataT");
tables.add("XincoCoreDataTypeAttributeT");
tables.add("XincoCoreGroupT");
tables.add("XincoCoreLanguageT");
tables.add("XincoCoreNodeT");
tables.add("XincoCoreUserHasXincoCoreGroupT");
tables.add("XincoCoreUserT");
tables.add("XincoSettingT");
tables.add("XincoDependencyTypeT");
tables.add("XincoCoreDataHasDependencyT");
tables.add("XincoSetting");
tables.add("XincoId");
//Order critical tables
tables.add("XincoCoreLanguage");
tables.add("XincoCoreNode");
tables.add("XincoCoreDataType");
tables.add("XincoCoreData");
tables.add("XincoDependencyType");
tables.add("XincoCoreDataHasDependency");
tables.add("XincoCoreUser");
tables.add("XincoCoreUserModifiedRecord");
tables.add("XincoCoreGroup");
tables.add("XincoCoreAce");
tables.add("XincoCoreUserHasXincoCoreGroup");
tables.add("XincoAddAttribute");
tables.add("XincoCoreDataTypeAttribute");
tables.add("XincoCoreLog");
}
/**
 * Returns the shared XincoBackupManager, creating it on first use
 * (lazy, unsynchronized initialization — same contract as before).
 *
 * @return the singleton instance
 */
public static XincoBackupManager get() {
    if (instance != null) {
        return instance;
    }
    instance = new XincoBackupManager();
    return instance;
}
/**
 * Points the embedded Derby engine at the given directory and loads the
 * driver so it registers itself with DriverManager.
 *
 * @param systemDir directory to use as derby.system.home
 */
private static void setDBSystemDir(String systemDir) {
    final Logger logger = Logger.getLogger(XincoBackupManager.class.getSimpleName());
    // Derby reads this property when the engine boots.
    System.setProperty("derby.system.home", systemDir);
    logger.log(Level.FINEST, "Derby home set at: {0}", systemDir);
    try {
        //Start the embeded DB: instantiating the driver registers it.
        Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
    } catch (ClassNotFoundException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (InstantiationException ex) {
        logger.log(Level.SEVERE, null, ex);
    } catch (IllegalAccessException ex) {
        logger.log(Level.SEVERE, null, ex);
    }
}
/**
 * (Re)creates both entity manager factories: the live database factory from
 * XincoDBManager and the embedded Derby factory for persistence unit
 * "XincoBackup". Failures are logged only, so either field may stay null.
 */
private static void initConnections() {
    final Logger logger = Logger.getLogger(XincoBackupManager.class.getSimpleName());
    try {
        //Live (source) database, managed by the application.
        liveEMF = XincoDBManager.getEntityManagerFactory();
    } catch (XincoException ex) {
        logger.log(Level.SEVERE, null, ex);
    }
    try {
        //Embedded Derby backup database.
        backupEMF = Persistence.createEntityManagerFactory("XincoBackup");
    } catch (Exception ex) {
        logger.log(Level.SEVERE, null, ex);
    }
}
/**
 * Backs up the live database into a fresh embedded Derby database under
 * <backup path>/<date> and zips the result.
 * Side effects: locks/unlocks the database via XincoDBManager.setLocked,
 * fills stats, and updates last with the produced archive.
 *
 * @return true on success, false on any logged failure
 */
protected static boolean backup() throws XincoException {
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Initializing connections...");
initConnections();
stats.clear();
backupPath = XincoSettingServer.getSetting("setting.backup.path").getString_value();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
live = liveEMF.createEntityManager();
//Prepare the backup repository. Create dirs if needed.
File backupDir = new File(backupPath);
backupDir.mkdirs();
//Create folder for this backup
SimpleDateFormat format = new SimpleDateFormat("MM-dd-yyyy");
File backupNewDir = new File(backupPath + System.getProperty("file.separator")
+ format.format(new Date()));
backupNewDir.mkdirs();
/*
* Make sure there's no derby database stuff in the folder.
* Any previous interrupted backup might left corrupted database files.
*/
File tempDir = new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "xinco");
if (tempDir.exists()) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.WARNING,
"Deleting potentially corrupted database files at: {0}", tempDir);
FileUtils.deleteDirectory(tempDir);
//Delete Derby log file
//NOTE(review): forceDelete throws if derby.log is absent — verify it
//always exists alongside a leftover database folder.
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Done!");
}
/**
* Prepare system to use derby
*/
setDBSystemDir(backupNewDir.getAbsolutePath());
backup = backupEMF.createEntityManager();
//Copy each table, in constraint-safe order, into the Derby database.
for (String s : tables) {
copyEntities(s, live, backup);
}
/**
* At this point we should have a <Backup Database name> folder in
* <Backup Path>/<Date>.
* Lets zip them for storage.
*/
format = new SimpleDateFormat("MM dd yyyy hh-mm-ss");
zipBackupFiles(backupNewDir, backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "Xinco Backup " + format.format(new Date()));
//Stop Derby database in order to delete
try {
DriverManager.getConnection("jdbc:derby:;shutdown=true");
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
}
//Delete backed up files
//The database folder name is the URL text between the last ':' and the first ';'.
String dbName = (String) backup.getProperties().get("javax.persistence.jdbc.url");
dbName = dbName.substring(dbName.lastIndexOf(":") + 1, dbName.indexOf(";"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting temp folder: {0}", dbName);
FileUtils.deleteDirectory(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + dbName));
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} finally {
//Always release the entity managers and the backup factory.
if (live != null && live.isOpen()) {
live.close();
}
if (backup != null && backup.isOpen()) {
backup.close();
}
if (backupEMF != null && backupEMF.isOpen()) {
backupEMF.close();
}
}
XincoDBManager.setLocked(false);
return true;
}
/**
 * Zips every non-zip file under path into the archive zipName (".zip" is
 * appended if missing) and records the archive in the static field last.
 *
 * Fixes over the previous version: the ZipOutputStream and each
 * FileInputStream are now closed in finally blocks (they leaked on any
 * IOException mid-copy), and last is assigned once after the archive is
 * successfully closed instead of being re-assigned on every loop pass while
 * the archive was still open.
 *
 * @param path folder whose contents are archived
 * @param zipName destination archive name
 * @throws XincoException wrapping any IOException
 */
private static void zipBackupFiles(File path, String zipName) throws XincoException {
    if (!zipName.endsWith(".zip")) {
        zipName += ".zip";
    }
    // These are the files to include in the ZIP file
    IOFileFilter filter = new IOFileFilter() {
        @Override
        public boolean accept(File file) {
            if (file.isDirectory()) {
                return true;
            }
            //Ignore other backup files
            return file.isFile() && !file.getName().endsWith(".zip");
        }

        @Override
        public boolean accept(File file, String string) {
            throw new UnsupportedOperationException("Not supported yet.");
        }
    };
    @SuppressWarnings("unchecked")
    Collection<File> fileList = FileUtils.listFiles(path, filter, TrueFileFilter.INSTANCE);
    // Create a buffer for reading the files
    byte[] buf = new byte[1024];
    try {
        // Create the ZIP file
        ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipName));
        try {
            for (File current : fileList) {
                FileInputStream in = new FileInputStream(current);
                try {
                    //Store the entry relative to the backup folder root.
                    String fileName = current.getPath();
                    fileName = fileName.substring(fileName.indexOf(path.getAbsolutePath())
                            + path.getAbsolutePath().length() + 1);
                    out.putNextEntry(new ZipEntry(fileName));
                    // Transfer bytes from the file to the ZIP file
                    int len;
                    while ((len = in.read(buf)) > 0) {
                        out.write(buf, 0, len);
                    }
                    out.closeEntry();
                } finally {
                    in.close();
                }
            }
        } finally {
            //Close even on failure so the stream (and file handle) is not leaked.
            out.close();
        }
        //Remember the archive only once it has been completely written.
        //(As before, last is untouched when there was nothing to archive.)
        if (!fileList.isEmpty()) {
            last = new XincoBackupFile(new File(zipName));
        }
    } catch (IOException e) {
        throw new XincoException("Error zipping backup: " + e.getLocalizedMessage());
    }
}
/**
 * Copies every row of one table from source to dest, one transaction per
 * row, then verifies the destination row count matches the source.
 *
 * Fixes over the previous version: after source.clear() every loaded entity
 * is detached, so dest.contains(...) was always false for a fresh
 * destination context and persist() was called on detached instances
 * carrying IDs — behavior that is provider-dependent and the likely cause of
 * "lost" entries. merge() handles both the new and the already-present case
 * per the JPA contract, so it is used unconditionally. The entity class is
 * now resolved once per table, and a failed transaction is rolled back so
 * the destination context stays usable.
 *
 * @param table entity name; "<table>.findAll" must exist as a named query
 * @param source entity manager to read from
 * @param dest entity manager to write to
 */
private static void copyEntities(String table, EntityManager source, EntityManager dest) {
    List<Object> result, result2;
    result = source.createNamedQuery(table + ".findAll").getResultList();
    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
            "Copying from table: {0}", table);
    int i = 0;
    //Detach everything so the instances can be handed to the destination.
    source.clear();
    Class<?> persistenceClass;
    try {
        //Resolve the entity class once per table instead of once per row.
        persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
    } catch (ClassNotFoundException ex) {
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
        throw new XincoException("No persistence entity defined for table: " + table);
    }
    for (Object o : result) {
        i++;
        try {
            dest.getTransaction().begin();
            //merge() copies the detached state into the destination context,
            //inserting or updating as appropriate.
            dest.merge(persistenceClass.cast(o));
            dest.getTransaction().commit();
        } catch (Exception ex) {
            //Leave the destination in a consistent state before giving up.
            if (dest.getTransaction().isActive()) {
                dest.getTransaction().rollback();
            }
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            throw new XincoException("Exception copying: " + o);
        }
    }
    stats.put(table, i);
    result2 = dest.createNamedQuery(table + ".findAll").getResultList();
    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
            "Copying for table: {0} completed! Amount of records: {1}",
            new Object[]{table, i});
    //Make sure the copy is accurate.
    //TODO: For some reason XincoId always return twice the amount of records during this routine.
    if (result2.size() != result.size() && !table.equals("XincoId")) {
        throw new XincoException("Error copying records for table " + table + ". Got " + result2.size() + " instead of " + result.size());
    }
    result2.clear();
}
/**
 * Lists the "Xinco Backup*.zip" archives under backupPath, newest first.
 * Relies on backupPath having been set by a prior backup() run.
 *
 * @return backup archives sorted newest-first
 * @throws XincoException declared for API symmetry with the other helpers
 */
@SuppressWarnings({"unchecked"})
public static ArrayList<XincoBackupFile> getBackupFiles() throws XincoException {
    //Match only the zip archives produced by backup().
    IOFileFilter zipOnly = new IOFileFilter() {
        @Override
        public boolean accept(File file) {
            return file.isFile()
                    && file.getName().endsWith(".zip")
                    && file.getName().startsWith("Xinco Backup");
        }

        @Override
        public boolean accept(File file, String string) {
            throw new UnsupportedOperationException("Not supported yet.");
        }
    };
    Collection<File> found = FileUtils.listFiles(
            new File(backupPath), zipOnly, TrueFileFilter.INSTANCE);
    ArrayList<XincoBackupFile> backupFiles = new ArrayList<XincoBackupFile>();
    for (File archive : found) {
        backupFiles.add(new XincoBackupFile(archive));
    }
    //The comparator orders oldest-first; flip so the newest backup leads.
    Collections.sort(backupFiles, new XincoBackupComparator());
    Collections.reverse(backupFiles);
    return backupFiles;
}
/**
 * Restore the live database from the given backup archive.
 *
 * Sequence: (1) take a fresh backup of the current database as a restore
 * point, (2) lock the database so no one else writes during the restore,
 * (3) load the backup contents, (4) unlock and delete the restore point.
 * If loading fails, the restore point taken in step (1) is loaded back so
 * the database returns to its pre-restore state.
 *
 * NOTE(review): if the recovery load in the catch block throws as well, the
 * lock set in step (2) is never released — verify whether a finally-based
 * unlock is safe here.
 *
 * @param backupFile archive to restore from
 * @return true on success (exceptions are thrown on failure)
 * @throws XincoException if the backup cannot be loaded; the database is
 *         reverted to its original state first
 */
protected static boolean restoreFromBackup(XincoBackupFile backupFile) throws XincoException {
    try {
        stats.clear();
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Restoring database from: {0}", backupFile.getName());
        //First make a backup of current database just in case
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Creating a restore point for your current database...");
        backup();
        //We need to make sure that there's no one in the database
        XincoDBManager.setLocked(true);
        //Load database from the provided backup
        loadDatabaseFromBackup(backupFile);
        XincoDBManager.setLocked(false);
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Restore complete!");
        try {
            //Restore succeeded, so the restore point is no longer needed.
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                    "Deleting restore point...");
            FileUtils.forceDelete(last);
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                    "Done!");
        } catch (IOException ex) {
            //Leftover restore point is harmless; log and continue.
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
        }
        return true;
    } catch (XincoException ex) {
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
        //Recover from last backup
        loadDatabaseFromBackup(getLast());
        XincoDBManager.setLocked(false);
        throw new XincoException("Unable to load backup! Database reverted to original state. \n" + ex.getMessage());
    }
}
/**
 * Replace the live database contents with the entities stored in the given
 * backup archive.
 *
 * Steps: unzip the archive, delete all rows from the live database (child
 * tables first, i.e. reverse table order), point Derby at the unzipped data,
 * copy every table back in constraint order, then shut Derby down and remove
 * the temporary folder.
 *
 * @param backupFile archive to load
 * @throws XincoException propagated from clearTable/copyEntities on failure
 */
protected static void loadDatabaseFromBackup(XincoBackupFile backupFile) throws XincoException {
    EntityManager backupEM = null;
    try {
        initConnections();
        live = liveEMF.createEntityManager();
        //Unzip backup
        unzipBackup(backupFile);
        //Delete current database in inverse order than writing, so child
        //tables are emptied before their parents. Iterate backwards instead
        //of calling Collections.reverse() twice: the old approach mutated
        //the shared list and left it inverted for later calls whenever
        //clearTable threw before the second reverse.
        for (int i = tables.size() - 1; i >= 0; i--) {
            clearTable(tables.get(i), live);
        }
        //Make derby start where the backup is
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Connecting to backup data...");
        setDBSystemDir(backupPath + "Temp"
                + System.getProperty("file.separator"));
        //Connect to backup database
        backupEM = Persistence.createEntityManagerFactory("XincoBackup").createEntityManager();
        //Start copying, in constraint order (parents before children).
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Starting loading entities...");
        for (String s : tables) {
            //Copy values from backup
            copyEntities(s, backupEM, live);
        }
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Load complete!");
        //Stop Derby database in order to delete its files.
        DriverManager.getConnection("jdbc:derby:;shutdown=true");
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Delete temp folder!");
        try {
            FileUtils.deleteDirectory(new File(System.getProperty("derby.system.home")));
        } catch (IOException ex) {
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
        }
    } catch (SQLException e) {
        //Derby signals a successful shutdown by throwing an SQLException,
        //so this is the expected path after "jdbc:derby:;shutdown=true".
    } finally {
        //Always release both entity managers, even on failure.
        if (live != null && live.isOpen()) {
            live.close();
        }
        if (backupEM != null && backupEM.isOpen()) {
            backupEM.close();
        }
    }
}
/**
 * Extract the given backup archive into {@code backupPath/Temp}, clearing
 * that folder first so stale files from a previous restore cannot survive.
 *
 * Streams are closed in finally blocks so a failure halfway through an
 * entry no longer leaks the zip input stream or the current output file.
 *
 * @param backup archive to extract
 */
private static void unzipBackup(XincoBackupFile backup) {
    ZipInputStream zipinputstream = null;
    try {
        //Make sure that the temp directory is empty before unzipping
        FileUtils.deleteDirectory(new File(backupPath
                + System.getProperty("file.separator") + "Temp"));
        byte[] buf = new byte[1024];
        zipinputstream = new ZipInputStream(
                new FileInputStream(backup.getBackupFile()));
        ZipEntry zipentry = zipinputstream.getNextEntry();
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Unzipping backup file: {0}", backup.getName());
        while (zipentry != null) {
            //for each entry to be extracted
            String entryName = zipentry.getName();
            Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                    "Extracting file: {0}", entryName);
            File newFile = new File(entryName);
            //A parentless directory entry ends extraction (original behavior).
            if (newFile.getParent() == null && newFile.isDirectory()) {
                break;
            }
            //NOTE(review): zip entry names use '/' per the zip spec, while
            //this matches the platform file.separator — on Windows nested
            //entries may not be detected. Kept as-is because the matching
            //backup writer (not shown) appears to use the same convention;
            //verify against the zipping code.
            if (entryName.contains(System.getProperty("file.separator"))) {
                //Create any internal folders required
                new File(backupPath
                        + System.getProperty("file.separator") + "Temp"
                        + System.getProperty("file.separator") + entryName.substring(
                        0, entryName.lastIndexOf(
                        System.getProperty("file.separator")))).mkdirs();
            } else {
                File tempDir = new File(backupPath
                        + System.getProperty("file.separator") + "Temp"
                        + System.getProperty("file.separator"));
                tempDir.mkdirs();
            }
            FileOutputStream fileoutputstream = new FileOutputStream(backupPath
                    + System.getProperty("file.separator") + "Temp"
                    + System.getProperty("file.separator") + entryName);
            try {
                int n;
                while ((n = zipinputstream.read(buf, 0, 1024)) > -1) {
                    fileoutputstream.write(buf, 0, n);
                }
            } finally {
                //Close the output file even if reading the entry fails.
                fileoutputstream.close();
            }
            zipinputstream.closeEntry();
            zipentry = zipinputstream.getNextEntry();
        }//while
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Unzipping complete!");
    } catch (Exception e) {
        //Best-effort: failures are logged, not propagated (original contract).
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE,
                "Error unzipping file!", e);
    } finally {
        //Always release the archive stream, even after an exception.
        if (zipinputstream != null) {
            try {
                zipinputstream.close();
            } catch (IOException ex) {
                Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
            }
        }
    }
}
/**
 * Delete every row of the given table through JPA.
 *
 * If a companion class named {@code <table>Server} (looked up first in
 * core.server, then in add.server) implements {@link XincoCRUDSpecialCase},
 * its {@code clearTable()} does the work; otherwise each entity returned by
 * the table's {@code findAll} named query is removed one by one.
 *
 * Fix: the server class is instantiated exactly once and reused, instead of
 * calling {@code newInstance()} twice (once for the instanceof probe, once
 * for the clearTable call) — double construction doubled any constructor
 * side effects and added a second failure point.
 *
 * @param table  entity/table name (also the named-query prefix)
 * @param target entity manager for the database being cleared
 * @throws XincoException propagated on clearing failures
 */
private static void clearTable(String table, EntityManager target) throws XincoException {
    try {
        List<Object> result;
        result = target.createNamedQuery(table + ".findAll").getResultList();
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Cleaning table: {0}", table);
        int i = 0;
        //Probe for an optional special-case handler; instantiate it once.
        Object serverInstance = null;
        try {
            serverInstance = Class.forName(
                    "com.bluecubs.xinco.core.server." + table + "Server").newInstance();
        } catch (ClassNotFoundException ex) {
            try {
                //Class doesn't exist, try in the add folder
                serverInstance = Class.forName(
                        "com.bluecubs.xinco.add.server." + table + "Server").newInstance();
            } catch (ClassNotFoundException ex1) {
                //No server class anywhere: fall through to generic removal.
            } catch (InstantiationException ex1) {
            } catch (NoClassDefFoundError ex1) {
            }
        } catch (InstantiationException ex) {
        } catch (NoClassDefFoundError ex) {
        }
        if (serverInstance instanceof XincoCRUDSpecialCase) {
            //Delegate bulk clearing to the table's special-case handler.
            ((XincoCRUDSpecialCase) serverInstance).clearTable();
        } else {
            //Generic path: remove each entity in its own transaction.
            for (Object o : result) {
                i++;
                try {
                    Class<?> persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
                    target.getTransaction().begin();
                    target.remove(persistenceClass.cast(o));
                    target.getTransaction().commit();
                } catch (ClassNotFoundException ex) {
                    Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
                }
            }
        }
        //Verify the table is really empty before moving on.
        result = target.createNamedQuery(table + ".findAll").getResultList();
        if (!result.isEmpty()) {
            throw new IllegalStateException("Unable to delete entities: " + result.size());
        }
        stats.put(table, i);
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
                "Cleaning table: {0} completed! Amount of records removed: {1}", new Object[]{table, i});
    } catch (IllegalAccessException ex) {
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
    } catch (InstantiationException ex) {
        Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
    }
}
/**
 * Get the most recently created backup file (the restore point taken by
 * {@code backup()}/{@code restoreFromBackup}).
 *
 * @return the last backup file created, or null if no backup has been made
 *         in this session
 */
public static XincoBackupFile getLast() {
    return last;
}
}
Any flaw in the design?
A better way of doing it?
Any comment is more than welcomed!
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(3)
大多数数据库引擎提供命令或工具,允许转储给定数据库的内容(其中一些甚至支持增量备份)。当您有准备使用解决方案时,JPA 只会效率较低、更加复杂,因此我不认为使用 JPA 来完成此任务有什么意义。
对于 Derby,实际上无需执行任何操作:只需 zip/tar(或使用 rsync)数据库文件即可完成。
如果要将一个数据库引擎的内容复制到另一个引擎,请使用 ETL。
另请参阅
Most database engines provides commands or tooling allowing to dump the content of a given database (some of them even supporting incremental backups). JPA will just be less efficient, more complex while you have ready to use solutions so I don't see the point of using JPA for this task.
For Derby, there is actually nothing to do: just zip/tar (or use rsync) the database files and you're done.
And if you want to copy the content of one database engine to another engine, use an ETL.
See also
在数据存储中总是做得更好。一些 JPA 提供商提供了方法。我们提供的是
http://www.datanucleus.org/products/accessplatform/jpa/replication.html
Always better done in the datastore. Some JPA providers provide ways. The one we provide is
http://www.datanucleus.org/products/accessplatform/jpa/replication.html
我发现的一个有趣的选项是 Scriptella ,它可以从 Java 代码中调用。 使用示例。我会尝试一下并发布结果。
An interesting option I've found is Scriptella which can be called from Java code. Usage examples. I'll give it a try and post the results.