Home PC Games Linux Windows Database Network Programming Server Mobile  
           
  Home \ Database \ MySQL backup tool to back up mydumper     - ORA-00600: internal error code, arguments: [keltnfy-ldmInit], [46], [1], [], [], [], [], [] (Database)

- GAMIT10.5 under CentOS installation (Linux)

- Learning C ++ Standard Template Library and data structures (Programming)

- Linux System Getting Started Tutorial: How to find the maximum memory your system supports (Linux)

- To deploy MySQL database with separate read and write OneProxy (Database)

- Erlang concurrency and foundation (Programming)

- After installation of Debian 6.0 do a few things first (Linux)

- Nginx logging client ip (Server)

- Installation Elementary OS Freya 20 things to do (Linux)

- Java class HashSet (Programming)

- To batch create users under Linux (Linux)

- Debian SSD ext4 4K aligned (Linux)

- To change CentOS7 runlevel (Linux)

- Eight kinds of techniques to solve hard problems Linux (Linux)

- CentOS 6.5 Linux System Customization and Packaging Quick Implementation Script (Linux)

- SSH without password (Linux)

- Build your own Python coding environment (Linux)

- Cache implementation APP interacts with the server-side interface control Session (Server)

- Linux Security Module (LSM) Introduction (Linux)

- 8 Docker knowledge you may not know (Server)

 
         
  Backing up MySQL with the mydumper backup tool
     
  Add Date : 2018-11-21      
         
         
         
  Backing up MySQL with the mydumper backup tool
#!/usr/bin/python
import os
import time
import commands
import shutil
import threading
from os.path import join, getsize
import MySQLdb as mydb
# Root directory that all backups are written under.
baseDir = "/data2/backup/backup_data/"
# idc: site tag, 'ns' or 'wx' (per original comment).
# isZip: True to let mydumper compress the backup itself (-c flag).
idc = 'ns'
isZip = True
# Retry a failed backup once when True, after retry_sleep seconds.
is_errRetryBackup = True
retry_sleep = 300
# Date stamp appended to each backup directory name, e.g. 20181121.
backup_date = time.strftime("%Y%m%d")
# mydumper command template: host, port, compress flag, output directory.
cmd = "/usr/local/bin/mydumper -h %s -u root -p password? -P %s %s -t 5 -o %s"
'''
Functional description:
1. Remote batch backup with mydumper; the backup list comes from a config file.
2. Backups are compressed on request (mydumper compresses them itself).
3. A failed backup may be retried once.
4. Backup results are recorded in a database.
'''
def main():
    """Read the backup list and launch one mydumper thread per instance.

    Config-file lines (whitespace separated): host businessName port isMaster.
    Lines starting with '#' and blank lines are ignored.
    """
    thread_pool = []
    # mydumper's own compression flag.
    zip_flag = '-c' if isZip else ''  # original shadowed the builtin 'zip'
    # with-statement closes the config file (the original leaked the handle).
    with open('/data/other_list.cnf', 'r') as f:
        for line in f:
            if not line.startswith('#') and len(line.strip()) > 0:
                fields = line.split()  # original bound this to 'str', shadowing the builtin
                host, businessName, port, isMaster = fields[0], fields[1], fields[2], fields[3]
                # Create the per-business folder on first use.
                dir = baseDir + '/' + businessName
                if not os.path.exists(dir):
                    os.makedirs(dir)
                # Backup directory: <business dir>/<businessName><date>
                dir += "/%s%s" % (businessName, backup_date)
                strcmd = cmd % (host, port, zip_flag, dir)
                th = threading.Thread(target=mydumper,
                                      args=(strcmd, dir, businessName, host, port,
                                            is_errRetryBackup, int(isMaster)))
                thread_pool.append(th)
    if thread_pool:
        for t in thread_pool:
            t.daemon = True
            t.start()
        for t in thread_pool:
            t.join()
def mydumper(sCmd, backupDir, businessName, host, port, is_Retry, isMaster):
    """Run one mydumper backup, record the result in the DB, optionally retry once.

    sCmd         -- fully formatted mydumper command line
    backupDir    -- target directory for this backup
    businessName -- logical name of the backed-up instance
    host, port   -- MySQL server address
    is_Retry     -- when True, retry once after retry_sleep seconds on failure
    isMaster     -- 1 if the backup source is a master, otherwise it is a slave
    """
    master_host = ""
    backup_host = host
    name = businessName
    backup_type = 1
    file = ""
    slave_statement = ""
    # %M is minutes; the original's %m repeated the month in the timestamp.
    start_time = time.strftime("%Y%m%d%H%M%S")
    # Remove any leftover backup from a previous run.
    if os.path.exists(backupDir):
        shutil.rmtree(backupDir)
    # Run the backup.
    returncode, std_err = execute(sCmd)
    stop_time = time.strftime("%Y%m%d%H%M%S")
    if returncode == 0:
        # Any stderr output from mydumper is treated as a failure too.
        if std_err.strip() != "":
            returncode = 123456
        else:
            # Read change-master info from metadata; doubles as a validity check.
            returncode, std_err, master_host, slave_statement = statement(backupDir, backup_host, isMaster)
            if returncode == 0:
                file = backupDir
    if returncode != 0:
        # Mark a failed backup directory with an _ERR suffix.
        errDir = backupDir + "_ERR"
        # Guard: the directory may never have been created (the original
        # renamed unconditionally and could raise OSError here).
        if os.path.exists(backupDir):
            os.rename(backupDir, errDir)
        file = errDir
    # Total size of the backup on disk (0 when the directory is absent).
    file_size = getDirsize(file)
    # Truncate long error text so it fits the 255-char DB column.
    if len(std_err) > 255:
        std_err = std_err[:250] + "..."
    my_args = [idc, master_host, backup_host, name, port, backup_type, file,
               start_time, stop_time, returncode, file_size, slave_statement, std_err]
    # Persist the backup record.
    call_proc(my_args)
    # On failure, retry exactly once: the recursive call passes is_Retry=False.
    if is_Retry and returncode != 0:
        time.sleep(retry_sleep)
        oldfile = sCmd.split('-o')[1]
        pos = oldfile.rfind("/") + 1
        # Mark the retried backup directory with a "ReBackup-" prefix.
        retry_file = oldfile[:pos] + "ReBackup-" + oldfile[pos:]
        retryCmd = sCmd.replace(oldfile, retry_file)
        mydumper(retryCmd, retry_file.strip(), name, host, port, False, isMaster)
def getDirsize(path):
    """Return the total size in bytes of all files under path, recursively."""
    size = 0  # the original used the Python-2-only literal 0L
    for root, dirs, files in os.walk(path):
        size += sum(getsize(join(root, name)) for name in files)
    return size
def statement(path, backup_host, isMaster):
    """Read CHANGE MASTER TO information from mydumper's metadata file.

    During a backup mydumper writes metadata.partial and renames it to
    metadata on completion.  The file holds: the dump start time, the
    master's log-file/log-pos (and, for a slave source, the slave's
    host/log-file/log-pos), and the dump finish time.

    Returns (er_code, er_info, master_host, change_master_sql);
    er_code == 0 on success, 654321 when the metadata file is missing.
    """
    path += "/metadata"
    sMetadata = ""
    master_host = ""
    er_code = 654321
    er_info = "%s not exists !!!" % (path)
    if os.path.exists(path):
        if isMaster != 1:
            # Backup source is a slave: Host / Log / Pos lines follow the header.
            num = 3
            sFinds = "SLAVE STATUS"
        else:
            # Backup source is a master: Log / Pos lines follow the header.
            num = 2
            sFinds = "MASTER STATUS"
        # with-statement closes the file (the original leaked the handle).
        with open(path, 'r') as f:
            rows = f.readlines()
        i = 100  # sentinel: stays > num until the STATUS header line is seen
        lst = []
        for s in rows:
            if s.find(sFinds) > 0:
                i = 1
                continue
            if i <= num:
                lst.append(s.split(':')[1].strip())
                i += 1
        # NOTE(review): a malformed metadata file raises ValueError on the
        # unpacking below — unchanged from the original behavior.
        if isMaster == 1:
            master_host = backup_host
            log_file, log_pos = lst
        else:
            master_host, log_file, log_pos = lst
        er_code = 0
        er_info = ""
        sMetadata = "CHANGE MASTER TO MASTER_HOST='%s',MASTER_LOG_FILE='%s',MASTER_LOG_POS=%s,MASTER_USER='rep_user',MASTER_PASSWORD='meizu.com'" % (master_host, log_file, log_pos)
    return (er_code, er_info, master_host, sMetadata)
def execute(cmd):
    """Execute a shell command.

    Returns (returncode, output): returncode == 0 on success; on failure the
    combined output holds the error text.
    """
    try:
        returncode, std_err = commands.getstatusoutput(cmd)
        return (returncode, std_err)
    except os.error as e:  # 'as' form replaces the legacy 'except X, e' syntax
        # 1001: internal marker for an OS-level failure.  Return the message
        # as a string — callers strip()/len() it, which fails on an exception
        # object (the original returned e itself).
        return (1001, str(e))
def call_proc(my_args):
    """Write one backup record to the database via stored procedure sp_backup_i.

    my_args -- 13-element list: idc, master_host, backup_host, name, port,
               backup_type, file, start_time, stop_time, returncode,
               file_size, slave_statement, std_err.
    Errors are deliberately swallowed: a logging failure must not abort backups.
    """
    conn = None
    cur = None
    try:
        conn = mydb.connect(host='127.0.0.1', user='test', passwd='zxc/213?', db='meizu_item')
        cur = conn.cursor()
        cur.callproc('sp_backup_i', list(my_args))
        conn.commit()
    except mydb.Error:
        # Best-effort insert; the original optionally printed the error.
        pass
    finally:
        # Guard against connect() failing before cur/conn were bound — the
        # original closed both unconditionally and raised NameError there.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
# Script entry point: launch the batch backup when run directly.
if __name__ == '__main__':
    main()
#!/usr/bin/python
import os
import time
import commands
import shutil
import threading
from os.path import join, getsize
import MySQLdb as mydb
# Root directory that all backups are written under.
baseDir = "/data2/backup/backup_data/"
# idc: site tag, 'ns' or 'wx' (per original comment).
# isZip: True to let mydumper compress the backup itself (-c flag).
idc = 'ns'
isZip = True
# Retry a failed backup once when True, after retry_sleep seconds.
is_errRetryBackup = True
retry_sleep = 300
# Date stamp appended to each backup directory name, e.g. 20181121.
backup_date = time.strftime("%Y%m%d")
# mydumper command template: host, port, compress flag, output directory.
cmd = "/usr/local/bin/mydumper -h %s -u root -p password? -P %s %s -t 5 -o %s"
'''
Functional description:
1. Remote batch backup with mydumper; the backup list comes from a config file.
2. Backups are compressed on request (mydumper compresses them itself).
3. A failed backup may be retried once.
4. Backup results are recorded in a database.
'''
def main():
    """Read the backup list and launch one mydumper thread per instance.

    Config-file lines (whitespace separated): host businessName port isMaster.
    Lines starting with '#' and blank lines are ignored.
    """
    thread_pool = []
    # mydumper's own compression flag.
    zip_flag = '-c' if isZip else ''  # original shadowed the builtin 'zip'
    # with-statement closes the config file (the original leaked the handle).
    with open('/data/other_list.cnf', 'r') as f:
        for line in f:
            if not line.startswith('#') and len(line.strip()) > 0:
                fields = line.split()  # original bound this to 'str', shadowing the builtin
                host, businessName, port, isMaster = fields[0], fields[1], fields[2], fields[3]
                # Create the per-business folder on first use.
                dir = baseDir + '/' + businessName
                if not os.path.exists(dir):
                    os.makedirs(dir)
                # Backup directory: <business dir>/<businessName><date>
                dir += "/%s%s" % (businessName, backup_date)
                strcmd = cmd % (host, port, zip_flag, dir)
                th = threading.Thread(target=mydumper,
                                      args=(strcmd, dir, businessName, host, port,
                                            is_errRetryBackup, int(isMaster)))
                thread_pool.append(th)
    if thread_pool:
        for t in thread_pool:
            t.daemon = True
            t.start()
        for t in thread_pool:
            t.join()
def mydumper(sCmd, backupDir, businessName, host, port, is_Retry, isMaster):
    """Run one mydumper backup, record the result in the DB, optionally retry once.

    sCmd         -- fully formatted mydumper command line
    backupDir    -- target directory for this backup
    businessName -- logical name of the backed-up instance
    host, port   -- MySQL server address
    is_Retry     -- when True, retry once after retry_sleep seconds on failure
    isMaster     -- 1 if the backup source is a master, otherwise it is a slave
    """
    master_host = ""
    backup_host = host
    name = businessName
    backup_type = 1
    file = ""
    slave_statement = ""
    # %M is minutes; the original's %m repeated the month in the timestamp.
    start_time = time.strftime("%Y%m%d%H%M%S")
    # Remove any leftover backup from a previous run.
    if os.path.exists(backupDir):
        shutil.rmtree(backupDir)
    # Run the backup.
    returncode, std_err = execute(sCmd)
    stop_time = time.strftime("%Y%m%d%H%M%S")
    if returncode == 0:
        # Any stderr output from mydumper is treated as a failure too.
        if std_err.strip() != "":
            returncode = 123456
        else:
            # Read change-master info from metadata; doubles as a validity check.
            returncode, std_err, master_host, slave_statement = statement(backupDir, backup_host, isMaster)
            if returncode == 0:
                file = backupDir
    if returncode != 0:
        # Mark a failed backup directory with an _ERR suffix.
        errDir = backupDir + "_ERR"
        # Guard: the directory may never have been created (the original
        # renamed unconditionally and could raise OSError here).
        if os.path.exists(backupDir):
            os.rename(backupDir, errDir)
        file = errDir
    # Total size of the backup on disk (0 when the directory is absent).
    file_size = getDirsize(file)
    # Truncate long error text so it fits the 255-char DB column.
    if len(std_err) > 255:
        std_err = std_err[:250] + "..."
    my_args = [idc, master_host, backup_host, name, port, backup_type, file,
               start_time, stop_time, returncode, file_size, slave_statement, std_err]
    # Persist the backup record.
    call_proc(my_args)
    # On failure, retry exactly once: the recursive call passes is_Retry=False.
    if is_Retry and returncode != 0:
        time.sleep(retry_sleep)
        oldfile = sCmd.split('-o')[1]
        pos = oldfile.rfind("/") + 1
        # Mark the retried backup directory with a "ReBackup-" prefix.
        retry_file = oldfile[:pos] + "ReBackup-" + oldfile[pos:]
        retryCmd = sCmd.replace(oldfile, retry_file)
        mydumper(retryCmd, retry_file.strip(), name, host, port, False, isMaster)
def getDirsize(path):
    """Return the total size in bytes of all files under path, recursively."""
    size = 0  # the original used the Python-2-only literal 0L
    for root, dirs, files in os.walk(path):
        size += sum(getsize(join(root, name)) for name in files)
    return size
def statement(path, backup_host, isMaster):
    """Read CHANGE MASTER TO information from mydumper's metadata file.

    During a backup mydumper writes metadata.partial and renames it to
    metadata on completion.  The file holds: the dump start time, the
    master's log-file/log-pos (and, for a slave source, the slave's
    host/log-file/log-pos), and the dump finish time.

    Returns (er_code, er_info, master_host, change_master_sql);
    er_code == 0 on success, 654321 when the metadata file is missing.
    """
    path += "/metadata"
    sMetadata = ""
    master_host = ""
    er_code = 654321
    er_info = "%s not exists !!!" % (path)
    if os.path.exists(path):
        if isMaster != 1:
            # Backup source is a slave: Host / Log / Pos lines follow the header.
            num = 3
            sFinds = "SLAVE STATUS"
        else:
            # Backup source is a master: Log / Pos lines follow the header.
            num = 2
            sFinds = "MASTER STATUS"
        # with-statement closes the file (the original leaked the handle).
        with open(path, 'r') as f:
            rows = f.readlines()
        i = 100  # sentinel: stays > num until the STATUS header line is seen
        lst = []
        for s in rows:
            if s.find(sFinds) > 0:
                i = 1
                continue
            if i <= num:
                lst.append(s.split(':')[1].strip())
                i += 1
        # NOTE(review): a malformed metadata file raises ValueError on the
        # unpacking below — unchanged from the original behavior.
        if isMaster == 1:
            master_host = backup_host
            log_file, log_pos = lst
        else:
            master_host, log_file, log_pos = lst
        er_code = 0
        er_info = ""
        sMetadata = "CHANGE MASTER TO MASTER_HOST='%s',MASTER_LOG_FILE='%s',MASTER_LOG_POS=%s,MASTER_USER='rep_user',MASTER_PASSWORD='meizu.com'" % (master_host, log_file, log_pos)
    return (er_code, er_info, master_host, sMetadata)
def execute(cmd):
    """Execute a shell command.

    Returns (returncode, output): returncode == 0 on success; on failure the
    combined output holds the error text.
    """
    try:
        returncode, std_err = commands.getstatusoutput(cmd)
        return (returncode, std_err)
    except os.error as e:  # 'as' form replaces the legacy 'except X, e' syntax
        # 1001: internal marker for an OS-level failure.  Return the message
        # as a string — callers strip()/len() it, which fails on an exception
        # object (the original returned e itself).
        return (1001, str(e))
def call_proc(my_args):
    """Write one backup record to the database via stored procedure sp_backup_i.

    my_args -- 13-element list: idc, master_host, backup_host, name, port,
               backup_type, file, start_time, stop_time, returncode,
               file_size, slave_statement, std_err.
    Errors are deliberately swallowed: a logging failure must not abort backups.
    """
    conn = None
    cur = None
    try:
        conn = mydb.connect(host='127.0.0.1', user='test', passwd='zxc/213?', db='meizu_item')
        cur = conn.cursor()
        cur.callproc('sp_backup_i', list(my_args))
        conn.commit()
    except mydb.Error:
        # Best-effort insert; the original optionally printed the error.
        pass
    finally:
        # Guard against connect() failing before cur/conn were bound — the
        # original closed both unconditionally and raised NameError there.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
# Script entry point: launch the batch backup when run directly.
if __name__ == '__main__':
    main()
     
         
         
         
  More:      
 
- Improve WordPress performance (Server)
- HDFS Distributed File System Resource Manager Developer summary (Server)
- Nginx start, stop, smooth start, smooth upgrade (Server)
- Linux NIC configuration (Linux)
- Oracle SQL statement tracking (Database)
- VMware virtual machine operating system log Error in the RPC receive loop resolve (Linux)
- Quick Install software RAID on Linux (Linux)
- Distributed File System using MogileFS (Linux)
- Java concurrent programming combat (using synchronized synchronization method) (Programming)
- To install Google Chrome browser under Ubuntu 14.04 LTS (Linux)
- Vim (Linux)
- Linux cd command Detailed (Linux)
- Linux Getting Started tutorial: XWindow what (Linux)
- mysqldump implement database logical backup (Database)
- An Example of GoldenGate Extract Process Hang Problem Solving (Database)
- VirtualBox virtual machine can not start to solve under Ubuntu (Linux)
- Use ISO document production OpenStack used CoreOS mirror (Linux)
- How to use the Linux command compress JPEG images (Linux)
- PostgreSQL procedural language learning (Database)
- Linux Creating a new user error Creating mailbox file: File exists (Linux)
     
           
     
  CopyRight 2002-2022 newfreesoft.com, All Rights Reserved.