--- /dev/null
+To restore a backup made via bpcdump:
+
+ - Attach the external sata drive
+ - sudo esata mount
+ - Select the backup to restore, for example ve1200-20080605
+ - Examine the contents of /media/esata/ve1200-20080605/info
+ - Ensure a partition (preferably an LVM2 LV) exists to restore into, and that
+ its size is at least as large as the size identified in the 'info' file.
+ Note the partition's device name, for example /dev/vg0/ve1200.
+ - md5sum /media/esata/ve1200-20080605/image
+ Verify the above command outputs the same signature as contained within
+ /media/esata/ve1200-20080605/image.md5sum. If they are not the same,
+ the selected image has become corrupted and another backup should be
+ selected to restore.
+ - dd_rescue -Ay 8192 /media/esata/ve1200-20080605/image /dev/vg0/ve1200
+ - Examine the output from dd_rescue; ensure there are no errors reported and
+ that it returns without error. This command will take some time.
+ - dd if=/dev/vg0/ve1200 bs=<imageblocksize> count=<imageblockcount> | md5sum
+   The emitted MD5 signature should match
+   /media/esata/ve1200-20080605/image.md5sum.  Limit the read with count=
+   so that exactly the image's size is hashed; if the partition is larger
+   than the image, hashing the entire partition will not match.
+ - The partition is now ready to be mounted for use by the BackupPC VE. It may
+ be necessary to create the VE's conf file in /etc/vz/conf.
+ - sudo vzctl start 1200
+ - Verify correct operation of the restored BackupPC VE.
+
+This process will be replaced by a script, bpcrestore:
+
+ - To restore an old version of VE 1200 over itself:
+ bpcrestore /media/esata/ve1200-20080605
+ (this command will create VE 1200 if it doesn't exist on the HN on which
+ bpcrestore was executed.)
+
+ - To restore an old version of VE 1200, implicitly creating a new VE 1201:
+ bpcrestore /media/esata/ve1200-20080605 1201
+
+ - Perhaps the script will instead be bpcbackup --restore, akin to vzdump's
+ operation.
--- /dev/null
+#!/bin/bash
+#
+# bpcbackup
+# Copyright (C) 2008 by Titanium Mirror, Inc.
+# Author: R. Steve McKown <smckown@titaniummirror.com>
+#
+# A wrapper script for bpcdump, designed to be called from root's crontab
+# on the HNs. Only the HN hosting the BPC VE will do anything.
+
+# VEID of the BackupPC VE this wrapper looks for on the local HN.
+VEID=1158
+export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/usr/sbin:$PATH
+
+# Complain if this script is running on a server not configured with OpenVZ
+if [ ! -x /usr/sbin/vzctl ]; then
+    echo "Host $(hostname) has bpcbackup installed, but is not an HN" >&2
+    exit 1
+fi
+
+# Silently exit if this script is running on an HN but not currently hosting
+# the BPC VE.
+# 'vzctl status' reports 'deleted' when the VE does not exist on this HN.
+if vzctl status "$VEID" 2>/dev/null | grep -q deleted; then
+    exit 0
+fi
+
+# OK, we're an HN and we're hosting the VE. Run bpcdump. When it returns,
+# we want to unmount the esata drive to reduce the chance of damage to the
+# backup data. Note, however, that esata must already be mounted or bpcdump
+# will error. This is correct, since we cannot auto-mount esata, since it
+# has to be physically connected first, and may require authentication on mount
+# if the volume is encrypted.
+# The esata unmount is attempted even when bpcdump fails; bpcdump's exit
+# status takes precedence when both fail.
+/usr/local/bin/bpcdump
+ret=$?
+/usr/local/bin/esata umount
+ret2=$?
+[ "$ret" != "0" ] && exit $ret
+[ "$ret2" != "0" ] && exit $ret2
+exit 0
--- /dev/null
+# Crontab file for bpcbackup.
+# Install to /etc/cron.d/bpcbackup
+# File ownership and permissions root:root, 644
+
+# Run the bpcbackup program Tue and Fri mornings at 4am
+#0 4 * * 2,5 root /usr/local/bin/bpcbackup
+
+# Run the bpcbackup program Fri mornings at 4am
+0 4 * * 5 root /usr/local/bin/bpcbackup
--- /dev/null
+#!/bin/bash
+#
+# bpcdump
+# Copyright (C) 2008 by Titanium Mirror, Inc.
+# Author: R. Steve McKown <smckown@titaniummirror.com>
+#
+# Dumps the BPC VEID to external storage. It must have its storage on a
+# private filesystem mounted at /var/lib/vz/private/$VEID.
+#
+# A generalized version of this script should be created later. A recovery
+# option within this script should also be created.
+
+# CONSTANTS
+
+VEID=1158
+VEDEV=/dev/vg0/ve$VEID
+VEMNT=/var/lib/vz/private/$VEID
+EXTFS=/media/esata
+unset WRITEPAR
+INFO=/dev/null
+export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/usr/sbin:$PATH
+SCRIPT_EXT="start stop mount umount"
+CONF_DIR=/etc/vz/conf
+
+# FUNCTIONS
+
+# Print a message to stdout and append it to the log file named by $INFO.
+# $INFO is /dev/null until the backup directory and its 'info' file exist.
+info()
+{
+    echo "$*" | tee -a "$INFO"
+}
+
+# Like info(), but the console copy of the message goes to stderr.
+error()
+{
+    echo "$*" | tee -a "$INFO" >&2
+}
+
+restartve()
+{
+ if [ -n "$mounted" ]; then
+ info "remounting VEID $VEID"
+ mount $VEMNT && unset mounted
+ fi
+ if [ -n "$running" ]; then
+ info "restarting VEID $VEID"
+ vzctl start $VEID && unset running
+ if [ $? = 0 ]; then
+ info "VEID $VEID has been started"
+ else
+ error "VEID $VEID failed to start; backup continues"
+ fi
+ fi
+}
+
+# Exit the script.  $1 is the exit status; remaining args form a message.
+# Restarts the VE if this script stopped it, logs the outcome, and leaves a
+# 'good' or 'bad' marker file in the backup directory for operators.
+cleanup()
+{
+    ret="$1"
+    shift
+    msg="$*"
+    [ -z "$ret" ] && ret=-1
+    [ -z "$msg" ] && msg=undef
+    restartve
+    if [ "$ret" = "0" ]; then
+	info "$(date)"
+	info "cleanup message: $msg"
+	info "exit $ret"
+	touch "$EXTVEIDFS/good"
+    else
+	error "$(date)"
+	error "cleanup message: $msg"
+	error "exit $ret"
+	# EXTVEIDFS is unset when we fail before the backup dir is created;
+	# don't create a stray '/bad' in that case.
+	[ -n "$EXTVEIDFS" ] && touch "$EXTVEIDFS/bad"
+    fi
+    # info() already appends to $INFO via tee; the former '>> "$INFO"'
+    # redirect here logged the warning twice and hid it from stdout.
+    [ -n "$backupwarn" ] && info "WARNINGS FOUND"
+    exit $ret
+}
+
+# MAIN
+
+# Route termination signals through cleanup so the VE gets restarted.
+trap "cleanup 1 \"termination by signal\"" SIGINT SIGTERM
+
+if [ $(whoami) != "root" ]; then
+    cleanup 1 "script requires super-user privileges"
+fi
+
+# Split 'vzctl status' output into the positional parameters and derive
+# the exist/mounted/running flags from words 3..5 of its output.
+set $(vzctl status $VEID)
+unset exist mounted running backupwarn
+[ "$3" = "exist" ] && exist=1
+[ "$4" = "mounted" ] && mounted=1
+[ "$5" = "running" ] && running=1
+
+# The external esata volume must already be mounted (see bpcbackup).
+if ! mount | grep -q "on $EXTFS"; then
+    cleanup 1 "$EXTFS is not mounted"
+else
+    info "$EXTFS is mounted"
+fi
+
+if [ -z "$exist" ]; then
+    cleanup 1 "VEID $VEID does not exist"
+else
+    info "VEID $VEID exists"
+fi
+
+if [ ! -d "$VEMNT" ]; then
+    cleanup 1 "mount point for VEID $VEID does not exist"
+else
+    info "VEID $VEID has private mount point"
+fi
+
+# Require an fstab entry so restartve's bare 'mount $VEMNT' can succeed.
+if ! grep -q "$VEMNT[ ]" /etc/fstab; then
+    cleanup 1 "mount point for VEID $VEID not in /etc/fstab"
+fi
+
+if ! mount | grep -q "on $VEMNT"; then
+    cleanup 1 "$VEMNT is not mounted"
+else
+    info "VEID $VEID is mounted on $VEMNT"
+fi
+
+# Stop the VE so the image is consistent; restartve/cleanup restart it.
+if [ -n "$running" ]; then
+    info "stopping VEID $VEID"
+    vzctl stop $VEID
+fi
+
+# Copy the VE's action scripts (start/stop/mount/umount) into its
+# /etc/vzdump directory, as vzdump does (for consistency).
+# The previous loop tested the bare suffix ("start", ...) as a filename --
+# never true -- and expanded the whole "$SCRIPT_EXT" list inside the
+# source/destination names; iterate per suffix instead (this mirrors the
+# ACT_SCRIPTS_SFX loop in vzmigrate).
+for sfx in $SCRIPT_EXT; do
+    file="${CONF_DIR}/${VEID}.${sfx}"
+    if [ -f "$file" ]; then
+	destdir="${VEMNT}/etc/vzdump"
+	mkdir -p "$destdir"
+	info "Preserve config file ${VEID}.${sfx}"
+	cp "$file" "${destdir}/vps.${sfx}"
+    fi
+done
+
+# Unmount the filesystem, first getting its size
+if mount | grep -q "on $VEMNT"; then
+    mounted=1 # duplicate; vzctl status told us...
+    # Size in 1K blocks (df -P); padded 5% below for headroom on esata.
+    srcblks=$(df -P "$VEMNT" | grep "$VEMNT" | awk '{ print $2 }')
+    # Add 5% buffer
+    t=$((srcblks / 20))
+    srcblks=$((srcblks + t))
+    info "VEID fs contains $srcblks blocks"
+    info "unmount VEID $VEID"
+    umount "$VEMNT"
+else
+    cleanup 1 "VEID private fs must be mounted to determine its size"
+fi
+
+# Before we begin writing, remove old backup dirs until we have room
+dstblks=$(df -P "$EXTFS" | grep "$EXTFS" | awk '{ print $4 }')
+while [ $dstblks -le $srcblks ]; do
+    info "Only $dstblks free on $EXTFS"
+    # Oldest backup for this VE = last entry of 'ls -t' (newest first).
+    oldest=$(cd $EXTFS && eval ls -td "ve${VEID}*" 2>/dev/null | tail -1)
+    if [ -d "$EXTFS/$oldest" ]; then
+	info "Removing old backup $oldest from $EXTFS"
+	rm -rf "$EXTFS/$oldest"
+    else
+	cleanup 1 "Structure error on $EXTFS. Correct manually."
+    fi
+    dstblks=$(df -P "$EXTFS" | grep "$EXTFS" | awk '{ print $4 }')
+done
+# NOTE(review): unreachable when the while loop exits normally (it only
+# ends once dstblks > srcblks); kept as a belt-and-braces check.
+if [ $dstblks -le $srcblks ]; then
+    cleanup 1 "out of space: need $srcblks KB, have $dstblks KB"
+fi
+info "Archive space ok: need $srcblks KB, have $dstblks KB"
+
+# Rotate an existing same-day backup dir to .old instead of deleting it.
+EXTVEIDFS="$EXTFS/ve$VEID-$(date +'%Y%m%d')"
+if [ -d "$EXTVEIDFS" ]; then
+    rm -rf "${EXTVEIDFS}.old"
+    mv "$EXTVEIDFS" "${EXTVEIDFS}.old"
+else
+    rm -rf "$EXTVEIDFS"
+fi
+mkdir "$EXTVEIDFS"
+date > "$EXTVEIDFS/begin"
+if ! cd "$EXTVEIDFS"; then
+ cleanup 1 "cannot change into $EXTVEIDFS directory"
+fi
+
+INFO="$EXTVEIDFS/info"
+cat > "$INFO" <<+EOF+
+Date: $(date)
+VEID: $VEID
+Volume: $VEDEV
+dd_rescue log: ddrlog
+dd_rescue bad blocks: ddrbb
+image file: image
+
++EOF+
+info "copy $VEID device $VEDEV to $EXTVEIDFS/image"
+time dd_rescue -Aqy 8192 -l "$EXTVEIDFS/ddrlog" -o "$EXTVEIDFS/ddrbb" \
+ $VEDEV "$EXTVEIDFS/image"
+ret=$?
+if [ "$ret" != "0" ]; then
+ backupwarn=1
+ error "WARNING: dd_rescue returned $ret"
+fi
+info "calculate md5sum for $VEDEV (src)"
+srcmd5=$(md5sum "$VEDEV" 2>&1 | awk '{ print $1 }')
+info "calculate md5sum for image (dst)"
+
+# We're done with the partition. We can restart the VE now.
+restartve
+
+# Continue on with archive and validation
+(cd "$EXTVEIDFS" && md5sum image >image.md5sum 2>&1 | awk '{ print $1 }')
+dstmd5=$(md5sum "$EXTVEIDFS/image" 2>&1 | awk '{ print $1 }')
+echo "$dstmd5 image.md5sum" > "$EXTVEIDFS/image.md5sum"
+info "$srcmd5 source md5sum"
+info "$dstmd5 dest md5sum"
+if [ "$srcmd5" != "$dstmd5" -o -z "$srcmd5" ]; then
+ backupwarn=1
+ error "WARNING: md5 signatures do not match"
+else
+ info "md5 signatures match"
+fi
+if [ -n "$WRITEPAR" ]; then
+ (cd "$EXTVEIDFS" && time par2 c img)
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ backupwarn=1
+ info "WARNING: redundancy failed: par2 returned $ret"
+ fi
+ (cd "$EXTVEIDFS" && time par2 v img)
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ backupwarn=1
+ info "WARNING: redundancy inexact: par2 returned $ret"
+ fi
+fi
+
+cleanup 0 "successful"
+
+# vi: set shiftwidth=4:
--- /dev/null
+#!/bin/sh
+# Copyright (C) 2000-2007 SWsoft. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# vzmigrate is used for VE migration to another node
+#
+# Usage:
+# vzmigrate [-r yes|no] [--ssh=<options>] [--keep-dst] [--online] [-v]
+# destination_address VEID
+# Options:
+# -r, --remove-area yes|no
+# Whether to remove VE on source HN for successfully migrated VE.
+# --ssh=<ssh options>
+# Additional options that will be passed to ssh while establishing
+# connection to destination HN. Please be careful with options
+# passed, DO NOT pass destination hostname.
+# --keep-dst
+# Do not clean synced destination VE private area in case of some
+# error. It makes sense to use this option on big VE migration to
+# avoid syncing VE private area again in case some error
+# (on VE stop for example) occurs during first migration attempt.
+# --online
+# Perform online (zero-downtime) migration: during the migration the
+# VE hangs for a while and after the migration it continues working
+# as though nothing has happened.
+# -v
+# Verbose mode. Causes vzmigrate to print debugging messages about
+# its progress (including some time statistics).
+#
+# Examples:
+# Online migration of VE #101 to foo.com:
+# vzmigrate --online foo.com 101
+# Migration of VE #102 to foo.com with downtime:
+# vzmigrate foo.com 102
+# NOTE:
+# This program uses ssh as a transport layer. You need to put ssh
+# public key to destination node and be able to connect without
+# entering a password.
+
+
+ACT_SCRIPTS_SFX="start stop mount umount"
+SSH_OPTIONS=""
+SSH="ssh $SSH_OPTIONS"
+SCP_OPTIONS=""
+SCP="scp $SCP_OPTIONS"
+RSYNC_OPTIONS="-aH --delete --numeric-ids"
+RSYNC="rsync $RSYNC_OPTIONS"
+
+online=0
+verbose=0
+remove_area=1
+keep_dst=0
+debug=0
+confdir="/etc/vz/conf"
+vzconf="/etc/vz/vz.conf"
+tmpdir="/var/tmp"
+act_scripts=
+
+# Errors:
+MIG_ERR_USAGE=1
+MIG_ERR_VPS_IS_STOPPED=2
+MIG_ERR_CANT_CONNECT=4
+MIG_ERR_COPY=6
+MIG_ERR_START_VPS=7
+MIG_ERR_STOP_SOURCE=8
+MIG_ERR_EXISTS=9
+MIG_ERR_NOEXIST=10
+MIG_ERR_IP_IN_USE=12
+MIG_ERR_QUOTA=13
+MIG_ERR_CHECKPOINT=$MIG_ERR_STOP_SOURCE
+MIG_ERR_MOUNT_VPS=$MIG_ERR_START_VPS
+MIG_ERR_RESTORE_VPS=$MIG_ERR_START_VPS
+MIG_ERR_OVZ_NOT_RUNNING=14
+MIG_ERR_APPLY_CONFIG=15
+
+# Print usage to stderr and exit with MIG_ERR_USAGE.
+usage() {
+	cat >&2 <<EOF
+This program is used for VE migration to another node
+Usage:
+vzmigrate [-r yes|no] [--ssh=<options>] [--keep-dst] [--online] [-v]
+	destination_address <VEID>
+Options:
+-r, --remove-area yes|no
+	Whether to remove VE on source HN for successfully migrated VE.
+--ssh=<ssh options>
+	Additional options that will be passed to ssh while establishing
+	connection to destination HN. Please be careful with options
+	passed, DO NOT pass destination hostname.
+--keep-dst
+	Do not clean synced destination VE private area in case of some
+	error. It makes sense to use this option on big VE migration to
+	avoid syncing VE private area again in case some error
+	(on VE stop for example) occurs during first migration attempt.
+--online
+	Perform online (zero-downtime) migration: during the migration the
+	VE hangs for a while and after the migration it continues working
+	as though nothing has happened.
+-v
+	Verbose mode. Causes vzmigrate to print debugging messages about
+	its progress (including some time statistics).
+EOF
+	exit $MIG_ERR_USAGE
+}
+
+# Logs message
+# There are 3 types of messages:
+# 0 - error messages (print to stderr)
+# 1 - normal messages (print to stdout)
+# 2 - debug messages (print to stdout if in verbose mode)
+# The message must not be passed as printf's format string (it may contain
+# '%'); %b still expands the \t/\n escapes that the timing report relies on.
+# The previous 'printf "Error: " $@"\n"' passed the message as arguments to
+# a format with no conversions, dropping it from the output.
+log () {
+	if [ $1 -eq 0 ]; then
+		shift
+		printf 'Error: %b\n' "$*" >&2
+	elif [ $1 -eq 1 ]; then
+		shift
+		printf '%b\n' "$*"
+	elif [ $verbose -eq 1 ]; then
+		shift
+		printf '\t%b\n' "$*"
+	fi
+}
+
+# Executes command and returns result of execution
+# There are 2 types of execution:
+# 1 - normal execution (all output will be printed)
+# 2 - debug execution (output will be printed if verbose mode is set,
+# in other case stdout and stderr redirected to /dev/null)
+# NOTE(review): $@ is unquoted, so arguments containing whitespace are
+# re-split; callers here only pass simple words.
+logexec () {
+	if [ $1 -eq 1 -o $verbose -eq 1 ]; then
+		shift
+		$@
+	else
+		shift
+		$@ >/dev/null 2>&1
+	fi
+}
+
+# Rollback helpers.  Each undo_* reverses one migration step and then
+# chains to the undo for the preceding step, so calling the undo matching
+# the last completed step rolls the whole migration back.
+undo_conf () {
+	$SSH "root@$host" "rm -f $vpsconf"
+}
+
+undo_act_scripts () {
+	if [ -n "$act_scripts" ] ; then
+		$SSH "root@$host" "rm -f $act_scripts"
+	fi
+	undo_conf
+}
+
+undo_private () {
+	# --keep-dst preserves the synced private area for a retry.
+	if [ $keep_dst -eq 0 ]; then
+		$SSH "root@$host" "rm -rf $VE_PRIVATE"
+	fi
+	undo_act_scripts
+}
+
+undo_root () {
+	$SSH "root@$host" "rm -rf $VE_ROOT"
+	undo_private
+}
+
+undo_quota_init () {
+	[ "${DISK_QUOTA}" = 'no' ] || $SSH "root@$host" "vzquota drop $VEID"
+	undo_root
+}
+
+undo_quota_on () {
+	[ "${DISK_QUOTA}" = 'no' ] || $SSH "root@$host" "vzquota off $VEID"
+	undo_quota_init
+}
+
+undo_sync () {
+	# Root will be destroyed in undo_root
+	undo_quota_on
+}
+
+undo_suspend () {
+	# Resume the checkpointed VE on the source node.
+	logexec 2 vzctl chkpnt $VEID --resume
+	undo_sync
+}
+
+undo_dump () {
+	if [ $debug -eq 0 ]; then
+		rm -f "$VE_DUMPFILE"
+	fi
+	undo_suspend
+}
+
+undo_copy_dump () {
+	$SSH "root@$host" "rm -f $VE_DUMPFILE"
+	undo_suspend
+}
+
+undo_stop () {
+	# Return the source VE to its pre-migration state.
+	if [ "$state" = "running" ]; then
+		vzctl start $VEID
+	elif [ "$mounted" = "mounted" ]; then
+		vzctl mount $VEID
+	fi
+	undo_sync
+}
+
+undo_source_stage() {
+	if [ $online -eq 1 ]; then
+		undo_copy_dump
+	else
+		undo_stop
+	fi
+}
+
+undo_quota_dump () {
+	rm -f "$VE_QUOTADUMP"
+	undo_source_stage
+}
+
+undo_copy_quota () {
+	$SSH "root@$host" "rm -f $VE_QUOTADUMP"
+	undo_quota_dump
+}
+
+undo_undump () {
+	logexec 2 $SSH root@$host vzctl restore $VEID --kill
+	undo_copy_quota
+}
+
+# Map 'vzctl status' output words onto the globals used by the script
+# ($1/$2 are the literal "VEID <id>" words and are ignored).
+get_status() {
+	exist=$3
+	mounted=$4
+	state=$5
+}
+
+# Print the difference in seconds between two fractional timestamps.
+get_time () {
+	awk -v t2=$2 -v t1=$1 'BEGIN{print t2-t1}'
+}
+
+if [ $# -lt 2 ]; then
+	usage
+fi
+
+# Parse options up to the first non-option argument.
+while [ ! -z "$1" ]; do
+	log 1 "OPT:$1"
+	case "$1" in
+	--online)
+		online=1
+		;;
+	-v)
+		verbose=1
+		;;
+	--remove-area|-r)
+		shift
+		if [ "$1" = "yes" ]; then
+			remove_area=1
+		elif [ "$1" = "no" ]; then
+			remove_area=0
+		else
+			usage
+		fi
+		;;
+	--keep-dst)
+		keep_dst=1
+		;;
+	--ssh=*)
+		SSH_OPTIONS="$SSH_OPTIONS $(echo $1 | cut -c7-)"
+		SSH="ssh $SSH_OPTIONS"
+		# scp takes -P where ssh takes -p for the port.
+		# NOTE(review): this sed rewrites the first '-p' substring
+		# anywhere in the options, not only a port flag.
+		SCP_OPTIONS="`echo $SSH_OPTIONS | sed 's/-p/-P/1'`"
+		SCP="scp $SCP_OPTIONS"
+		;;
+	*)
+		break
+		;;
+	esac
+	shift
+done
+
+host=$1
+shift
+VEID=$1
+shift
+
+# Exactly two positional arguments: destination host and VEID.
+if [ -z "$host" -o -z "$VEID" -o $# -ne 0 ]; then
+	usage
+fi
+
+vpsconf="$confdir/$VEID.conf"
+
+if [ ! -r "$vzconf" -o ! -r "$vpsconf" ]; then
+	log 0 "Can't read global config or VE #$VEID config file"
+	exit $MIG_ERR_NOEXIST
+fi
+
+# Sets $exist/$mounted/$state from local 'vzctl status' output.
+get_status $(vzctl status $VEID)
+if [ "$exist" = "deleted" ]; then
+	log 0 "VE #$VEID doesn't exist"
+	exit $MIG_ERR_NOEXIST
+fi
+
+if [ $online -eq 1 ]; then
+	log 1 "Starting online migration of VE $VEID on $host"
+else
+	log 1 "Starting migration of VE $VEID on $host"
+fi
+
+# Try to connect to destination
+# BatchMode forbids password prompts; key-based auth is required.
+if ! logexec 2 $SSH -o BatchMode=yes root@$host /bin/true; then
+	log 0 "Can't connect to destination address using public key"
+	log 0 "Please put your public key to destination node"
+	exit $MIG_ERR_CANT_CONNECT
+fi
+
+# Check if OpenVZ is running
+if ! logexec 2 $SSH -o BatchMode=yes root@$host /etc/init.d/vz status ; then
+	log 0 "OpenVZ is not running on the target machine"
+	log 0 "Can't continue migration"
+	exit $MIG_ERR_OVZ_NOT_RUNNING
+fi
+
+# Check if CPT modules are loaded for online migration
+if [ $online -eq 1 ]; then
+	if [ ! -f /proc/cpt ]; then
+		log 0 "vzcpt module is not loaded on the source node"
+		log 0 "Can't continue online migration"
+		exit $MIG_ERR_OVZ_NOT_RUNNING
+	fi
+	if ! logexec 2 $SSH -o BatchMode=yes root@$host "test -f /proc/rst";
+	then
+		log 0 "vzrst module is not loaded on the destination node"
+		log 0 "Can't continue online migration"
+		exit $MIG_ERR_OVZ_NOT_RUNNING
+	fi
+fi
+
+# The VE must not already exist on the destination node.
+dst_exist=$($SSH "root@$host" "vzctl status $VEID" | awk '{print $3}')
+if [ "$dst_exist" = "exist" ]; then
+	log 0 "VE #$VEID already exists on destination node"
+	exit $MIG_ERR_EXISTS
+fi
+
+if [ $online -eq 1 -a "$state" != "running" ]; then
+	log 0 "Can't perform online migration of stopped VE"
+	exit $MIG_ERR_VPS_IS_STOPPED
+fi
+
+log 2 "Loading $vzconf and $vpsconf files"
+
+# Source configs for VE_PRIVATE, VE_ROOT, DISK_QUOTA, IP_ADDRESS, etc.
+. "$vzconf"
+. "$vpsconf"
+VE_DUMPFILE="$tmpdir/dump.$VEID"
+VE_QUOTADUMP="$tmpdir/quotadump.$VEID"
+
+log 2 "Check IPs on destination node: $IP_ADDRESS"
+for IP in $IP_ADDRESS; do
+	if [ $($SSH "root@$host" "grep -c \" $IP \" /proc/vz/veip") -gt 0 ];
+	then
+		log 0 "IP address $IP already in use on destination node"
+		exit $MIG_ERR_IP_IN_USE
+	fi
+done
+
+log 1 "Preparing remote node"
+
+log 2 "Copying config file"
+if ! logexec 2 $SCP $vpsconf root@$host:$vpsconf ; then
+	log 0 "Failed to copy config file"
+	exit $MIG_ERR_COPY
+fi
+
+logexec 2 $SSH root@$host vzctl set $VEID --applyconfig_map name --save
+rc=$?
+# vzctl return code 20 or 21 in case of unrecognized option; those mean the
+# remote vzctl predates --applyconfig_map and are not fatal.
+# Note: '&&' is a syntax error inside '[ ]', and $? changes after the first
+# test, so the status must be captured once up front.
+if [ $rc -ne 20 ] && [ $rc -ne 21 ] && [ $rc -ne 0 ]; then
+	log 0 "Failed to apply config on destination node"
+	undo_conf
+	exit $MIG_ERR_APPLY_CONFIG
+fi
+
+# Collect any per-VE action scripts (start/stop/mount/umount) to copy over.
+for sfx in $ACT_SCRIPTS_SFX; do
+	file="$confdir/$VEID.$sfx"
+	if [ -f "$file" ]; then
+		act_scripts="$act_scripts $file"
+	fi
+done
+if [ -n "$act_scripts" ]; then
+	log 2 "Copying action scripts"
+	if ! logexec 2 $SCP $act_scripts root@$host:$confdir ; then
+		log 0 "Failed to copy action scripts"
+		undo_conf
+		exit $MIG_ERR_COPY
+	fi
+fi
+
+log 2 "Creating remote VE root dir"
+if ! $SSH "root@$host" "mkdir -p $VE_ROOT"; then
+	log 0 "Failed to make VE root"
+	undo_act_scripts
+	exit $MIG_ERR_COPY
+fi
+
+log 2 "Creating remote VE private dir"
+if ! $SSH "root@$host" "mkdir -p $VE_PRIVATE"; then
+	log 0 "Failed to make VE private area"
+	undo_private
+	exit $MIG_ERR_COPY
+fi
+
+if [ "${DISK_QUOTA}" != "no" ]; then
+	log 1 "Initializing remote quota"
+
+	log 2 "Quota init"
+	if ! $SSH "root@$host" "vzctl quotainit $VEID"; then
+		log 0 "Failed to initialize quota"
+		undo_root
+		exit $MIG_ERR_QUOTA
+	fi
+
+	log 2 "Turning remote quota on"
+	if ! $SSH "root@$host" "vzctl quotaon $VEID"; then
+		log 0 "Failed to turn quota on"
+		undo_quota_init
+		exit $MIG_ERR_QUOTA
+	fi
+else
+	log 2 "VZ disk quota disabled -- skipping quota migration"
+fi
+
+log 1 "Syncing private"
+# First rsync pass may run while the VE is still up; a second pass after
+# stop/suspend picks up the remaining deltas.
+# NOTE(review): without pipefail, '!' tests the awk stage rather than
+# rsync, so an rsync failure here can go undetected -- confirm upstream.
+if ! $RSYNC --progress \
+	"$VE_PRIVATE" "root@$host:${VE_PRIVATE%/*}" |
+	grep "% of" | awk -v ORS="\r" '{print $10}'; then
+	log 0 "Failed to sync VE private areas"
+	undo_quota_on
+	exit $MIG_ERR_COPY
+fi
+
+if [ $online -eq 1 ]; then
+	log 1 "Live migrating VE"
+
+	# Online path: suspend -> dump -> copy dump.  The VE stays frozen
+	# until it is undumped and resumed on the destination below.
+	log 2 "Suspending VE"
+	time_suspend=$(date +%s.%N)
+	if ! logexec 2 vzctl chkpnt $VEID --suspend ; then
+		log 0 "Failed to suspend VE"
+		undo_sync
+		exit $MIG_ERR_CHECKPOINT
+	fi
+
+	log 2 "Dumping VE"
+	if ! logexec 2 vzctl chkpnt $VEID --dump --dumpfile $VE_DUMPFILE ; then
+		log 0 "Failed to dump VE"
+		undo_suspend
+		exit $MIG_ERR_CHECKPOINT
+	fi
+
+	log 2 "Copying dumpfile"
+	time_copy_dump=$(date +%s.%N)
+	if ! logexec 2 $SCP $VE_DUMPFILE root@$host:$VE_DUMPFILE ; then
+		log 0 "Failed to copy dump"
+		undo_dump
+		exit $MIG_ERR_COPY
+	fi
+else
+	# Offline path: stop (or just unmount) the VE before the final sync.
+	if [ "$state" = "running" ]; then
+		log 1 "Stopping VE"
+		if ! logexec 2 vzctl stop $VEID ; then
+			log 0 "Failed to stop VE"
+			undo_sync
+			exit $MIG_ERR_STOP_SOURCE
+		fi
+	elif [ "$mounted" = "mounted" ]; then
+		log 1 "Unmounting VE"
+		if ! logexec 2 vzctl umount $VEID ; then
+			log 0 "Failed to umount VE"
+			undo_sync
+			exit $MIG_ERR_STOP_SOURCE
+		fi
+	fi
+fi
+
+# Second sync now that the VE is quiesced; catches changes made during
+# the first pass.
+if [ "$state" = "running" ]; then
+	log 2 "Syncing private (2nd pass)"
+	time_rsync2=$(date +%s.%N)
+	if ! $RSYNC \
+		"$VE_PRIVATE" "root@$host:${VE_PRIVATE%/*}"; then
+		log 0 "Failed to sync VE private areas"
+		undo_source_stage
+		exit $MIG_ERR_COPY
+	fi
+fi
+
+if [ "${DISK_QUOTA}" != "no" ]; then
+	log 1 "Syncing 2nd level quota"
+
+	log 2 "Dumping 2nd level quota"
+	time_quota=$(date +%s.%N)
+	if ! vzdqdump $VEID -U -G -T > "$VE_QUOTADUMP"; then
+		log 0 "Failed to dump 2nd level quota"
+		undo_quota_dump
+		exit $MIG_ERR_QUOTA
+	fi
+
+	log 2 "Copying 2nd level quota"
+	if ! logexec 2 $SCP $VE_QUOTADUMP root@$host:$VE_QUOTADUMP ; then
+		log 0 "Failed to copy 2nd level quota dump"
+		undo_quota_dump
+		exit $MIG_ERR_COPY
+	fi
+
+	log 2 "Load 2nd level quota"
+	if ! $SSH "root@$host" "(vzdqload $VEID -U -G -T < $VE_QUOTADUMP &&
+		vzquota reload2 $VEID)"; then
+		log 0 "Failed to load 2nd level quota"
+		undo_copy_quota
+		exit $MIG_ERR_QUOTA
+	fi
+else
+	log 2 "VZ disk quota disabled -- skipping quota migration"
+fi
+
+if [ $online -eq 1 ]; then
+	# Bring the frozen VE back to life on the destination node.
+	log 2 "Undumping VE"
+	time_undump=$(date +%s.%N)
+	if ! logexec 2 $SSH root@$host vzctl restore $VEID --undump \
+		--dumpfile $VE_DUMPFILE --skip_arpdetect ; then
+		log 0 "Failed to undump VE"
+		undo_copy_quota
+		exit $MIG_ERR_RESTORE_VPS
+	fi
+
+	log 2 "Resuming VE"
+	if ! logexec 2 $SSH root@$host vzctl restore $VEID --resume ; then
+		log 0 "Failed to resume VE"
+		undo_undump
+		exit $MIG_ERR_RESTORE_VPS
+	fi
+	time_finish=$(date +%s.%N)
+	log 2 "Times:"
+	log 2 "\tSuspend + Dump:\t" $(get_time $time_suspend $time_copy_dump)
+	log 2 "\tCopy dump file:\t" $(get_time $time_copy_dump $time_rsync2)
+	log 2 "\tSecond rsync:\t" $(get_time $time_rsync2 $time_quota)
+	log 2 "\t2nd level quota:\t" $(get_time $time_quota $time_undump)
+	log 2 "\tUndump + Resume:\t" $(get_time $time_undump $time_finish)
+	log 2 "Total time: " $(get_time $time_suspend $time_finish)
+
+	log 1 "Cleanup"
+
+	log 2 "Killing VE"
+	logexec 2 vzctl chkpnt $VEID --kill
+	logexec 2 vzctl umount $VEID
+
+	log 2 "Removing dumpfiles"
+	rm -f "$VE_DUMPFILE"
+	$SSH "root@$host" "rm -f $VE_DUMPFILE"
+else
+	# Offline: restore the VE's original state on the destination.
+	if [ "$state" = "running" ]; then
+		log 1 "Starting VE"
+		if ! logexec 2 $SSH root@$host vzctl start $VEID ; then
+			log 0 "Failed to start VE"
+			undo_copy_quota
+			exit $MIG_ERR_START_VPS
+		fi
+	elif [ "$mounted" = "mounted" ]; then
+		log 1 "Mounting VE"
+		if ! logexec 2 $SSH root@$host vzctl mount $VEID ; then
+			log 0 "Failed to mount VE"
+			undo_copy_quota
+			exit $MIG_ERR_MOUNT_VPS
+		fi
+	else
+		if [ "${DISK_QUOTA}" = 'no' ]; then
+			log 2 "VZ disk quota disabled -- skipping remote quota off"
+		else
+			log 1 "Turning quota off"
+			if ! logexec 2 $SSH root@$host vzquota off $VEID ; then
+				log 0 "failed to turn quota off"
+				undo_copy_quota
+				exit $MIG_ERR_QUOTA
+			fi
+		fi
+	fi
+
+	log 1 "Cleanup"
+fi
+
+if [ $remove_area -eq 1 ]; then
+	log 2 "Destroying VE"
+	logexec 2 vzctl destroy $VEID
+else
+	# Move config as veid.migrated to allow backward migration
+	mv -f $vpsconf $vpsconf.migrated
+fi
--- /dev/null
+#!/bin/sh
+# Copyright (C) 2000-2007 SWsoft. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# vzmigrate is used for VE migration to another node
+#
+# Usage:
+# vzmigrate [-r yes|no] [--ssh=<options>] [--keep-dst] [--online] [-v]
+# destination_address VEID
+# Options:
+# -r, --remove-area yes|no
+# Whether to remove VE on source HN for successfully migrated VE.
+# --ssh=<ssh options>
+# Additional options that will be passed to ssh while establishing
+# connection to destination HN. Please be careful with options
+# passed, DO NOT pass destination hostname.
+# --keep-dst
+# Do not clean synced destination VE private area in case of some
+# error. It makes sense to use this option on big VE migration to
+# avoid syncing VE private area again in case some error
+# (on VE stop for example) occurs during first migration attempt.
+# --online
+# Perform online (zero-downtime) migration: during the migration the
+# VE hangs for a while and after the migration it continues working
+# as though nothing has happened.
+# -v
+# Verbose mode. Causes vzmigrate to print debugging messages about
+# its progress (including some time statistics).
+#
+# Examples:
+# Online migration of VE #101 to foo.com:
+# vzmigrate --online foo.com 101
+# Migration of VE #102 to foo.com with downtime:
+# vzmigrate foo.com 102
+# NOTE:
+# This program uses ssh as a transport layer. You need to put ssh
+# public key to destination node and be able to connect without
+# entering a password.
+
+
+ACT_SCRIPTS_SFX="start stop mount umount"
+SSH_OPTIONS=""
+SSH="ssh $SSH_OPTIONS"
+SCP_OPTIONS=""
+SCP="scp $SCP_OPTIONS"
+RSYNC_OPTIONS="-aH --delete --numeric-ids"
+RSYNC="rsync $RSYNC_OPTIONS"
+
+online=0
+verbose=0
+remove_area=1
+keep_dst=0
+debug=0
+confdir="/etc/vz/conf"
+vzconf="/etc/vz/vz.conf"
+tmpdir="/var/tmp"
+act_scripts=
+
+# Errors:
+MIG_ERR_USAGE=1
+MIG_ERR_VPS_IS_STOPPED=2
+MIG_ERR_CANT_CONNECT=4
+MIG_ERR_COPY=6
+MIG_ERR_START_VPS=7
+MIG_ERR_STOP_SOURCE=8
+MIG_ERR_EXISTS=9
+MIG_ERR_NOEXIST=10
+MIG_ERR_IP_IN_USE=12
+MIG_ERR_QUOTA=13
+MIG_ERR_CHECKPOINT=$MIG_ERR_STOP_SOURCE
+MIG_ERR_MOUNT_VPS=$MIG_ERR_START_VPS
+MIG_ERR_RESTORE_VPS=$MIG_ERR_START_VPS
+MIG_ERR_OVZ_NOT_RUNNING=14
+MIG_ERR_APPLY_CONFIG=15
+
+usage() {
+ cat >&2 <<EOF
+This program is used for VE migration to another node
+Usage:
+vzmigrate [-r yes|no] [--ssh=<options>] [--keep-dst] [--online] [-v]
+ destination_address <VEID>
+Options:
+-r, --remove-area yes|no
+ Whether to remove VE on source HN for successfully migrated VE.
+--ssh=<ssh options>
+ Additional options that will be passed to ssh while establishing
+ connection to destination HN. Please be careful with options
+ passed, DO NOT pass destination hostname.
+--keep-dst
+ Do not clean synced destination VE private area in case of some
+ error. It makes sense to use this option on big VE migration to
+ avoid syncing VE private area again in case some error
+ (on VE stop for example) occurs during first migration attempt.
+--online
+ Perform online (zero-downtime) migration: during the migration the
+ VE hangs for a while and after the migration it continues working
+ as though nothing has happened.
+-v
+ Verbose mode. Causes vzmigrate to print debugging messages about
+ its progress (including some time statistics).
+EOF
+ exit $MIG_ERR_USAGE
+}
+
+# Logs message
+# There are 3 types of messages:
+# 0 - error messages (print to stderr)
+# 1 - normal messages (print to stdout)
+# 2 - debug messages (print to stdout if in verbose mode)
+# The message must not be passed as printf's format string (it may contain
+# '%'); %b still expands the \t/\n escapes that the timing report relies on.
+# The previous 'printf "Error: " $@"\n"' passed the message as arguments to
+# a format with no conversions, dropping it from the output.
+log () {
+	if [ $1 -eq 0 ]; then
+		shift
+		printf 'Error: %b\n' "$*" >&2
+	elif [ $1 -eq 1 ]; then
+		shift
+		printf '%b\n' "$*"
+	elif [ $verbose -eq 1 ]; then
+		shift
+		printf '\t%b\n' "$*"
+	fi
+}
+
+# Executes command and returns result of execution
+# There are 2 types of execution:
+# 1 - normal execution (all output will be printed)
+# 2 - debug execution (output will be printed if verbose mode is set,
+# in other case stdout and stderr redirected to /dev/null)
+logexec () {
+ if [ $1 -eq 1 -o $verbose -eq 1 ]; then
+ shift
+ $@
+ else
+ shift
+ $@ >/dev/null 2>&1
+ fi
+}
+
+undo_conf () {
+ $SSH "root@$host" "rm -f $vpsconf"
+}
+
+undo_act_scripts () {
+ if [ -n "$act_scripts" ] ; then
+ $SSH "root@$host" "rm -f $act_scripts"
+ fi
+ undo_conf
+}
+
+undo_private () {
+ if [ $keep_dst -eq 0 ]; then
+ $SSH "root@$host" "rm -rf $VE_PRIVATE"
+ fi
+ undo_act_scripts
+}
+
+undo_root () {
+ $SSH "root@$host" "rm -rf $VE_ROOT"
+ undo_private
+}
+
+undo_quota_init () {
+ [ "${DISK_QUOTA}" = 'no' ] || $SSH "root@$host" "vzquota drop $VEID"
+ undo_root
+}
+
+undo_quota_on () {
+ [ "${DISK_QUOTA}" = 'no' ] || $SSH "root@$host" "vzquota off $VEID"
+ undo_quota_init
+}
+
+undo_sync () {
+ # Root will be destroed in undo_root
+ undo_quota_on
+}
+
+undo_suspend () {
+ logexec 2 vzctl chkpnt $VEID --resume
+ undo_sync
+}
+
+undo_dump () {
+ if [ $debug -eq 0 ]; then
+ rm -f "$VE_DUMPFILE"
+ fi
+ undo_suspend
+}
+
+undo_copy_dump () {
+ $SSH "root@$host" "rm -f $VE_DUMPFILE"
+ undo_suspend
+}
+
+undo_stop () {
+ if [ "$state" = "running" ]; then
+ vzctl start $VEID
+ elif [ "$mounted" = "mounted" ]; then
+ vzctl mount $VEID
+ fi
+ undo_sync
+}
+
+undo_source_stage() {
+ if [ $online -eq 1 ]; then
+ undo_copy_dump
+ else
+ undo_stop
+ fi
+}
+
+undo_quota_dump () {
+ rm -f "$VE_QUOTADUMP"
+ undo_source_stage
+}
+
+undo_copy_quota () {
+ $SSH "root@$host" "rm -f $VE_QUOTADUMP"
+ undo_quota_dump
+}
+
+undo_undump () {
+ logexec 2 $SSH root@$host vzctl restore $VEID --kill
+ undo_copy_quota
+}
+
+get_status() {
+ exist=$3
+ mounted=$4
+ state=$5
+}
+
+get_time () {
+ awk -v t2=$2 -v t1=$1 'BEGIN{print t2-t1}'
+}
+
+if [ $# -lt 2 ]; then
+ usage
+fi
+
+while [ ! -z "$1" ]; do
+ log 1 "OPT:$1"
+ case "$1" in
+ --online)
+ online=1
+ ;;
+ -v)
+ verbose=1
+ ;;
+ --remove-area|-r)
+ shift
+ if [ "$1" = "yes" ]; then
+ remove_area=1
+ elif [ "$1" = "no" ]; then
+ remove_area=0
+ else
+ usage
+ fi
+ ;;
+ --keep-dst)
+ keep_dst=1
+ ;;
+ --ssh=*)
+ SSH_OPTIONS="$SSH_OPTIONS $(echo $1 | cut -c7-)"
+ SSH="ssh $SSH_OPTIONS"
+ SCP_OPTIONS="`echo $SSH_OPTIONS | sed 's/-p/-P/1'`"
+ SCP="scp $SCP_OPTIONS"
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+host=$1
+shift
+VEID=$1
+shift
+
+if [ -z "$host" -o -z "$VEID" -o $# -ne 0 ]; then
+ usage
+fi
+
+vpsconf="$confdir/$VEID.conf"
+
+if [ ! -r "$vzconf" -o ! -r "$vpsconf" ]; then
+ log 0 "Can't read global config or VE #$VEID config file"
+ exit $MIG_ERR_NOEXIST
+fi
+
+get_status $(vzctl status $VEID)
+if [ "$exist" = "deleted" ]; then
+ log 0 "VE #$VEID doesn't exist"
+ exit $MIG_ERR_NOEXIST
+fi
+
# Announce which migration mode is starting.
if [ $online -eq 1 ]; then
  log 1 "Starting online migration of VE $VEID on $host"
else
  log 1 "Starting migration of VE $VEID on $host"
fi

# Try to connect to destination
# BatchMode forces key-based auth so the script can never hang on an
# interactive password prompt.
if ! logexec 2 $SSH -o BatchMode=yes root@$host /bin/true; then
  log 0 "Can't connect to destination address using public key"
  log 0 "Please put your public key to destination node"
  exit $MIG_ERR_CANT_CONNECT
fi

# Check if OpenVZ is running
if ! logexec 2 $SSH -o BatchMode=yes root@$host /etc/init.d/vz status ; then
  log 0 "OpenVZ is not running on the target machine"
  log 0 "Can't continue migration"
  exit $MIG_ERR_OVZ_NOT_RUNNING
fi

# Check if CPT modules are loaded for online migration:
# /proc/cpt (checkpoint) must exist on the source and /proc/rst
# (restore) on the destination.
if [ $online -eq 1 ]; then
  if [ ! -f /proc/cpt ]; then
    log 0 "vzcpt module is not loaded on the source node"
    log 0 "Can't continue online migration"
    exit $MIG_ERR_OVZ_NOT_RUNNING
  fi
  if ! logexec 2 $SSH -o BatchMode=yes root@$host "test -f /proc/rst";
  then
    log 0 "vzrst module is not loaded on the destination node"
    log 0 "Can't continue online migration"
    exit $MIG_ERR_OVZ_NOT_RUNNING
  fi
fi
+
# The container must not already exist on the destination node; field 3
# of `vzctl status` output is "exist" or "deleted".
dst_exist=$($SSH "root@$host" "vzctl status $VEID" | awk '{print $3}')
if [ "$dst_exist" = "exist" ]; then
  log 0 "VE #$VEID already exists on destination node"
  exit $MIG_ERR_EXISTS
fi

# Online migration checkpoints a running container; a stopped one has
# no process state to suspend.
if [ $online -eq 1 -a "$state" != "running" ]; then
  log 0 "Can't perform online migration of stopped VE"
  exit $MIG_ERR_VPS_IS_STOPPED
fi

log 2 "Loading $vzconf and $vpsconf files"

# Source the configs to pick up VE_ROOT, VE_PRIVATE, DISK_QUOTA,
# IP_ADDRESS, etc. used below.
. "$vzconf"
. "$vpsconf"
VE_DUMPFILE="$tmpdir/dump.$VEID"
VE_QUOTADUMP="$tmpdir/quotadump.$VEID"

log 2 "Check IPs on destination node: $IP_ADDRESS"
# Every IP assigned to the container must be free on the destination;
# /proc/vz/veip lists IPs already claimed by containers there.
for IP in $IP_ADDRESS; do
  if [ $($SSH "root@$host" "grep -c \" $IP \" /proc/vz/veip") -gt 0 ];
  then
    log 0 "IP address $IP already in use on destination node"
    exit $MIG_ERR_IP_IN_USE
  fi
done

log 1 "Preparing remote node"

log 2 "Copying config file"
if ! logexec 2 $SCP $vpsconf root@$host:$vpsconf ; then
  log 0 "Failed to copy config file"
  exit $MIG_ERR_COPY
fi
+
# Apply the name mapping from the config on the destination node.
# Older vzctl versions that don't recognize --applyconfig_map return
# code 20 or 21; that is tolerated, any other non-zero code is fatal.
logexec 2 $SSH root@$host vzctl set $VEID --applyconfig_map name --save
rc=$?
# NB: the status must be captured once in $rc: the original tested `$?`
# three times (each test clobbers it) and used `&&` inside a single
# `[ ... ]`, which is a syntax error at runtime, so failures were
# never detected.
if [ $rc -ne 0 ] && [ $rc -ne 20 ] && [ $rc -ne 21 ]; then
  log 0 "Failed to apply config on destination node"
  undo_conf
  exit $MIG_ERR_APPLY_CONFIG
fi
+
# Collect any per-container action scripts (e.g. mount/umount hooks,
# suffixes listed in ACT_SCRIPTS_SFX) that exist alongside the config.
for sfx in $ACT_SCRIPTS_SFX; do
  file="$confdir/$VEID.$sfx"
  if [ -f "$file" ]; then
    act_scripts="$act_scripts $file"
  fi
done
if [ -n "$act_scripts" ]; then
  log 2 "Copying action scripts"
  if ! logexec 2 $SCP $act_scripts root@$host:$confdir ; then
    log 0 "Failed to copy action scripts"
    undo_conf
    exit $MIG_ERR_COPY
  fi
fi

log 2 "Creating remote VE root dir"
if ! $SSH "root@$host" "mkdir -p $VE_ROOT"; then
  log 0 "Failed to make VE root"
  undo_act_scripts
  exit $MIG_ERR_COPY
fi
+
log 2 "Creating remote VE private dir"
if ! $SSH "root@$host" "mkdir -p $VE_PRIVATE"; then
  log 0 "Failed to make VE private area"
  undo_private
  exit $MIG_ERR_COPY
fi

# First-level disk quota must be initialized and switched on at the
# destination BEFORE the private area is rsynced, so that usage is
# accounted as the files arrive.
if [ "${DISK_QUOTA}" != "no" ]; then
  log 1 "Initializing remote quota"

  log 2 "Quota init"
  if ! $SSH "root@$host" "vzctl quotainit $VEID"; then
    log 0 "Failed to initialize quota"
    undo_root
    exit $MIG_ERR_QUOTA
  fi

  log 2 "Turning remote quota on"
  if ! $SSH "root@$host" "vzctl quotaon $VEID"; then
    log 0 "Failed to turn quota on"
    undo_quota_init
    exit $MIG_ERR_QUOTA
  fi
else
  log 2 "VZ disk quota disabled -- skipping quota migration"
fi
+
log 1 "Syncing private"
# First rsync pass, with a one-line progress display: grep keeps only
# rsync's "...% of ..." progress lines and awk prints field 10 with a
# carriage-return terminator so the figure updates in place.
$RSYNC --progress \
  "$VE_PRIVATE" "root@$host:${VE_PRIVATE%/*}" |
  grep "% of" | awk -v ORS="\r" '{print $10}'
# A pipeline's exit status is that of its LAST command (awk, which is
# effectively always 0), so `if ! pipeline` could never see an rsync
# failure.  Check rsync's own status via bash's PIPESTATUS instead.
if [ ${PIPESTATUS[0]} -ne 0 ]; then
  log 0 "Failed to sync VE private areas"
  undo_quota_on
  exit $MIG_ERR_COPY
fi
+
if [ $online -eq 1 ]; then
  log 1 "Live migrating VE"

  # Freeze the container's processes; from here until the remote
  # resume the container is unavailable to its users.
  log 2 "Suspending VE"
  time_suspend=$(date +%s.%N)
  if ! logexec 2 vzctl chkpnt $VEID --suspend ; then
    log 0 "Failed to suspend VE"
    undo_sync
    exit $MIG_ERR_CHECKPOINT
  fi

  # Write the in-kernel checkpoint image to $VE_DUMPFILE.
  log 2 "Dumping VE"
  if ! logexec 2 vzctl chkpnt $VEID --dump --dumpfile $VE_DUMPFILE ; then
    log 0 "Failed to dump VE"
    undo_suspend
    exit $MIG_ERR_CHECKPOINT
  fi

  log 2 "Copying dumpfile"
  time_copy_dump=$(date +%s.%N)
  if ! logexec 2 $SCP $VE_DUMPFILE root@$host:$VE_DUMPFILE ; then
    log 0 "Failed to copy dump"
    undo_dump
    exit $MIG_ERR_COPY
  fi
else
  # Offline migration: quiesce the container so the second rsync pass
  # below sees a consistent private area.
  if [ "$state" = "running" ]; then
    log 1 "Stopping VE"
    if ! logexec 2 vzctl stop $VEID ; then
      log 0 "Failed to stop VE"
      undo_sync
      exit $MIG_ERR_STOP_SOURCE
    fi
  elif [ "$mounted" = "mounted" ]; then
    log 1 "Unmounting VE"
    if ! logexec 2 vzctl umount $VEID ; then
      log 0 "Failed to umount VE"
      undo_sync
      exit $MIG_ERR_STOP_SOURCE
    fi
  fi
fi
+
# Second rsync pass: the container was live during the first pass, so
# copy whatever changed since then (it is now suspended or stopped).
if [ "$state" = "running" ]; then
  log 2 "Syncing private (2nd pass)"
  time_rsync2=$(date +%s.%N)
  if ! $RSYNC \
    "$VE_PRIVATE" "root@$host:${VE_PRIVATE%/*}"; then
    log 0 "Failed to sync VE private areas"
    undo_source_stage
    exit $MIG_ERR_COPY
  fi
fi

if [ "${DISK_QUOTA}" != "no" ]; then
  log 1 "Syncing 2nd level quota"

  # Dump per-user/group (-U/-G) 2nd-level quota limits and grace
  # times (-T) to a local file.
  log 2 "Dumping 2nd level quota"
  time_quota=$(date +%s.%N)
  if ! vzdqdump $VEID -U -G -T > "$VE_QUOTADUMP"; then
    log 0 "Failed to dump 2nd level quota"
    undo_quota_dump
    exit $MIG_ERR_QUOTA
  fi

  log 2 "Copying 2nd level quota"
  if ! logexec 2 $SCP $VE_QUOTADUMP root@$host:$VE_QUOTADUMP ; then
    log 0 "Failed to copy 2nd level quota dump"
    undo_quota_dump
    exit $MIG_ERR_COPY
  fi

  # Load the dump on the destination and make vzquota re-read it in
  # one remote shell invocation, so a load failure aborts the reload.
  log 2 "Load 2nd level quota"
  if ! $SSH "root@$host" "(vzdqload $VEID -U -G -T < $VE_QUOTADUMP &&
    vzquota reload2 $VEID)"; then
    log 0 "Failed to load 2nd level quota"
    undo_copy_quota
    exit $MIG_ERR_QUOTA
  fi
else
  log 2 "VZ disk quota disabled -- skipping quota migration"
fi
+
if [ $online -eq 1 ]; then
  # Recreate the container's processes on the destination from the
  # copied checkpoint image; --skip_arpdetect avoids the duplicate-IP
  # ARP check during the handover.
  log 2 "Undumping VE"
  time_undump=$(date +%s.%N)
  if ! logexec 2 $SSH root@$host vzctl restore $VEID --undump \
    --dumpfile $VE_DUMPFILE --skip_arpdetect ; then
    log 0 "Failed to undump VE"
    undo_copy_quota
    exit $MIG_ERR_RESTORE_VPS
  fi

  log 2 "Resuming VE"
  if ! logexec 2 $SSH root@$host vzctl restore $VEID --resume ; then
    log 0 "Failed to resume VE"
    undo_undump
    exit $MIG_ERR_RESTORE_VPS
  fi
  time_finish=$(date +%s.%N)
  # Per-stage timing report.
  # NOTE(review): if DISK_QUOTA=no, time_quota is never assigned, so
  # the two quota-adjacent figures below are bogus -- TODO confirm.
  log 2 "Times:"
  log 2 "\tSuspend + Dump:\t" $(get_time $time_suspend $time_copy_dump)
  log 2 "\tCopy dump file:\t" $(get_time $time_copy_dump $time_rsync2)
  log 2 "\tSecond rsync:\t" $(get_time $time_rsync2 $time_quota)
  log 2 "\t2nd level quota:\t" $(get_time $time_quota $time_undump)
  log 2 "\tUndump + Resume:\t" $(get_time $time_undump $time_finish)
  log 2 "Total time: " $(get_time $time_suspend $time_finish)

  log 1 "Cleanup"

  # Discard the suspended source instance; the container now runs on
  # the destination node.
  log 2 "Killing VE"
  logexec 2 vzctl chkpnt $VEID --kill
  logexec 2 vzctl umount $VEID

  log 2 "Removing dumpfiles"
  rm -f "$VE_DUMPFILE"
  $SSH "root@$host" "rm -f $VE_DUMPFILE"
else
  # Offline migration: put the destination container into the same
  # state the source was in (running / mounted / stopped).
  if [ "$state" = "running" ]; then
    log 1 "Starting VE"
    if ! logexec 2 $SSH root@$host vzctl start $VEID ; then
      log 0 "Failed to start VE"
      undo_copy_quota
      exit $MIG_ERR_START_VPS
    fi
  elif [ "$mounted" = "mounted" ]; then
    log 1 "Mounting VE"
    if ! logexec 2 $SSH root@$host vzctl mount $VEID ; then
      log 0 "Failed to mount VE"
      undo_copy_quota
      exit $MIG_ERR_MOUNT_VPS
    fi
  else
    log 1 "Turning quota off"
    if ! logexec 2 $SSH root@$host vzquota off $VEID ; then
      log 0 "failed to turn quota off"
      undo_copy_quota
      exit $MIG_ERR_QUOTA
    fi
  fi

  log 1 "Cleanup"
fi

if [ $remove_area -eq 1 ]; then
  log 2 "Destroying VE"
  logexec 2 vzctl destroy $VEID
else
  # Move config as veid.migrated to allow backward migration
  mv -f $vpsconf $vpsconf.migrated
fi