--- /dev/null
+#!/usr/bin/perl
+#
+# BackupPC_ovz
+#
+# OpenVZ integration for BackupPC allowing the latter to backup OpenVZ VE's
+# with ovz awareness to improve backup and restore efficiency and features.
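+#
+# Invocation modes (inferred from the argument handling in the MAIN section):
+#
+#   BackupPC_ovz refresh
+#       Run on the BackupPC server (e.g. as a pre-dump user command): rebuild
+#       /etc/backuppc/vzlist_bpc from every HN and push this script to the HNs.
+#
+#   BackupPC_ovz server [restore] <veHostname> <ssh cmd ...> <veHostname> <xfer cmd ...>
+#       Run on the BackupPC server: look up the HN hosting the VE and re-invoke
+#       this script there via the given ssh command; the second occurrence of
+#       <veHostname> separates the ssh command from the xfer command.
+#
+#   BackupPC_ovz [restore] <VEID> <xfer cmd ...>
+#       Run on the HN (normally only via the server mode above): for a backup,
+#       snapshot the VE and run the xfer command chrooted into the snapshot;
+#       for a restore, run it chrooted into the VE's live private area.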
+
+# FIXME: signal handling to clean up mount point and snapshot on termination
+# FIXME: saveConfigs and restoreConfigs aren't being used yet
+
+use strict;
+use Socket;
+use File::Basename;
+use File::Path;
+#use IO::File;
+use Proc::PID::File;
+
+# Various constants
+my @HNS = ('pe18001.titaniummirror.com', 'pe18002.titaniummirror.com');
+my @script_ext = qw(start stop mount umount);
+my @velist = ();
+my $pidfile = "/tmp/".basename($0).".pid";
+my $vzsnap = 'vzsnap'; # Mount point and lv names. Mount is relative to /.
+my $snapsize = '1g';
+
+sub cmdExecOrEval
+{
+ my($cmd, @args) = @_;
+
+ if ( (ref($cmd) eq "ARRAY" ? $cmd->[0] : $cmd) =~ /^\&/ ) {
+        $cmd = join(" ", @$cmd) if ( ref($cmd) eq "ARRAY" );
+ eval($cmd);
+ print(STDERR "Perl code fragment for exec shouldn't return!!\n");
+ exit(1);
+ } else {
+ $cmd = [split(/\s+/, $cmd)] if ( ref($cmd) ne "ARRAY" );
+ alarm(0);
+ $cmd = [map { m/(.*)/ } @$cmd]; # untaint
+ #
+ # force list-form of exec(), ie: no shell even for 1 arg
+ #
+ exec { $cmd->[0] } @$cmd;
+ print(STDERR "Exec failed for @$cmd\n");
+ exit(1);
+ }
+}
+
+# FIXME: this routine sometimes needs to redirect stdin, stdout and stderr to
+# /dev/null so that it doesn't pollute the stream back to the backup server.
+sub cmdSystemOrEval
+{
+ my($cmd, @args) = @_;
+
+ $? = 0;
+    $cmd = join(" ", @$cmd) if ( ref($cmd) eq "ARRAY" );
+ if ( (ref($cmd) eq "ARRAY" ? $cmd->[0] : $cmd) =~ /^\&/ ) {
+ eval($cmd);
+ } else {
+ system($cmd);
+ }
+}
+
+# To be called by the BackupPC server from a pre-dump user command. These
+# actions, if attempted in-line with a backup (that is, as part of the client
+# invocation), would cause the backup to hang indefinitely.
+sub refreshConfig()
+{
+ # Write the VEs on all HNs to a config file on the BackupPC server for
+ # later use.
+    open my $out, ">/etc/backuppc/vzlist_bpc" or
+        die "Cannot write to /etc/backuppc/vzlist_bpc";
+    foreach my $hn (@HNS) {
+        open my $fh, "ssh -l root $hn vzlist -a |" or
+            die "Cannot run remote vzlist command on $hn";
+ while (<$fh>) {
+ chomp;
+            my ($veid, $running, $hostname) = (split(' '))[0, 2, 4];
+            if ($veid =~ /^[0-9]+$/ && $hostname ne '-') {
+ if ($running eq "running") {
+ $running = 1;
+ } else {
+ $running = 0;
+ }
+ print $out "$hostname,$veid,$hn,$running\n";
+ }
+ }
+ close($fh);
+ }
+ close($out);
+
+ # Copy the script to the HNs
+ my $remoteCmd = "/usr/bin/".basename($0);
+ foreach my $hn (@HNS) {
+ my $cmd = "rsync -aq -e ssh $0 root\@$hn:$remoteCmd";
+ #print "Executing '$cmd'\n";
+ cmdSystemOrEval($cmd);
+ }
+}
+
+# For use on the BackupPC server.
+sub loadVeList()
+{
+ open my $fh, "</etc/backuppc/vzlist_bpc" ||
+ die "Cannot read from /etc/backuppc/vzlist_bpc. Set a preuser script.";
+
+ while (<$fh>) {
+ chomp;
+ my ($hostname, $veid, $host, $running) = split(',');
+ push(@velist, {
+ "hostname" => $hostname,
+ "VEID" => $veid,
+ "HN" => $host,
+ "running" => $running
+ });
+ }
+ close($fh);
+}
+
+# For use on the HN
+sub localVe($)
+{
+ my ($veid) = @_;
+ die "HN: no veid" if (!defined($veid));
+
+    my $vzdir = '/etc/vz';
+    die "HN is not running OpenVZ" if (! -d $vzdir);
+
+ my $lockdir = undef;
+ my $dumpdir = undef;
+ my $private = undef;
+ my $root = undef;
+ open my $fh, "<$vzdir/vz.conf" || die "Cannot open $vzdir/vz.conf";
+ while (<$fh>) {
+ chomp;
+ my ($name, $value) = split('=');
+ if ($name eq 'LOCKDIR') {
+ $lockdir = $value;
+ die "OpenVZ LOCKDIR ($lockdir) is invalid" if (! -d $lockdir);
+ } elsif ($name eq 'DUMPDIR') {
+ $dumpdir = $value;
+ die "OpenVZ DUMPDIR ($dumpdir) is invalid" if (! -d $dumpdir);
+ } elsif ($name eq 'VE_PRIVATE') {
+ $private = $value;
+ } elsif ($name eq 'VE_ROOT') {
+ $root = $value;
+ }
+ }
+ close($fh);
+
+ my $confdir = "$vzdir/conf";
+ die "OpenVZ conf dir ($confdir) not found" if (! -d $confdir);
+
+ my $hostname = undef;
+ my $conffile = "$confdir/$veid.conf";
+ open $fh, "<$conffile" || die "Cannot open $conffile";
+ while (<$fh>) {
+ chomp;
+ my ($name, $value) = split('=');
+ $private = $value if ($name eq 'VE_PRIVATE');
+ $root = $value if ($name eq 'VE_ROOT');
+ if ($name eq 'HOSTNAME') {
+ $value =~ s/"//g;
+ $hostname = gethostbyaddr(gethostbyname($value), AF_INET);
+ }
+ }
+ close($fh);
+
+ $private =~ s/"//g;
+ $private =~ s|/\$VEID|/$veid|g;
+ die "VE_PRIVATE is not defined" if (!defined($private));
+ die "VE $veid private dir ($private) not found" if (! -d $private);
+
+ $root =~ s|/\$VEID|/$veid|g;
+ $root =~ s/"//g;
+ die "VE_ROOT is not defined" if (!defined($root));
+ die "VE $veid root dir ($root) not found" if (! -d $root);
+
+ die "VE $veid has no HOSTNAME" if (!defined($hostname));
+
+ my $status = `vzctl status $veid`;
+ my $running = 0;
+ $running = 1 if ($status =~ /running/);
+
+ return {
+ "vzdir" => $vzdir,
+ "lockdir" => $lockdir,
+ "dumpdir" => $dumpdir,
+ "confdir" => $confdir,
+ "conffile" => $conffile,
+ "private" => $private,
+ "root" => $root,
+ "hostname" => $hostname,
+ "VEID" => $veid,
+ "running" => $running
+ };
+}
+
+sub getVeEntry($$)
+{
+ my ($field, $arg) = @_;
+    foreach my $ve (@velist) {
+        return $ve if ($ve->{$field} eq $arg);
+    }
+ return undef;
+}
+
+sub getVeByHostname($)
+{
+ my ($host) = @_;
+
+ my $hostname = gethostbyaddr(gethostbyname($host), AF_INET);
+ die "Host $host not found" if (!defined($hostname));
+
+ my $ve = getVeEntry('hostname', $hostname);
+ die "Host $hostname is not a VE" if (!defined($ve));
+ return $ve;
+}
+
+sub getVeEntries($$)
+{
+ my ($field, $host) = @_;
+ my @entries;
+
+    foreach my $ve (@velist) {
+        push(@entries, $ve) if ($ve->{$field} eq $host);
+    }
+ return @entries;
+}
+
+sub printVeEntry($)
+{
+ my ($ve) = @_;
+
+ die "No VE to print" if (!defined($ve));
+ print STDERR '{ ';
+ #foreach my $k (keys %{$velist[0]}) {
+ foreach my $k (keys %{$ve}) {
+ print STDERR $k.' => '.$ve->{$k}."\n ";
+ }
+ print STDERR "}\n";
+}
+
+sub delSnapshot($)
+{
+ my ($ve) = @_;
+ die "No VE record for delSnapshot" if (!defined($ve));
+
+ #print "delSnapshot: doing nothing for now\n";
+ #printVeEntry($ve);
+ #return;
+
+ my $dir = $ve->{'snaproot'};
+ if (defined($dir) && -d $dir) {
+ #cmdSystemOrEval("rm -rf $dir/etc/vzdump");
+ cmdSystemOrEval("umount $dir");
+ cmdSystemOrEval("rmdir $dir");
+ }
+
+ my $dev = $ve->{'snapdev'};
+ cmdSystemOrEval("lvremove -f $dev >/dev/null 2>&1") if (-b $dev);
+}
+
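+# Map a directory to its backing block device, mount point, LVM volume group
+# and logical volume, using the output of df and lvscan.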
+sub getDevice($)
+{
+ my ($dir) = @_;
+
+ open my $fh, "df -P '$dir'|" || die "Unable to exec df";
+ <$fh>; # skip header
+ my $df = <$fh>;
+ close($fh);
+ chomp($df);
+    my ($dev, $mpoint) = (split(/\s+/, $df))[0, 5];
+
+ my $vg = undef;
+ my $lv = undef;
+ open $fh, "lvscan|" || die "Unable to exec lvscan";
+ while (my $line = <$fh>) {
+ if ($line =~ m|^\s+ACTIVE\s+\'/dev/([^/]+)/([^\']+)\'\s|) {
+ # vg is $1, lv is $2
+ if ($dev eq "/dev/$1/$2" || $dev eq "/dev/mapper/$1-$2") {
+ $vg = $1;
+ $lv = $2;
+ }
+ }
+ }
+ close($fh);
+
+ die "Device $dev has no LVM entry for volume group" if (!defined($vg));
+ die "Device $dev has no LVM entry for logical volume" if (!defined($lv));
+ return ($dev, $mpoint, $vg, $lv);
+}
+
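+# Create an LVM snapshot of the LV holding the VE's private area, mount it at
+# /$vzsnap, and record snaproot, snapdev and snapprivate in the VE record.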
+sub makeSnapshot($)
+{
+ my ($ve) = @_;
+ die "No VE record for snapshot" if (!defined($ve));
+
+ my ($dev, $lvmpath, $vg, $lv) = getDevice($ve->{'private'});
+ #print "snapshot: dev=$dev, lvmpath=$lvmpath, vg=$vg, lm=$lv\n";
+ die "Can't find device for VE filesystem" if (!defined($dev));
+
+ my $snaproot = "/$vzsnap";
+
+ mkpath "$snaproot" ||
+ die "Can't create snapshot directory (backup in progress?)";
+ $ve->{'snaproot'} = $snaproot;
+
+ my $snapdev = "/dev/$vg/$vzsnap";
+ die "Snapshot dev $snapdev exists (backup in progress?)" if (-b $snapdev);
+ # FIXME: xfs_freeze hangs; without it we are likely to fail at some point.
+ # FIXME: finding the mount point instead of hard coding.
+ #cmdSystemOrEval("xfs_freeze -f /var/lib/vz/private");
+ cmdSystemOrEval("lvcreate --size $snapsize --snapshot --name $vzsnap /dev/$vg/$lv >/dev/null 2>&1");
+ #cmdSystemOrEval("xfs_freeze -u /var/lib/vz/private");
+ if (! -b $snapdev) {
+ delSnapshot($ve);
+ die "Failed to create snapshot device"
+ }
+ $ve->{'snapdev'} = $snapdev;
+ cmdSystemOrEval("mount -o nouuid $snapdev $snaproot");
+
+ my $snapprivate = $ve->{'private'};
+ $snapprivate =~ s|/?$lvmpath/?|/$vzsnap/|;
+ #print "snapshot: snapprivate = $snapprivate\n";
+ if ($snapprivate !~ /$vzsnap/ || ! -d $snapprivate) {
+ delSnapshot($ve);
+ die "Wrong lvm mount point $lvmpath";
+ }
+ $ve->{'snapprivate'} = $snapprivate;
+
+ if (! -d "/$snapprivate/etc") {
+ delSnapshot($ve);
+ die "Mount failure or filesystem doesn't belong to a VE";
+ }
+}
+
+sub saveConfigs($)
+{
+ my ($ve) = @_;
+ die "No VE record for saveConfigs" if (!defined($ve));
+
+ # Copy configuration and other scripts belonging to VE into VE's snapshot
+ my $snapprivate = $ve->{'snapprivate'};
+ mkpath "$snapprivate/etc/vzdump";
+ my $conffile = $ve->{'conffile'};
+ cmdSystemOrEval("cp $conffile $snapprivate/etc/vzdump/vps.conf");
+ foreach my $ext (@script_ext) {
+ my $fn = $ve->{'confdir'}."/".$ve->{'VEID'}.".$ext";
+ cmdSystemOrEval("cp $fn $snapprivate/etc/vzdump/vps.$ext") if (-f $fn);
+ }
+}
+
+sub restoreConfigs($)
+{
+ my ($ve) = @_;
+ die "No VE record for restoreConfigs" if (!defined($ve));
+
+ my $private = $ve->{'private'};
+ die "Can't restore invalid private dir $private" if (! -d $private);
+ my $qprivate = $private;
+ $qprivate =~ s|/|\\\/|g;
+ $qprivate =~ s|/$ve->{'VEID'}$|/\$VEID|;
+
+ my $root = $ve->{'root'};
+ die "Can't restore invalid root dir $root" if (! -d $root);
+ my $qroot = $root;
+ $qroot =~ s|/|\\\/|g;
+ $qroot =~ s|/$ve->{'VEID'}$|/\$VEID|;
+
+ my $conffile = $ve->{'conffile'};
+ my $cmd = "sed -e 's/VE_ROOT=.*/VE_ROOT=\\\"$qroot\\\"/' -e 's/VE_PRIVATE=.*/VE_PRIVATE=\\\"$qprivate\\\"/' <'$private/etc/vzdump/vps.conf' >'$conffile'";
+ cmdSystemOrEval($cmd);
+
+ foreach my $s (@script_ext) {
+ my $cfgdir = $ve->{'confdir'};
+ my $src = $ve->{'private'}."/etc/vzdump/vps.$s";
+ my $dest = "$cfgdir/".$ve->{'VEID'}.".$s";
+ cmdSystemOrEval("mv '$src' '$dest'") if (-f $src);
+ }
+
+ rmtree "$private/etc/vzdump";
+
+ # FIXME: on error, if there was no private dir to start, we should
+ # remove everything we added.?
+}
+
+sub checkRunningClient()
+{
+ die "A backup or restore operation are already in progress"
+ if (Proc::PID::File->running({ dir => '/tmp', verify => 1 }));
+
+ # Clean up any prior backup's mount point and snapshot, if it exists.
+ # Note that the snapshot is small, so we don't really want it lying around!
+ my $vg = undef;
+ open my $fh, "lvscan|" || die "Unable to exec lvscan";
+ while (my $line = <$fh>) {
+ if ($line =~ m|^\s+ACTIVE\s+Snapshot\s+\'/dev/([^/]+)/$vzsnap\'\s|) {
+ $vg = $1;
+ }
+ }
+ close($fh);
+ if (defined($vg)) {
+ my $dev = "/dev/mapper/$vg-$vzsnap";
+ #print "Found vzsnap lv $dev\n";
+ cmdSystemOrEval("umount /$vzsnap");
+ cmdSystemOrEval("rmdir /$vzsnap") if (-d "/$vzsnap");
+ cmdSystemOrEval("lvremove -f $dev >/dev/null 2>&1") if (-b $dev);
+ }
+}
+
+sub runClient($)
+{
+ my ($restore) = @_;
+
+ checkRunningClient();
+
+ my $veid = shift(@ARGV);
+ die "HN needs a VEID argument" if (!defined($veid));
+ die "HN: no command to execute after VEID" if ($#ARGV < 0);
+
+ # Find $host in the list of VEs
+ my $ve = localVe($veid);
+ die "VE $veid not found on this HN" if (!defined($ve));
+ #printVeEntry($ve);
+
+ if (! $restore) {
+ cmdSystemOrEval("vzctl stop $veid >/dev/null 2>&1")
+ if ($ve->{'running'});
+ makeSnapshot($ve);
+ cmdSystemOrEval("vzctl start $veid >/dev/null 2>&1")
+ if ($ve->{'running'});
+ die "Failed to make snapshot of filesystem for VE $veid"
+ if (!defined($ve->{'snaproot'}));
+
+ # Make and exec the backup command. Do it in a chroot to the snapshot
+ # of the VE's root dir so that any relative path information in the
+ # backup command is accurate. This does mean that each VE needs rsync,
+ # etc.
+ my $cmd = "chroot ".$ve->{'snapprivate'}." ".join(' ', @ARGV);
+ #print "HN: cmd |$cmd|\n";
+        $? = 0;
+        cmdSystemOrEval($cmd);
+        my $ret = $?;   # capture the xfer status before delSnapshot() runs more commands
+
+        # Remove snapshot, we're done
+        delSnapshot($ve);
+
+        # Pass the xfer command's exit status back to the backup server
+        exit($ret >> 8);
+ } else {
+ # Restores work off the VE's live root filesystem. A full restore
+ # should be done to the HN host, redirecting the restore to the VE's
+ # private directory on the HN when the VE is stopped.
+ my $cmd = "chroot ".$ve->{'private'}." ".join(' ', @ARGV);
+ #print "HN: cmd |$cmd|\n";
+ # A restore can exec, because we have no cleanup to do.
+ cmdExecOrEval($cmd);
+ }
+}
+
+sub runServer($)
+{
+ my ($restore) = @_;
+
+ # Build the beginning remote command
+ my $remoteCmd = "/usr/bin/".basename($0);
+ #print "Remote command is $remoteCmd\n";
+
+ my $host = shift(@ARGV);
+ die "Hostname argument required" if (!defined($host));
+ die "No command to execute after hostname" if ($#ARGV < 0);
+
+ # Find $host in the list of VEs
+ loadVeList();
+ my $ve = getVeByHostname($host);
+ die "VE $host not found" if (!defined($ve));
+ #printVeEntry($ve);
+
+ # The command line is bisected by the next occurrence of $host. Everything
+ # before is the ssh command (sans what to run on the VE) and everything
+ # after is the xfer command to run on the VE.
+ my @sshCmd;
+ my @xferCmd;
+ my $foundHost = 0;
+ foreach my $arg (@ARGV) {
+ if ($arg eq $host) {
+ $foundHost = 1;
+ } else {
+ if ($foundHost) {
+ push(@xferCmd, $arg);
+ } else {
+ push(@sshCmd, $arg);
+ }
+ }
+ }
+ die "No ssh command found" if ($#sshCmd < 0);
+ die "No xfer command found" if ($#xferCmd < 0);
+ #print "ssh command: |".join(' ', @sshCmd)."|\n";
+ #print "xfer command: |".join(' ', @xferCmd)."|\n";
+
+ # Create command line to initiate the remote side of the backup. The
+ # remote side runs on the VE's HN and is given the VE's VEID.
+ my $cmd = join(' ', @sshCmd)." ".$ve->{'HN'}." $remoteCmd ".
+ ($restore ? "restore " : "").$ve->{'VEID'}." ".join(' ', @xferCmd);
+ #print "remote command: |$cmd|\n";
+
+ ## Search and replace
+ #foreach my $key (keys %{$velist[0]}) {
+ # my $val = $ve->{$key};
+ # $cmd =~ s/\@$key\@/$val/g if (defined($val));
+ #}
+
+ cmdExecOrEval($cmd);
+}
+
+# A hard-coded test; didn't seem to help
+sub runServer_test($)
+{
+ my ($restore) = @_;
+
+ # Build the beginning remote command
+ my $remoteCmd = "/usr/bin/".basename($0);
+ #print "Remote command is $remoteCmd\n";
+
+ my $host = shift(@ARGV);
+ die "Hostname argument required" if (!defined($host));
+ die "No command to execute after hostname" if ($#ARGV < 0);
+
+ # Find $host in the list of VEs
+ loadVeList();
+ my $ve = getVeByHostname($host);
+ die "VE $host not found" if (!defined($ve));
+ #printVeEntry($ve);
+
+ # The command line is bisected by the next occurrence of $host. Everything
+ # before is the ssh command (sans what to run on the VE) and everything
+ # after is the xfer command to run on the VE.
+ my @sshCmd;
+ my @xferCmd;
+ my $foundHost = 0;
+ foreach my $arg (@ARGV) {
+ if ($arg eq $host) {
+ $foundHost = 1;
+ } else {
+ if ($foundHost) {
+ push(@xferCmd, $arg);
+ } else {
+ push(@sshCmd, $arg);
+ }
+ }
+ }
+ die "No ssh command found" if ($#sshCmd < 0);
+ die "No xfer command found" if ($#xferCmd < 0);
+ #print "ssh command: |".join(' ', @sshCmd)."|\n";
+ #print "xfer command: |".join(' ', @xferCmd)."|\n";
+
+ # Create command line to initiate the remote side of the backup. The
+ # remote side runs on the VE's HN and is given the VE's VEID.
+ my $cmd = join(' ', @sshCmd)." pe18002.titaniummirror.com $remoteCmd ".
+ ($restore ? "restore " : "")."151 ".join(' ', @xferCmd);
+ #print "remote command: |$cmd|\n";
+
+ ## Search and replace
+ #foreach my $key (keys %{$velist[0]}) {
+ # my $val = $ve->{$key};
+ # $cmd =~ s/\@$key\@/$val/g if (defined($val));
+ #}
+
+ cmdExecOrEval($cmd);
+}
+
+## MAIN
+
+# Determine how to run this script. Each option is valid by itself. Only the
+# options server and restore may be seen together.
+my $server = 0;
+if ($ARGV[0] eq "server") {
+ shift(@ARGV);
+ $server = 1;
+}
+
+my $refresh = 0;
+if ($ARGV[0] eq "refresh") {
+ shift(@ARGV);
+ $refresh = 1;
+}
+
+my $restore = 0;
+if ($ARGV[0] eq "restore") {
+ shift(@ARGV);
+ $restore = 1;
+ #print "Restore mode\n";
+}
+
+if ($server) {
+ runServer($restore);
+} elsif ($refresh) {
+ refreshConfig();
+} else {
+ runClient($restore);
+}
--- /dev/null
+Introduction
+
+BackupPC_ovz is a script that adds OpenVZ integration to BackupPC. BackupPC
+has no problems backing up an OpenVZ (ovz) Hardware Node (HN) or an ovz
+Virtual Environment (VE), but by making BackupPC aware of ovz's internals,
+the backup of VEs can be made far more efficient.
+
+BackupPC_ovz adds the following capabilities to BackupPC:
+
+ * VE backups are taken from a snapshot of the VE's filesystem after the VE
+ has been shut down. This guarantees that the filesystem data are in a
+ consistent state without requiring application specific backup or pre-backup
+ processing activities.
+
+ * The VE is shut down only long enough to snapshot its filesystem, then is
+ automatically restarted. Typical VE downtime is around 30 seconds, depending
+ upon how much application-dependent processing occurs at shutdown and
+ startup.
+
+ * Both rsync and tar BackupPC XferMethods are supported. Because the backup
+ and restore agent processes actually run on the HN hosting the VE, direct
+ restore from BackupPC's web interface can be used to do a 'bare metal'
+ recovery of the VE.
+
+ * Any time the VE's /etc directory is backed up, the backup will add an
+ /etc/vzdump directory containing the VE's configuration on the HN, notably
+ the $VEID.conf file.
+
+ * The VE is configured as if it were any other server to be backed up, with
+ the notable addition of the BackupPC_ovz command to its client backup and
+ restore commands, etc.
+
+ * Although VE backups are actually performed by the HN, BackupPC_ovz determines
+ the VE <-> HN mapping just before each backup run, eliminating any static
+ mapping requirement in the BackupPC configuration. It is acceptable to
+ periodically rebalance VE's using the ovz vzmigrate utility, as BackupPC_ovz
+ will correctly locate a moved VE at the next backup.
+
+Requirements
+
+BackupPC_ovz requires that the HN be set up correctly as if it were to be a
+server backed up by BackupPC. Specifically, this means a recent version of
+rsync (we currently use 3.0.0pre6) and an ssh public key installed into the HN
+root user's .ssh/authorized_keys2 file. The companion private key, as usual,
+belongs to the backuppc user on the BackupPC server.
+
+Additionally, BackupPC_ovz requires that the private storage area, $VE_PRIVATE,
+for the VE to be backed up exist on a filesystem hosted on an LVM logical
+volume (LV). BackupPC_ovz imposes no restrictions on the filesystem used, as
+long as it is mountable by the HN, which by definition it must be.
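+
+One way to confirm this, sketched below with an assumed example path, is the
+same df/lvscan probing that BackupPC_ovz itself performs:
+
+    #!/usr/bin/perl
+    # Check whether a directory lives on an LVM logical volume (sketch).
+    use strict;
+
+    my $private = shift || '/var/lib/vz/private/101';    # example path; adjust
+    my ($dev) = split(/\s+/, (`df -P '$private'`)[1]);    # backing device
+    my $on_lvm = 0;
+    foreach my $line (`lvscan`) {
+        next unless ($line =~ m{'/dev/([^/]+)/([^']+)'});
+        $on_lvm = 1 if ($dev eq "/dev/$1/$2" || $dev eq "/dev/mapper/$1-$2");
+    }
+    print $on_lvm ? "$private is on an LVM LV ($dev)\n"
+                  : "$private is NOT on an LVM LV ($dev)\n";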
+
+Limitations
+
+BackupPC_ovz imposes certain limitations. The primary one is that only a
+single VE backup may run on a given HN at any time. A VE backup attempted
+while another is already in progress will fail, and BackupPC will report an
+inability to retrieve the file list. This is not a
+catastrophic problem, as BackupPC will reschedule another backup attempt at a
+later time. The reason for this limitation is primarily to simplify the first
+releases of BackupPC_ovz. It would be possible to extend BackupPC_ovz to
+remove this limitation. However, this would only be useful in environments
+running very large HNs, one or more BackupPC servers, and gigabit or higher
+network speeds.
+
+BackupPC_ovz uses LVM2, which must be installed on each HN. All VE private
+storage areas must be on filesystem(s) hosted on LVM LVs.
+
+Each HN must have perl installed, including the Proc::PID::File module, which
+on Ubuntu is installed via the libproc-pid-file-perl package.
+
+VE host names in BackupPC must equate to the exact hostname as returned for
+the VE's primary IP address from the DNS server that BackupPC and all HNs
+use. In other words, "host <vename>" returns an IP addr, and "host IPaddr"
+returns <vename>. It is exactly that <vename> that must be used as the VE
+host name in BackupPC. In our environment, DNS returns fully qualified host
+names, so the hosts in our BackupPC configuration are named with their fully
+qualified domain names.
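+
+The snippet below is a minimal sketch (not part of BackupPC_ovz itself) of a
+check for this, using the same gethostbyname/gethostbyaddr canonicalization
+that BackupPC_ovz applies when looking up a VE:
+
+    #!/usr/bin/perl
+    # Confirm that a VE name resolves and that its primary address resolves
+    # back to the canonical name BackupPC should use for the host.
+    use strict;
+    use Socket;
+
+    my $vename = shift or die "usage: $0 <vename>\n";
+    my $addr   = gethostbyname($vename) or die "$vename does not resolve\n";
+    my $canon  = gethostbyaddr($addr, AF_INET)
+        or die "no reverse DNS for " . inet_ntoa($addr) . "\n";
+    print "Use '$canon' as the BackupPC host name for this VE\n";
+    warn "note: '$vename' differs from '$canon'\n" if (lc($vename) ne lc($canon));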
+
+Installation
+
+To install BackupPC_ovz:
+
+ * Install BackupPC as normal.
+
+ * Configure all HNs as Hosts (backup/restore targets in BackupPC) per normal
+ BackupPC instructions. Until the HNs can be successfully backed up and
+ restored, these operations cannot be successfully completed on any VEs. We
+ recommend the rsync or tar XferMethods, using ssh as a transport. Only
+ the rsync method has been tested at this time.
+
+ * Install a recent version of rsync (we use 3.0.0pre6+) into each VE. Note
+ that a recent version of rsync is also required to successfully perform
+ online migration, a very useful ovz function.
+
+ * Install BackupPC_ovz into /usr/bin of each HN. The owner should be root,
+ the group root and file permissions 0755.
+
+ * Create the first VE Host in BackupPC. Set its XferMethod to rsync (or tar).
+ Three changes to the host-specific configuration are required to use
+ BackupPC_ovz (a configuration sketch follows this list):
+
+ - On the Backup Settings page, set the PreDumpUserCommand to:
+ /usr/bin/BackupPC_ovz refresh
+
+ - On the Xfer page, add the following to the beginning of the RsyncClientCmd
+ field. Do not change what is already present in that field:
+ /usr/bin/BackupPC_ovz server
+
+ - On the Xfer page, add the following to the beginning of the
+ RsyncClientRestoreCmd field. Do not change what is already present in
+ that field:
+ /usr/bin/BackupPC_ovz server restore
+
+ * To add subsequent VEs to BackupPC, add each new VE into BackupPC using its
+ NEWHOST=COPYHOST mechanism, as documented on the Edit Hosts page. This will
+ automatically copy the modifications made for an existing VE host into a
+ new VE host.
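+
+Taken together, the resulting per-host configuration might look like the
+sketch below. This is only an illustration: the /etc/backuppc/pc/<vename>.pl
+path, the $Conf{...} variable names and the trailing ssh/rsync portions assume
+a stock Debian/Ubuntu BackupPC 3.x install, and the VE host name ($host) is
+shown immediately after 'server'/'server restore' because the script's server
+mode reads the VE host name as its first argument.
+
+    # /etc/backuppc/pc/<vename>.pl  (sketch only)
+    $Conf{XferMethod}     = 'rsync';
+    $Conf{DumpPreUserCmd} = '/usr/bin/BackupPC_ovz refresh';
+    $Conf{RsyncClientCmd} = '/usr/bin/BackupPC_ovz server $host'
+                          . ' $sshPath -q -x -l root $host $rsyncPath $argList+';
+    $Conf{RsyncClientRestoreCmd} = '/usr/bin/BackupPC_ovz server restore $host'
+                          . ' $sshPath -q -x -l root $host $rsyncPath $argList+';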
+
+Using BackupPC with VEs
+
+Once a VE has been added as a host to BackupPC, BackupPC will automatically
+schedule the first and each subsequent backup according to the defined backup
+schedule(s). Backups of a VE are no different, in terms of BackupPC usage,
+than backups of any other host.
+
+Restoring files and directories from BackupPC to a VE also works just like it
+would with a normal host. Using the BackupPC web interface, select a backup,
+select the files or directories desired, click Restore, then use the Direct
+Restore option, or any other that better suits your needs.
+
+Special recovery features of VEs under BackupPC
+
+Because BackupPC actually backs up and recovers VE data using its hosted HN,
+additional recovery features are available. For example, a VE can be
+recovered in its entirety, analogous to a 'bare metal' recovery of a physical
+server:
+
+ * Stop the VE to be fully recovered, if it is running.
+ * Using BackupPC, select all files and directories of the appropriate VE
+ backup and use Direct Restore to restore everything.
+ * After the restore is complete, recover the ovz-specific VE configuration
+ files from the VE's /etc/vzdump directory into the appropriate locations
+ of the HN's /etc/vz/conf directory. There is nothing to do if these
+ configuration files have not been changed (e.g. via vzctl set).
+ * Start the VE using ovz's vzctl utility.
+
+The above strategy works well for restoring an existing VE to a prior state.
+With the rsync XferMethod, the restore is performed as a delta transfer,
+dramatically reducing the recovery time.
+
+What happens if we need to recover a VE where no existing version of the VE
+is running anywhere? Consider a disaster recovery case where the HN hosting
+the VE melted and is completely unrecoverable. We can then use a similar
+process as above to recover the VE to another HN -- even one that had never
+hosted the VE before.
+
+ * Using BackupPC, select all files and directories of the appropriate VE
+ backup and use Direct Restore to restore everything.
+ - Restore NOT to the VE host, but to the HN host that will host the newly
+ recovered VE.
+ - In the Direct Restore dialog, select the appropriate HN filesystem
+ location to restore the VE. For example, if recovering VE with VEID 123,
+ the recovery directory may be /var/lib/vz/private/123.
+ * Create an empty /var/lib/vz/root/123 directory on the HN.
+ * After the restore is complete, recover the ovz-specific VE configuration
+ files from the VE's /etc/vzdump directory into the appropriate locations
+ of the HN's /etc/vz/conf directory. There is nothing to do if these
+ configuration files have not been changed (e.g. via vzctl set).
+ * Start the VE using ovz's vzctl utility.