1
1

Working on btcpay backup/restore.

This commit is contained in:
Derek Smith 2022-10-26 21:41:14 -04:00
parent a3db31986b
commit 5455237f1f
Signed by: farscapian
GPG Key ID: 8F1CD799CCA516CC
4 changed files with 34 additions and 23 deletions

View File

@ -2,12 +2,14 @@
The Sovereign Stack scripts in this repository are meant to be cloned to and executed from your management machine. The Sovereign Stack scripts in this repository are meant to be cloned to and executed from your management machine.
You can update Sovereign Stack scripts on your management machine by running `git pull --all`. Generally, you want to use ONLY signed git tags for your deployments. Use `git checkout v0.1.0` for example to switch to a specific version of Sovereign Stack. The scripts ensure check to ensure that the code you're running on your management machine is GREATER THAN OR EQUAL TO each of your active deployments (TODO). You can update Sovereign Stack scripts on your management machine by running `git pull --all --tags`. Generally, you want to use ONLY signed git tags for your deployments. Use `git checkout v0.1.0` for example to switch to a specific version of Sovereign Stack. The scripts check to ensure that the code you're running on your management machine is GREATER THAN OR EQUAL TO the version of your deployments (TODO). The scripts work to bring your old deployments into line with the current Sovereign Stack version.
Once your management machine is using a specific version of code, you will want to run the various scripts. But before you can do that, you need to bring a bare-metal Ubuntu 22.04 cluster host under management. Generally speaking you will run `ss-cluster` to bring a new bare-metal host under management of your management machine. This can be run AFTER you have verified SSH access to the bare-metal hosts. The device SHOULD also have a DHCP Reservation and DNS records in place. Once your management machine has checked out a specific version of Sovereign Stack, you will want to run the various scripts against your remotes. But before you can do that, you need to bring a bare-metal Ubuntu 22.04 cluster host under management (i.e., add it as a remote). Generally speaking you will run `ss-cluster` to bring a new bare-metal host under management of your management machine. This can be run AFTER you have verified SSH access to the bare-metal hosts. The device SHOULD also have a DHCP Reservation and DNS records in place.
After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site definitions. These files are stubbed out for the user automatically and documentation guides the user through the process. After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site definitions. These files are stubbed out for the user automatically and documentation guides the user through the process.
It is the responsibility of the management machine (i.e., system owner) to run the scripts on a regular and ongoing basis to ensure active deployments stay up-to-date with the Sovereign Stack master branch. It is the responsibility of the management machine (i.e., system owner) to run the scripts on a regular and ongoing basis to ensure active deployments stay up-to-date with the Sovereign Stack master branch.
By default (i.e., without any command line modifiers), Sovereign Stack scripts will back up active deployments resulting in minimal downtime. (zero downtime for Ghost, minimal for Nextcloud/Gitea, BTCPAY Server).
All other documentation for this project can be found at the [sovereign-stack.org](https://www.sovereign-stack.org). All other documentation for this project can be found at the [sovereign-stack.org](https://www.sovereign-stack.org).

View File

@ -24,14 +24,17 @@ if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
fi fi
DOMAIN_NAME= DOMAIN_NAME=
RESTORE_ARCHIVE=
VPS_HOSTING_TARGET=lxd VPS_HOSTING_TARGET=lxd
RUN_CERT_RENEWAL=false RUN_CERT_RENEWAL=false
RESTORE_WWW=false RESTORE_WWW=false
BACKUP_CERTS=true BACKUP_CERTS=true
BACKUP_APPS=false BACKUP_APPS=false
BACKUP_BTCPAY=false BACKUP_BTCPAY=false
RESTORE_BTCPAY=false RESTORE_BTCPAY=false
BTCPAY_RESTORE_ARCHIVE_PATH=
MIGRATE_WWW=false MIGRATE_WWW=false
MIGRATE_BTCPAY=false MIGRATE_BTCPAY=false
SKIP_WWW=false SKIP_WWW=false
@ -68,10 +71,6 @@ for i in "$@"; do
STOP_SERVICES=true STOP_SERVICES=true
shift shift
;; ;;
--archive=*)
RESTORE_ARCHIVE="${i#*=}"
shift
;;
--domain=*) --domain=*)
DOMAIN_NAME="${i#*=}" DOMAIN_NAME="${i#*=}"
shift shift
@ -96,6 +95,10 @@ for i in "$@"; do
BACKUP_BTCPAY=true BACKUP_BTCPAY=true
shift shift
;; ;;
--restore-archive=*)
BTCPAY_RESTORE_ARCHIVE_PATH="${i#*=}"
shift
;;
--migrate-www) --migrate-www)
MIGRATE_WWW=true MIGRATE_WWW=true
RUN_CERT_RENEWAL=false RUN_CERT_RENEWAL=false
@ -128,7 +131,7 @@ source ./defaults.sh
export CACHES_DIR="$HOME/ss-cache" export CACHES_DIR="$HOME/ss-cache"
export DOMAIN_NAME="$DOMAIN_NAME" export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2" export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_ARCHIVE="$RESTORE_ARCHIVE" export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH"
export RESTORE_WWW="$RESTORE_WWW" export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES" export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS" export BACKUP_CERTS="$BACKUP_CERTS"
@ -283,13 +286,11 @@ function instantiate_vms {
DDNS_HOST= DDNS_HOST=
MIGRATE_VPS=false MIGRATE_VPS=false
if [ "$VIRTUAL_MACHINE" = www ]; then if [ "$VIRTUAL_MACHINE" = www ]; then
echo "GOT HERE!!!"
if [ "$SKIP_WWW" = true ]; then if [ "$SKIP_WWW" = true ]; then
echo "INFO: Skipping WWW due to command line argument."
continue continue
fi fi
echo "AND HERE"
exit 1
VPS_HOSTNAME="$WWW_HOSTNAME" VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS" MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
DDNS_HOST="$WWW_HOSTNAME" DDNS_HOST="$WWW_HOSTNAME"
@ -346,12 +347,9 @@ function instantiate_vms {
if [ "$MACHINE_EXISTS" = true ]; then if [ "$MACHINE_EXISTS" = true ]; then
# we delete the machine if the user has directed us to # we delete the machine if the user has directed us to
# but before we do, we get a backup of applcation data of the running instance/vm
# that backup becomes the basis for restoring to the newer version of the host host (Type-1 VM)
if [ "$MIGRATE_VPS" = true ]; then if [ "$MIGRATE_VPS" = true ]; then
# if the RESTORE_ARCHIVE is not set, then
if [ -z "$RESTORE_ARCHIVE" ]; then
RESTORE_ARCHIVE="$LOCAL_BACKUP_PATH/$UNIX_BACKUP_TIMESTAMP.tar.gz"
fi
# get a backup of the machine. This is what we restore to the new VPS. # get a backup of the machine. This is what we restore to the new VPS.
echo "INFO: Machine exists. Since we're going to delete it, let's grab a backup. " echo "INFO: Machine exists. Since we're going to delete it, let's grab a backup. "
@ -539,8 +537,13 @@ if [ "$VPS_HOSTING_TARGET" = lxd ]; then
done done
# now let's run the www and btcpay-specific provisioning scripts. # now let's run the www and btcpay-specific provisioning scripts.
bash -c "./deployment/www/go.sh" if [ "$SKIP_WWW" = false ]; then
bash -c "./deployment/btcpayserver/go.sh" bash -c "./deployment/www/go.sh"
fi
if [ "$SKIP_BTCPAY" = false ]; then
bash -c "./deployment/btcpayserver/go.sh"
fi
elif [ "$VPS_HOSTING_TARGET" = aws ]; then elif [ "$VPS_HOSTING_TARGET" = aws ]; then
stub_site_definition stub_site_definition

View File

@ -22,7 +22,7 @@ if [ "$UPDATE_BTCPAY" = true ]; then
elif [ "$RESTORE_BTCPAY" = true ]; then elif [ "$RESTORE_BTCPAY" = true ]; then
# run the update. # run the update.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh" ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
sleep 10 sleep 15
./restore.sh ./restore.sh

View File

@ -3,12 +3,16 @@
set -ex set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
if [ -f "$RESTORE_ARCHIVE" ]; then echo "ENTERING RESTORE SCRIPT."
if [ -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then
# push the restoration archive to the remote server # push the restoration archive to the remote server
echo "INFO: Restoring BTCPAY Server: $RESTORE_ARCHIVE" echo "INFO: Restoring BTCPAY Server: $BTCPAY_RESTORE_ARCHIVE_PATH"
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH" ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_HOME/backups/btcpay.tar.gz" REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
scp "$RESTORE_ARCHIVE" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH" scp "$BTCPAY_RESTORE_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
# we clean up any old containers first before restoring. # we clean up any old containers first before restoring.
ssh "$FQDN" docker system prune -f ssh "$FQDN" docker system prune -f
@ -25,3 +29,5 @@ else
echo "ERROR: File does not exist." echo "ERROR: File does not exist."
exit 1 exit 1
fi fi
echo "EXITING RESTORE script."