
Huge updates.

Derek Smith 2022-07-27 12:38:33 -04:00
parent ff706a62ae
commit bb072c67dc
Signed by: farscapian
GPG Key ID: 8F1CD799CCA516CC
44 changed files with 1651 additions and 341 deletions

.gitignore

@ -1,3 +1,3 @@
env
clear_lxd.sh
publish_tag.sh
publish_tag.sh


@ -13,8 +13,9 @@
"shellcheck.customArgs": [],
"shellcheck.ignorePatterns": {},
"shellcheck.exclude": [
"1090",
"1091"
"SC1090",
"SC1091",
"SC2029"
],
"terminal.integrated.fontFamily": "monospace",
"workbench.colorCustomizations": {


@ -1,3 +1,3 @@
# Documentation
All documentation for this project can be found at [sovereign-stack.org](https://www.sovereign-stack.org). [Click here](https://www.sovereign-stack.org/about) if you are new to Sovereign Stack.
All documentation for this project can be found at [sovereign-stack.org](https://www.sovereign-stack.org).


@ -1,51 +1,50 @@
#!/bin/bash
set -ex
set -eux
cd "$(dirname "$0")"
# NOTE: This script is meant to be executed on your LXD bare-metal servers. This script
# ensures that the LXD daemon is installed via snap package, then initializes the daemon
# to operate in clustered mode.
COMMAND="$1"
COMMAND="${1:-}"
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=loop
if [ "$COMMAND" = create ]; then
# override the cluster name.
CLUSTER_NAME="$2"
CLUSTER_NAME="${2:-}"
if [ -z "$CLUSTER_NAME" ]; then
echo "ERROR: The cluster name was not provided."
exit 1
fi
#shellcheck disable=SC1091
source ./defaults.sh
export LXD_REMOTE_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$LXD_REMOTE_PATH/cluster_definition"
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
mkdir -p "$LXD_REMOTE_PATH"
mkdir -p "$CLUSTER_PATH"
if [ ! -f "$CLUSTER_DEFINITION" ]; then
# stub out a cluster_definition.
cat >"$CLUSTER_DEFINITION" <<EOL
#!/bin/bash
# Note: the path above ./ corresponds to your LXD Remote. If your remote is set to 'cluster1'
# Then $HOME/clusters/cluster1 will be your cluster working path.
# Then $HOME/ss-clusters/cluster1 will be your cluster working path.
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
# This is REQUIRED. A list of all sites in ~/sites/ that will be deployed.
# This is REQUIRED. A list of all sites in ~/ss-sites/ that will be deployed.
# e.g., 'domain1.tld,domain2.tld,domain3.tld' Add all your domains that will
# run within this SS deployment.
SITE_LIST="domain1.tld"
# Deploy a registry cache on your management machine.
DEPLOY_REGISTRY=true
# only relevant
export REGISTRY_URL="http://${HOSTNAME}:5000"
export REGISTRY_URL="http://$(hostname).$(resolvectl status | grep 'DNS Domain:' | awk '{ print $3 }'):5000"
export REGISTRY_USERNAME=""
export REGISTRY_PASSWORD=""
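# A hedged sketch of what the REGISTRY_URL line above evaluates to on a typical Ubuntu
# host running systemd-resolved (the hostname and DNS search domain below are invented):
#   hostname                               -> mgmt
#   resolvectl status | grep 'DNS Domain:' ->     DNS Domain: lan   (awk keeps field 3: lan)
#   resulting REGISTRY_URL                 -> http://mgmt.lan:5000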
@ -61,8 +60,7 @@ EOL
source "$CLUSTER_DEFINITION"
if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
FQDN="$3"
echo "FQDN: $FQDN"
FQDN="${3:-}"
if [ -z "$FQDN" ]; then
echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
@ -143,7 +141,9 @@ EOL
# The MGMT Plane IP is the IP address that the LXD API binds to, which happens
# to be the same as whichever SSH connection you're coming in on.
MGMT_PLANE_IP="$(ssh ubuntu@"$FQDN" env | grep SSH_CONNECTION | cut -d " " -f 3)"
IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1 )"
IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"
# if the LXD_CLUSTER_PASSWORD wasn't set, we can generate a random one using gpg.
if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
@ -163,13 +163,8 @@ EOL
ssh -t "ubuntu@$FQDN" "
# set host firewall policy.
# allow SSH from management network.
sudo ufw allow from 192.168.1.0/24 proto tcp to $MGMT_PLANE_IP port 22
sudo ufw allow from 192.168.4.0/24 proto tcp to $MGMT_PLANE_IP port 8443
# allow 8443 from management subnets
sudo ufw allow from 192.168.1.0/24 proto tcp to $MGMT_PLANE_IP port 8443
sudo ufw allow from 192.168.4.0/24 proto tcp to $MGMT_PLANE_IP port 8443
# allow LXD API from management network.
sudo ufw allow from ${IP_OF_MGMT_MACHINE}/32 proto tcp to $MGMT_PLANE_IP port 8443
# enable it.
if sudo ufw status | grep -q 'Status: inactive'; then
@ -189,7 +184,7 @@ fi
fi
# stub out the lxd init file for the remote SSH endpoint.
CLUSTER_MASTER_LXD_INIT="$LXD_REMOTE_PATH/$CLUSTER_NAME-primary.yml"
CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/$CLUSTER_NAME-primary.yml"
cat >"$CLUSTER_MASTER_LXD_INIT" <<EOF
config:
core.https_address: ${MGMT_PLANE_IP}:8443
@ -235,18 +230,18 @@ EOF
cat "$CLUSTER_MASTER_LXD_INIT" | ssh "ubuntu@$FQDN" lxd init --preseed
# now ensure the service is active on the remote host.
if wait-for-it -t 5 "$FQDN:8443"; then
if wait-for-it -t 20 "$FQDN:8443"; then
# now create a remote on your local LXC client and switch to it.
# the software will now target the new cluster.
lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
lxc remote switch "$CLUSTER_NAME"
echo "INFO: You have create a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
exit 1
fi
echo "SUCCESS: Congrats, you have created a new LXD cluster named '$CLUSTER_NAME'. We create a new lxd remote and switched your local lxd client to it."
echo " You can go inspect by running 'lxc remote list'. Your current cluster path is '$CLUSTER_DEFINITION'."
echo ""
echo "HINT: Now you can consider running 'ss-deploy'."
else
echo "ERROR: invalid command."


@ -70,7 +70,7 @@ export DUPLICITY_BACKUP_PASSPHRASE=
export SSH_HOME="$HOME/.ssh"
export VLAN_INTERFACE=
export VM_NAME=
export VM_NAME="sovereign-stack-base"
export DEV_MEMORY_MB="4096"
export DEV_CPU_COUNT="4"
export SSHFS_PATH="/tmp/sshfs_temp"
@ -91,7 +91,7 @@ export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
# exit 1
# fi
ENABLE_NGINX_CACHING=true
ENABLE_NGINX_CACHING=false
# TODO
@ -104,22 +104,35 @@ BTC_CHAIN=regtest
export BTC_CHAIN="$BTC_CHAIN"
DEFAULT_DB_IMAGE="mariadb:10.6.5"
DEFAULT_DB_IMAGE="mariadb:10.8.3-jammy"
export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"
# run the docker stack.
export GHOST_IMAGE="ghost:4.44.0"
export GHOST_IMAGE="ghost:5.2.4"
export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NGINX_IMAGE="nginx:1.21.6"
export NEXTCLOUD_IMAGE="nextcloud:23.0.2"
export NGINX_IMAGE="nginx:1.23.0"
export NEXTCLOUD_IMAGE="nextcloud:24.0.2"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export SOVEREIGN_STACK_MAC_ADDRESS="aa:bb:cc:00:00:03"
export WWW_MAC_ADDRESS="aa:bb:cc:00:00:00"
export BTCPAY_MAC_ADDRESS="aa:bb:cc:00:00:01"
export UMBREL_MAC_ADDRESS="aa:bb:cc:00:00:02"
export CLUSTERS_DIR="$HOME/ss-clusters"
export SITES_PATH="$HOME/ss-sites"
# The base VM image.
export BASE_LXC_IMAGE="ubuntu/22.04/cloud"
# Deploy a registry cache on your management machine.
export DEPLOY_MGMT_REGISTRY=true
export REMOTE_HOME="/home/ubuntu"
export BTCPAY_SERVER_APPPATH="$REMOTE_HOME/btcpayserver-docker"

deploy.sh

@ -13,7 +13,8 @@ check_dependencies () {
}
# Check system's dependencies
check_dependencies wait-for-it dig rsync sshfs lxc docker-machine duplicity
check_dependencies wait-for-it dig rsync sshfs lxc docker-machine
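# The body of check_dependencies is elided by this diff; a minimal sketch of what such a
# helper typically does (an assumption, not necessarily this repository's implementation):
check_dependencies () {
    for cmd in "$@"; do
        if ! command -v "$cmd" >/dev/null 2>&1; then
            echo "ERROR: required dependency '$cmd' is not installed." >&2
            exit 1
        fi
    done
}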
# TODO: remove the dependency on docker-machine, which is what we use to provision VMs at 3rd-party vendors; we're looking for an LXD endpoint instead.
# let's check to ensure the management machine is on the baseline Ubuntu 21.04.
@ -24,15 +25,18 @@ fi
MIGRATE_VPS=false
DOMAIN_NAME=
RESTORE_ARCHIVE=
VPS_HOSTING_TARGET=lxd
RUN_CERT_RENEWAL=true
USER_NO_BACKUP=false
USER_RUN_RESTORE=false
RESTORE_BTCPAY=false
USER_SKIP_WWW=false
USER_SKIP_BTCPAY=false
UPDATE_BTCPAY=false
RECONFIGURE_BTCPAY_SERVER=false
DEPLOY_BTCPAY_SERVER=false
CURRENT_REMOTE="$(lxc remote get-default)"
CLUSTER_NAME="$(lxc remote get-default)"
# grab any modifications from the command line.
for i in "$@"; do
@ -41,12 +45,20 @@ for i in "$@"; do
VPS_HOSTING_TARGET=aws
shift
;;
--restore)
--restore-www)
USER_RUN_RESTORE=true
RUN_CERT_RENEWAL=false
USER_NO_BACKUP=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
shift
;;
--archive=*)
RESTORE_ARCHIVE="${i#*=}"
shift
;;
--domain=*)
DOMAIN_NAME="${i#*=}"
shift
@ -55,12 +67,22 @@ for i in "$@"; do
UPDATE_BTCPAY=true
shift
;;
--skip-www)
USER_SKIP_WWW=true
shift
;;
--skip-btcpay)
USER_SKIP_BTCPAY=true
shift
;;
--no-backup)
USER_NO_BACKUP=true
shift
;;
--migrate)
--migrate-btcpay)
MIGRATE_VPS=true
RESTORE_BTCPAY=true
RUN_CERT_RENEWAL=false
shift
;;
--no-cert-renew)
@ -84,29 +106,36 @@ export CACHES_DIR="$HOME/ss-cache"
export SSH_HOME="$HOME/.ssh"
export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_ARCHIVE="$RESTORE_ARCHIVE"
if [ "$VPS_HOSTING_TARGET" = aws ]; then
CURRENT_REMOTE="docker-machine"
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: Please specify a domain name with --domain= when using --aws."
exit 1
fi
CLUSTER_NAME="docker-machine"
fi
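# For reference, hypothetical invocations combining the flags parsed above (domains and
# archive paths are placeholders, and 'ss-deploy' is assumed to resolve to this deploy.sh,
# per the hint printed by the cluster script):
#
#   ss-deploy                                                  # deploy to the current lxd remote
#   ss-deploy --restore-www --archive=/path/to/backup.tar.gz   # recreate www from a local archive
#   ss-deploy --migrate-btcpay                                 # back up, recreate, and restore the btcpayserver VM
#   ss-deploy --aws --domain=domain1.tld                       # provision on AWS via docker-machine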
export CURRENT_REMOTE="$CURRENT_REMOTE"
export LXD_REMOTE_PATH="$CLUSTERS_DIR/$CURRENT_REMOTE"
export CLUSTER_NAME="$CLUSTER_NAME"
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
# ensure our cluster path is created.
mkdir -p "$LXD_REMOTE_PATH"
mkdir -p "$CLUSTER_PATH"
# if an authorized_keys file does not exist, we'll stub one out with the current user.
# add additional id_rsa.pub entries manually for more administrative logins.
if [ ! -f "$LXD_REMOTE_PATH/authorized_keys" ]; then
cat "$SSH_HOME/id_rsa.pub" >> "$LXD_REMOTE_PATH/authorized_keys"
echo "INFO: Sovereign Stack just stubbed out '$LXD_REMOTE_PATH/authorized_keys'. Go update it."
if [ ! -f "$CLUSTER_PATH/authorized_keys" ]; then
cat "$SSH_HOME/id_rsa.pub" >> "$CLUSTER_PATH/authorized_keys"
echo "INFO: Sovereign Stack just stubbed out '$CLUSTER_PATH/authorized_keys'. Go update it."
echo " Add ssh pubkeys for your various management machines, if any. We've stubbed it out"
echo " with your ssh pubkey at '$HOME/.ssh/id_rsa.pub'."
exit 1
fi
if [ "$VPS_HOSTING_TARGET" = lxd ]; then
CLUSTER_DEFINITION="$LXD_REMOTE_PATH/cluster_definition"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
#########################################
@ -127,7 +156,7 @@ if [ "$VPS_HOSTING_TARGET" = lxd ]; then
# recommended to run a registry cache on your management machine though.
if [ -n "$REGISTRY_URL" ]; then
cat > "$LXD_REMOTE_PATH/registry.yml" <<EOL
cat > "$CLUSTER_PATH/registry.yml" <<EOL
version: 0.1
http:
addr: 0.0.0.0:5000
@ -146,18 +175,35 @@ EOL
mkdir -p "${CACHES_DIR}/registry_images"
# run a docker registry pull through cache on the management
if ! docker stack list | grep -q registry; then
docker stack deploy -c management/registry_mirror.yml registry
# run a docker registry pull through cache on the management machine.
if [ "$DEPLOY_MGMT_REGISTRY" = true ]; then
if ! docker stack list | grep -q registry; then
docker stack deploy -c management/registry_mirror.yml registry
fi
fi
fi
fi
# this is our password generation mechanism, relying on GPG for secure password generation.
function new_pass {
apg -a 1 -M nc -n 3 -m 26 -E GHIJKLMNOPQRSTUVWXYZ | head -n1 | awk '{print $1;}'
gpg --gen-random --armor 1 25
}
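# new_pass emits the ASCII-armored (base64) form of 25 random bytes (about 36 characters)
# and is consumed below via command substitution, e.g.:
#   export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"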
if [ "$VPS_HOSTING_TARGET" = lxd ]; then
# first let's get the DISK_TO_USE and DATA_PLANE_MACVLAN_INTERFACE from the ss-config network,
# which is set up during LXD cluster creation by ss-cluster.
LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep ss-config)"
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
DISK_TO_USE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f3)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
export DISK_TO_USE="$DISK_TO_USE"
./deployment/create_lxc_base.sh
fi
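# A hedged illustration of the parsing above (the network name, interface, and disk values
# are invented, and the exact CSV column layout depends on your lxd version):
#   lxc network list --format csv | grep ss-config
#     -> ss-config,bridge,YES,...,"ss-config,enp5s0,/dev/sdb",...
#   awk -F'"' '{print $2}'  -> ss-config,enp5s0,/dev/sdb
#   cut -d ',' -f2          -> enp5s0    (DATA_PLANE_MACVLAN_INTERFACE)
#   cut -d ',' -f3          -> /dev/sdb  (DISK_TO_USE)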
function run_domain {
export VPS_HOSTING_TARGET="$VPS_HOSTING_TARGET"
@ -170,7 +216,8 @@ function run_domain {
# iterate over all our server endpoints and provision them if needed.
# www
VPS_HOSTNAME=
for APP_TO_DEPLOY in www btcpay umbrel; do
# OPTIONAL umbrel
for VIRTUAL_MACHINE in www btcpayserver umbrel; do
FQDN=
# shellcheck disable=SC1091
@ -190,11 +237,11 @@ function run_domain {
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
export APP_TO_DEPLOY="$APP_TO_DEPLOY"
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
BACKUP_TIMESTAMP="$(date +"%Y-%m")"
UNIX_BACKUP_TIMESTAMP="$(date +%s)"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
LOCAL_BACKUP_PATH="$SITE_PATH/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/$VIRTUAL_MACHINE/$BACKUP_TIMESTAMP"
LOCAL_BACKUP_PATH="$SITE_PATH/backups/$VIRTUAL_MACHINE/$BACKUP_TIMESTAMP"
export LOCAL_BACKUP_PATH="$LOCAL_BACKUP_PATH"
export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP"
@ -208,16 +255,20 @@ function run_domain {
fi
DDNS_HOST=
if [ "$APP_TO_DEPLOY" = www ]; then
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$DEPLOY_WWW_SERVER" = false ] || [ "$USER_SKIP_WWW" = true ]; then
continue
fi
VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_MAC_ADDRESS"
DDNS_HOST="$WWW_HOSTNAME"
ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
if [ "$DEPLOY_WWW_SERVER" = false ]; then
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$USER_SKIP_BTCPAY" = true ]; then
if [ "$DEPLOY_BTCPAY_SERVER" = false ]; then
continue
fi
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
DDNS_HOST="$BTCPAY_HOSTNAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAY_MAC_ADDRESS"
@ -226,12 +277,11 @@ function run_domain {
elif [ "$BTC_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=40
fi
if [ "$DEPLOY_BTCPAY_SERVER" = false ]; then
elif [ "$VIRTUAL_MACHINE" = umbrel ]; then
if [ "$DEPLOY_UMBREL_VPS" = false ]; then
continue
fi
elif [ "$APP_TO_DEPLOY" = umbrel ]; then
DDNS_HOST="$UMBREL_HOSTNAME"
VPS_HOSTNAME="$UMBREL_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$UMBREL_MAC_ADDRESS"
@ -240,15 +290,12 @@ function run_domain {
elif [ "$BTC_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=70
fi
if [ "$DEPLOY_UMBREL_VPS" = false ]; then
continue
fi
elif [ "$APP_TO_DEPLOY" = certonly ]; then
DDNS_HOST="$WWW_HOSTNAME"
elif [ "$VIRTUAL_MACHINE" = "sovereign-stack" ]; then
DDNS_HOST="sovereign-stack-base"
ROOT_DISK_SIZE_GB=8
MAC_ADDRESS_TO_PROVISION="$SOVEREIGN_STACK_MAC_ADDRESS"
else
echo "ERROR: APP_TO_DEPLOY not within allowable bounds."
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
exit
fi
@ -328,7 +375,6 @@ function run_domain {
exit 1
fi
if [ -z "$DEPLOY_UMBREL_VPS" ]; then
echo "ERROR: Ensure DEPLOY_UMBREL_VPS is configured in your site_definition."
exit 1
@ -339,9 +385,7 @@ function run_domain {
echo "INFO: Go to your site_definition file and set the NOSTR_ACCOUNT_PUBKEY variable."
exit 1
fi
# generate the docker yaml and nginx configs.
bash -c ./deployment/stub_docker_yml.sh
bash -c ./deployment/stub_nginxconf.sh
MACHINE_EXISTS=false
@ -360,19 +404,19 @@ function run_domain {
if [ "$MACHINE_EXISTS" = true ]; then
# we delete the machine if the user has directed us to
if [ "$MIGRATE_VPS" = true ]; then
# run the domain_init based on user input.
if [ "$USER_NO_BACKUP" = true ]; then
echo "Machine exists. We don't need to back it up because the user has directed --no-backup."
else
echo "Machine exists. Since we're going to delete it, let's grab a backup. We don't need to restore services since we're deleting it."
RUN_RESTORE=false RUN_BACKUP=true RUN_SERVICES=false "$(pwd)/deployment/domain_init.sh"
fi
# get a backup of the machine. This is what we restore to the new VPS.
echo "INFO: Machine exists. Since we're going to delete it, let's grab a backup. We don't need to restore services since we're deleting it."
RESTORE_BTCPAY=false UPDATE_BTCPAY=false RUN_RESTORE=false RUN_BACKUP=true RUN_SERVICES=false ./deployment/domain_init.sh
# delete the remote VPS.
if [ "$VPS_HOSTING_TARGET" = aws ]; then
if [ "$APP_TO_DEPLOY" != btcpay ]; then
# docker-machine rm -f "$FQDN"
echo "ERROR: NOT IMPLEMENTED"
RESPONSE=
read -r -p "Do you want to continue with deleting '$FQDN' (y/n)": RESPONSE
if [ "$RESPONSE" = y ]; then
docker-machine rm -f "$FQDN"
else
echo "STOPPING the migration. User entered something other than 'y'."
exit 1
fi
elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
lxc delete --force "$LXD_VM_NAME"
@ -382,7 +426,7 @@ function run_domain {
# Then we run the script again to re-instantiate a new VPS, restoring all user data
# if restore directory doesn't exist, then we end up with a new site.
echo "INFO: Recreating the remote VPS then restoring user data."
RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP=false RUN_SERVICES=true "$(pwd)/deployment/domain_init.sh"
RESTORE_BTCPAY=true UPDATE_BTCPAY=false RUN_RESTORE=true RUN_BACKUP=false RUN_SERVICES=true ./deployment/domain_init.sh
else
if [ "$USER_NO_BACKUP" = true ]; then
RUN_BACKUP=false
@ -392,7 +436,7 @@ function run_domain {
echo "INFO: Maintaining existing VPS. RUN_BACKUP=$RUN_BACKUP RUN_RESTORE=$USER_RUN_RESTORE"
fi
RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP="$RUN_BACKUP" RUN_SERVICES=true "$(pwd)/deployment/domain_init.sh"
RESTORE_BTCPAY=false UPDATE_BTCPAY="$UPDATE_BTCPAY" RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP="$RUN_BACKUP" RUN_SERVICES=true ./deployment/domain_init.sh
fi
else
if [ "$MIGRATE_VPS" = true ]; then
@ -401,7 +445,7 @@ function run_domain {
# The machine does not exist. Let's bring it into existence, restoring from latest backup.
echo "Machine does not exist. RUN_RESTORE=$USER_RUN_RESTORE RUN_BACKUP=false"
RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP=false RUN_SERVICES=true "$(pwd)/deployment/domain_init.sh"
RESTORE_BTCPAY=false UPDATE_BTCPAY="$UPDATE_BTCPAY" RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP=false RUN_SERVICES=true ./deployment/domain_init.sh
fi
done
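# Note on the invocation style above: prefixing assignments to a command, e.g.
#   RUN_RESTORE=true RUN_BACKUP=false RUN_SERVICES=true ./deployment/domain_init.sh
# exports those variables only for that single run of domain_init.sh; they do not persist
# in the surrounding deploy.sh environment.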
@ -425,7 +469,7 @@ function stub_site_definition {
#!/bin/bash
# Set the domain name for the identity site.
export DOMAIN_NAME="domain.tld"
export DOMAIN_NAME="${DOMAIN_NAME}"
# duplicity backup archive password
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
@ -436,9 +480,6 @@ export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
## WWW
export DEPLOY_WWW_SERVER=true
# see https://www.sovereign-stack.org/mac-addresses-for-new-type-vms/ for more info
# export WWW_MAC_ADDRESS="CHANGE_ME"
# Deploy APPS to www
export DEPLOY_GHOST=true
export DEPLOY_NEXTCLOUD=true
@ -466,15 +507,9 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
## BTCPAY SERVER; if true, then a BTCPay server is deployed.
export DEPLOY_BTCPAY_SERVER=false
# https://www.sovereign-stack.org/mac-addresses-for-new-type-vms/
#export BTCPAY_MAC_ADDRESS=""
## Deploy an Umbrel node?
export DEPLOY_UMBREL_VPS=false
# REQUIRED if DEPLOY_UMBREL_VPS=true; https://www.sovereign-stack.org/mac-addresses-for-new-type-vms/
# export UMBREL_MAC_ADDRESS=""
# CHAIN to DEPLOY; valid are 'regtest', 'testnet', and 'mainnet'
export BTC_CHAIN=regtest


@ -1,3 +0,0 @@
# Documentation
Please visit the [Sovereign Stack](https://www.sovereign-stack.org) website for documentation related to this repository.


@ -1,9 +1,22 @@
#!/bin/bash
set -e
set -exo pipefail
cd "$(dirname "$0")"
# take the services down, create a backup archive, then pull it down.
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./backup.sh"
# the script executed here from the BTCPAY repo will automatically take services down
# and bring them back up.
#ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# TODO enable encrypted archives
# TODO switch to btcpay-backup.sh when on LXD fully.
scp ./btcpay-backup.sh "$FQDN:$REMOTE_HOME/btcpay-backup.sh"
ssh "$FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
# next we pull the resulting backup archive down to our management machine.
ssh "$FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
scp "$FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$LOCAL_BACKUP_PATH/btcpay-$1.tar.gz"
scp "$FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$LOCAL_BACKUP_PATH/$1.tar.gz"


@ -0,0 +1,113 @@
#!/bin/bash -e
set -o pipefail -o errexit -x
# Please be aware of these important issues:
#
# - Old channel state is toxic and you can lose all your funds, if you or someone
# else closes a channel based on the backup with old state - and the state changes
# often! If you publish an old state (say from yesterday's backup) on chain, you
# WILL LOSE ALL YOUR FUNDS IN A CHANNEL, because the counterparty will publish a
# revocation key!
if [ "$(id -u)" != "0" ]; then
printf "\n🚨 This script must be run as root.\n"
printf "➡️ Use the command 'sudo su -' (include the trailing hypen) and try again.\n\n"
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
backup_dir="$docker_dir/volumes/backup_datadir/_data"
dbdump_path="$docker_dir/$dbdump_name"
backup_path="$backup_dir/backup.tar.gz"
# ensure backup dir exists
if [ ! -d "$backup_dir" ]; then
mkdir -p "$backup_dir"
fi
cd "$btcpay_dir"
. helpers.sh
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
printf "\n"
echo " Database container is not up and running. Starting BTCPay Server …"
docker volume create generated_postgres_datadir
docker-compose -f "$BTCPAY_DOCKER_COMPOSE" up -d postgres
printf "\n"
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
echo "🚨 Database container could not be started or found."
exit 1
fi
fi
printf "\n"
echo " Dumping database …"
{
docker exec "$dbcontainer" pg_dumpall -c -U postgres | gzip > "$dbdump_path"
echo "✅ Database dump done."
} || {
echo "🚨 Dumping failed. Please check the error message above."
exit 1
}
printf "\n Stopping BTCPay Server …\n\n"
btcpay_down
printf "\n"
cd $docker_dir
echo " Archiving files in $(pwd)"
{
tar \
--exclude="volumes/backup_datadir" \
--exclude="volumes/generated_bitcoin_datadir/blocks" \
--exclude="volumes/generated_bitcoin_datadir/chainstate" \
--exclude="volumes/generated_bitcoin_datadir/debug.log" \
--exclude="volumes/generated_litecoin_datadir/blocks" \
--exclude="volumes/generated_litecoin_datadir/chainstate" \
--exclude="volumes/generated_litecoin_datadir/debug.log" \
--exclude="volumes/generated_postgres_datadir" \
--exclude="volumes/generated_clightning_bitcoin_datadir/_data/lightning-rpc" \
--exclude="**/logs/*" \
-cvzf "$backup_path" "$dbdump_name" volumes/generated_*
echo "✅ Archive done."
if [ ! -z "$BTCPAY_BACKUP_PASSPHRASE" ]; then
printf "\n"
echo "🔐 BTCPAY_BACKUP_PASSPHRASE is set, the backup will be encrypted."
{
gpg -o "$backup_path.gpg" --batch --yes -c --passphrase "$BTCPAY_BACKUP_PASSPHRASE" $backup_path
rm "$backup_path"
backup_path="$backup_path.gpg"
echo "✅ Encryption done."
} || {
echo "🚨 Encrypting failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd "$btcpay_dir"
#btcpay_up
exit 1
}
fi
} || {
echo "🚨 Archiving failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
#btcpay_up
exit 1
}
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
#btcpay_up
printf "\n Cleaning up …\n\n"
rm $dbdump_path
printf "✅ Backup done => $backup_path\n\n"


@ -0,0 +1,134 @@
#!/bin/bash -e
set -o pipefail -o errexit -x
if [ "$(id -u)" != "0" ]; then
printf "\n🚨 This script must be run as root.\n"
printf "➡️ Use the command 'sudo su -' (include the trailing hypen) and try again.\n\n"
exit 1
fi
backup_path=$1
if [ -z "$backup_path" ]; then
printf "\n Usage: btcpay-restore.sh /path/to/backup.tar.gz\n\n"
exit 1
fi
if [ ! -f "$backup_path" ]; then
printf "\n🚨 $backup_path does not exist.\n\n"
exit 1
fi
if [[ "$backup_path" == *.gpg && -z "$BTCPAY_BACKUP_PASSPHRASE" ]]; then
printf "\n🔐 $backup_path is encrypted. Please provide the passphrase to decrypt it."
printf "\n Usage: BTCPAY_BACKUP_PASSPHRASE=t0pSeCrEt btcpay-restore.sh /path/to/backup.tar.gz.gpg\n\n"
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
restore_dir="$docker_dir/volumes/backup_datadir/_data/restore"
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
# ensure clean restore dir
printf "\n Cleaning restore directory $restore_dir …\n\n"
rm -rf $restore_dir
mkdir -p $restore_dir
if [[ "$backup_path" == *.gpg ]]; then
echo "🔐 Decrypting backup file …"
{
gpg -o "${backup_path%.*}" --batch --yes --passphrase "$BTCPAY_BACKUP_PASSPHRASE" -d $backup_path
backup_path="${backup_path%.*}"
printf "✅ Decryption done.\n\n"
} || {
echo "🚨 Decryption failed. Please check the error message above."
exit 1
}
fi
cd $restore_dir
echo " Extracting files in $(pwd)"
tar -xvf $backup_path -C $restore_dir
# basic control checks
if [ ! -f "$dbdump_name" ]; then
printf "\n🚨 $dbdump_name does not exist.\n\n"
exit 1
fi
if [ ! -d "volumes" ]; then
printf "\n🚨 volumes directory does not exist.\n\n"
exit 1
fi
cd $btcpay_dir
. helpers.sh
printf "\n Stopping BTCPay Server …\n\n"
btcpay_down
cd $restore_dir
{
printf "\n Restoring volumes …\n"
# ensure volumes dir exists
if [ ! -d "$docker_dir/volumes" ]; then
mkdir -p $docker_dir/volumes
fi
# copy volume directories over
cp -r volumes/* $docker_dir/volumes/
# ensure datadirs excluded in backup exist
mkdir -p $docker_dir/volumes/generated_postgres_datadir/_data
echo "✅ Volume restore done."
} || {
echo "🚨 Restoring volumes failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
{
printf "\n Starting database container …\n"
docker-compose -f $BTCPAY_DOCKER_COMPOSE up -d postgres
dbcontainer=$(docker ps -a -q -f "name=postgres")
if [ -z "$dbcontainer" ]; then
echo "🚨 Database container could not be started or found."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
fi
} || {
echo "🚨 Starting database container failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
cd $restore_dir
{
printf "\n Restoring database …"
gunzip -c $dbdump_name | docker exec -i $dbcontainer psql -U postgres postgres -a
echo "✅ Database restore done."
} || {
echo "🚨 Restoring database failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
printf "\n Cleaning up …\n\n"
rm -rf $restore_dir
printf "✅ Restore done\n\n"


@ -3,55 +3,43 @@
set -eux
cd "$(dirname "$0")"
if [ "$RUN_BACKUP" = true ]; then
# shellcheck disable=SC2029
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
fi
# we will re-run the btcpay provisioning scripts if directed to do so.
# we will re-run the btcpayserver provisioning scripts if directed to do so.
# if an update does occur, we grab another backup.
if [ "$UPDATE_BTCPAY" = true ]; then
if [ "$RUN_BACKUP" = true ]; then
# grab a backup PRIOR to update
./backup.sh "before-update-$UNIX_BACKUP_TIMESTAMP"
fi
# run the update.
# shellcheck disable=SC2029
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-update.sh"
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
if [ "$RUN_BACKUP" = true ]; then
# we just grab a regular backup
./backup.sh "regular-backup-$UNIX_BACKUP_TIMESTAMP"
fi
fi
# btcpay-update.sh brings services back up, but does not take them down.
ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
# run a restoration if specified.
if [ "$RUN_RESTORE" = true ]; then
# shellcheck disable=SC2029
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
#./restore.sh
fi
elif [ "$RESTORE_BTCPAY" = true ]; then
./restore.sh
# the administrator may have indicated a reconfig; if so, re-run the setup (useful for adding alternative names to TLS)
if [ "$RECONFIGURE_BTCPAY_SERVER" = true ]; then
# re-run the setup script.
RUN_BACKUP=false
elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
# the administrator may have indicated a reconfig;
# if so, we re-run the setup script.
./run_setup.sh
exit
fi
if [ "$MIGRATE_VPS" = false ]; then
# if the script gets this far, then we grab a regular backup.
if [ "$RUN_BACKUP" = true ]; then
# we just grab a regular backup
./backup.sh "$UNIX_BACKUP_TIMESTAMP"
fi
if [ "$RUN_SERVICES" = true ]; then
# The default is to resume services, though the admin may want to keep services off (e.g., for a migration)
# we bring the services back up by default.
# shellcheck disable=SC2029
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; bash -c ./btcpay-up.sh"
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
# we wait for lightning to come online too.
wait-for-it -t -60 "$FQDN:80"
wait-for-it -t 60 "$FQDN:443"
xdg-open "https://$FQDN"
xdg-open "http://$FQDN"
else
echo "WARNING: The '--migrate' flag was specified. BTCPay Server services HAVE NOT BEEN TURNED ON!"
echo "NOTE: You can restore your latest backup to a new host that has BTCPay Server installed."
echo "WARNING: BTCPAY Server services NOT started. This is probably on purpose."
fi


@ -1,18 +1,31 @@
#!/bin/bash
# this script ASSUMES services have already been taken down.
set -ex
cd "$(dirname "$0")"
#ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# first let's ask the user for the absolute path to the backup file that we want to restore.
FILE_PATH=
read -r -p "Please enter the absolute path of the backup file you want to restore: ": FILE_PATH
if [ -f "$FILE_PATH" ]; then
# then we grab a backup of the existing stuff BEFORE the restoration attempt
./btcpayserver/backup.sh "before-restore-$UNIX_BACKUP_TIMESTAMP"
BTCPAY_SERVER_ARCHIVE="$LOCAL_BACKUP_PATH/$UNIX_BACKUP_TIMESTAMP.tar.gz"
if [ ! -f "$BTCPAY_SERVER_ARCHIVE" ]; then
BTCPAY_SERVER_ARCHIVE="$RESTORE_ARCHIVE"
fi
echo "INFO: Restoring BTCPAY Server: $FILE_PATH"
if [ -f "$BTCPAY_SERVER_ARCHIVE" ]; then
# push the restoration archive to the remote server
echo "INFO: Restoring BTCPAY Server: $BTCPAY_SERVER_ARCHIVE"
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
scp "$FILE_PATH" "$FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz"
ssh "$FQDN" "cd /; sudo tar -xzvf $REMOTE_BACKUP_PATH/btcpay.tar.gz"
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_HOME/backups/btcpay.tar.gz"
scp "$BTCPAY_SERVER_ARCHIVE" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
# take down services, if any.
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# push the modified restore script to the remote directory, set permissions, and execute.
scp ./btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
else
echo "ERROR: File does not exist."
exit 1


@ -1,6 +1,7 @@
#!/bin/bash
set -e
set -ex
cd "$(dirname "$0")"
# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"
@ -11,27 +12,29 @@ cat > "$SITE_PATH/btcpay.sh" <<EOL
#!/bin/bash
set -ex
cd "\$(dirname "\$0")"
# wait for cloud-init to complete.
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
# if [ -d "btcpayserver-docker" ] && [ "$EXISTING_BRANCH" != "master" ] && [ "$EXISTING_REMOTE" != "master" ]; then
# echo "existing btcpayserver-docker folder found that did not match our specified fork. Moving. (Current branch: $EXISTING_BRANCH, Current remote: $EXISTING_REMOTE)";
# mv "btcpayserver-docker" "btcpayserver-docker_$(date +%s)";
# fi
# if [ -d "btcpayserver-docker" ] && [ "$EXISTING_BRANCH" == "master" ] && [ "$EXISTING_REMOTE" == "master" ]; then
# echo "existing btcpayserver-docker folder found, pulling instead of cloning.";
# git pull;
# fi
#curl -SL https://github.com/docker/compose/releases/download/v2.6.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
#chmod 0777 /usr/local/bin/docker-compose
if [ ! -d "btcpayserver-docker" ]; then
echo "cloning btcpayserver-docker";
git clone -b master https://github.com/btcpayserver/btcpayserver-docker btcpayserver-docker;
git clone -b master https://github.com/btcpayserver/btcpayserver-docker btcpayserver-docker;
git config --global --add safe.directory /home/ubuntu/btcpayserver-docker
else
cd ./btcpayserver-docker
git pull
git pull --all --tags
cd -
fi
cd btcpayserver-docker
export BTCPAY_HOST="${FQDN}"
export NBITCOIN_NETWORK="${BTC_CHAIN}"
export LIGHTNING_ALIAS="${DOMAIN_NAME}"
@ -39,16 +42,12 @@ export LETSENCRYPT_EMAIL="${CERTIFICATE_EMAIL_ADDRESS}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"
# opt-save-storage keeps 1 year of blocks (prunes to 100 GB)
# opt-add-btctransmuter adds transmuter software
#
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-configurator;opt-add-nostr-relay;opt-add-tor-relay"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-nostr-relay;opt-add-tor-relay"
#export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
export BTCPAY_ADDITIONAL_HOSTS="${BTCPAY_ADDITIONAL_HOSTNAMES}"
export BTCPAYGEN_REVERSEPROXY="nginx"
export BTCPAY_ENABLE_SSH=false
cd btcpayserver-docker
export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
if [ "\$NBITCOIN_NETWORK" != regtest ]; then
# run fast_sync if it's not been done before.
@ -60,9 +59,10 @@ if [ "\$NBITCOIN_NETWORK" != regtest ]; then
fi
fi
# provision the btcpay server
# provision the btcpayserver
. ./btcpay-setup.sh -i
sleep 15
EOL
# send the setup script to the remote machine.


@ -0,0 +1,123 @@
#!/bin/bash
# This script might look like a good idea. Please be aware of these important issues:
#
# - The backup file is not encrypted and it contains your lightning private keys.
# Consider encrypting before uploading or using another backup tool like duplicity.
# - Old channel state is toxic and you can lose all your funds, if you or someone
# else closes a channel based on the backup with old state - and the state changes
# often! If you publish an old state (say from yesterday's backup) on chain, you
# WILL LOSE ALL YOUR FUNDS IN A CHANNEL, because the counterparty will publish a
# revocation key!
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root."
echo "Use the command 'sudo su -' (include the trailing hypen) and try again"
exit 1
fi
case "$BACKUP_PROVIDER" in
"Dropbox")
if [ -z "$DROPBOX_TOKEN" ]; then
echo -e "\033[0;31mSet DROPBOX_TOKEN environment variable and try again.\033[0m"
exit 1
fi
;;
"S3")
echo -e "\033[1;33mUsing S3 backup provider. Make sure you have ran 'aws configure' on your root user and configured an AMI with access to your bucket.\033[0m"
if [ -z "$S3_BUCKET" ]; then
echo -e "\033[0;31mSet S3_BUCKET environment variable and try again.\033[0m"
exit 1
fi
if [ -z "$S3_PATH" ]; then
echo -e "\033[1;33mUsing bucket root for backup, set S3_PATH if you want to backup into a specific folder (Make sure it ends with a trailing slash).\033[0m"
fi
;;
"SCP")
if [ -z "$SCP_TARGET" ]; then
echo -e "\033[0;31mSet SCP_TARGET environment variable and try again.\033[0m"
exit 1
fi
;;
*)
echo "No BACKUP_PROVIDER set. Backing up to local directory."
;;
esac
# preparation
volumes_dir=/var/lib/docker/volumes
backup_dir="$volumes_dir/backup_datadir"
filename="backup.tar.gz"
dumpname="postgres.sql.gz"
if [ "$BACKUP_TIMESTAMP" == true ]; then
timestamp=$(date "+%Y%m%d-%H%M%S")
filename="$timestamp-$filename"
dumpname="$timestamp-$dumpname"
fi
backup_path="$backup_dir/_data/${filename}"
dbdump_path="$backup_dir/_data/${dumpname}"
cd "$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
. helpers.sh
# ensure backup dir exists
if [ ! -d "$backup_dir" ]; then
docker volume create backup_datadir
fi
# dump database
echo "Dumping database …"
btcpay_dump_db $dbdump_path
if [[ "$1" == "--only-db" ]]; then
tar -cvzf $backup_path $dbdump_path
else
# stop docker containers, save files and restart
echo "Stopping BTCPay Server …"
btcpay_down
echo "Backing up files …"
tar --exclude="$backup_path" --exclude="$volumes_dir/generated_bitcoin_datadir/*" --exclude="$volumes_dir/generated_litecoin_datadir/*" --exclude="$volumes_dir/generated_postgres_datadir/*" --exclude="$volumes_dir/**/logs/*" -cvzf $backup_path $dbdump_path $volumes_dir
echo "Restarting BTCPay Server …"
btcpay_up
fi
# post processing
case $BACKUP_PROVIDER in
"Dropbox")
echo "Uploading to Dropbox …"
docker run --name backup --env DROPBOX_TOKEN=$DROPBOX_TOKEN -v backup_datadir:/data jvandrew/btcpay-dropbox:1.0.5 $filename
echo "Deleting local backup …"
rm $backup_path
;;
"S3")
echo "Uploading to S3 …"
docker run --rm -v ~/.aws:/root/.aws -v $backup_path:/aws/$filename amazon/aws-cli s3 cp $filename s3://$S3_BUCKET/$S3_PATH
echo "Deleting local backup …"
rm $backup_path
;;
"SCP")
echo "Uploading via SCP …"
scp $backup_path $SCP_TARGET
echo "Deleting local backup …"
rm $backup_path
;;
*)
echo "Backed up to $backup_path"
;;
esac
# cleanup
rm $dbdump_path
echo "Backup done."


@ -0,0 +1,113 @@
#!/bin/bash -e
set -o pipefail -o errexit
# Please be aware of these important issues:
#
# - Old channel state is toxic and you can lose all your funds, if you or someone
# else closes a channel based on the backup with old state - and the state changes
# often! If you publish an old state (say from yesterday's backup) on chain, you
# WILL LOSE ALL YOUR FUNDS IN A CHANNEL, because the counterparty will publish a
# revocation key!
if [ "$(id -u)" != "0" ]; then
printf "\n🚨 This script must be run as root.\n"
printf "➡️ Use the command 'sudo su -' (include the trailing hypen) and try again.\n\n"
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
backup_dir="$docker_dir/volumes/backup_datadir/_data"
dbdump_path="$docker_dir/$dbdump_name"
backup_path="$backup_dir/backup.tar.gz"
# ensure backup dir exists
if [ ! -d "$backup_dir" ]; then
mkdir -p $backup_dir
fi
cd $btcpay_dir
. helpers.sh
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
printf "\n"
echo " Database container is not up and running. Starting BTCPay Server …"
docker volume create generated_postgres_datadir
docker-compose -f $BTCPAY_DOCKER_COMPOSE up -d postgres
printf "\n"
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
echo "🚨 Database container could not be started or found."
exit 1
fi
fi
printf "\n"
echo " Dumping database …"
{
docker exec $dbcontainer pg_dumpall -c -U postgres | gzip > $dbdump_path
echo "✅ Database dump done."
} || {
echo "🚨 Dumping failed. Please check the error message above."
exit 1
}
printf "\n Stopping BTCPay Server …\n\n"
btcpay_down
printf "\n"
cd $docker_dir
echo " Archiving files in $(pwd)"
{
tar \
--exclude="volumes/backup_datadir" \
--exclude="volumes/generated_bitcoin_datadir/blocks" \
--exclude="volumes/generated_bitcoin_datadir/chainstate" \
--exclude="volumes/generated_bitcoin_datadir/debug.log" \
--exclude="volumes/generated_litecoin_datadir/blocks" \
--exclude="volumes/generated_litecoin_datadir/chainstate" \
--exclude="volumes/generated_litecoin_datadir/debug.log" \
--exclude="volumes/generated_postgres_datadir" \
--exclude="volumes/generated_clightning_bitcoin_datadir/_data/lightning-rpc" \
--exclude="**/logs/*" \
-cvzf $backup_path $dbdump_name volumes/generated_*
echo "✅ Archive done."
if [ ! -z "$BTCPAY_BACKUP_PASSPHRASE" ]; then
printf "\n"
echo "🔐 BTCPAY_BACKUP_PASSPHRASE is set, the backup will be encrypted."
{
gpg -o "$backup_path.gpg" --batch --yes -c --passphrase "$BTCPAY_BACKUP_PASSPHRASE" $backup_path
rm $backup_path
backup_path="$backup_path.gpg"
echo "✅ Encryption done."
} || {
echo "🚨 Encrypting failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
fi
} || {
echo "🚨 Archiving failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
printf "\n Cleaning up …\n\n"
rm $dbdump_path
printf "✅ Backup done => $backup_path\n\n"


@ -0,0 +1,135 @@
#!/bin/bash -e
set -o pipefail -o errexit
if [ "$(id -u)" != "0" ]; then
printf "\n🚨 This script must be run as root.\n"
printf "➡️ Use the command 'sudo su -' (include the trailing hypen) and try again.\n\n"
exit 1
fi
backup_path=$1
if [ -z "$backup_path" ]; then
printf "\n Usage: btcpay-restore.sh /path/to/backup.tar.gz\n\n"
exit 1
fi
if [ ! -f "$backup_path" ]; then
printf "\n🚨 $backup_path does not exist.\n\n"
exit 1
fi
if [[ "$backup_path" == *.gpg && -z "$BTCPAY_BACKUP_PASSPHRASE" ]]; then
printf "\n🔐 $backup_path is encrypted. Please provide the passphrase to decrypt it."
printf "\n Usage: BTCPAY_BACKUP_PASSPHRASE=t0pSeCrEt btcpay-restore.sh /path/to/backup.tar.gz.gpg\n\n"
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
restore_dir="$docker_dir/volumes/backup_datadir/_data/restore"
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
# ensure clean restore dir
printf "\n Cleaning restore directory $restore_dir …\n\n"
rm -rf $restore_dir
mkdir -p $restore_dir
if [[ "$backup_path" == *.gpg ]]; then
echo "🔐 Decrypting backup file …"
{
gpg -o "${backup_path%.*}" --batch --yes --passphrase "$BTCPAY_BACKUP_PASSPHRASE" -d $backup_path
backup_path="${backup_path%.*}"
printf "✅ Decryption done.\n\n"
} || {
echo "🚨 Decryption failed. Please check the error message above."
exit 1
}
fi
cd $restore_dir
echo " Extracting files in $(pwd)"
tar -xvf $backup_path -C $restore_dir
# basic control checks
if [ ! -f "$dbdump_name" ]; then
printf "\n🚨 $dbdump_name does not exist.\n\n"
exit 1
fi
if [ ! -d "volumes" ]; then
printf "\n🚨 volumes directory does not exist.\n\n"
exit 1
fi
cd $btcpay_dir
. helpers.sh
printf "\n Stopping BTCPay Server …\n\n"
btcpay_down
cd $restore_dir
{
printf "\n Restoring volumes …\n"
# ensure volumes dir exists
if [ ! -d "$docker_dir/volumes" ]; then
mkdir -p $docker_dir/volumes
fi
# copy volume directories over
cp -r volumes/* $docker_dir/volumes/
# ensure datadirs excluded in backup exist
mkdir -p $docker_dir/volumes/generated_postgres_datadir/_data
echo "✅ Volume restore done."
} || {
echo "🚨 Restoring volumes failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
{
printf "\n Starting database container …\n"
docker-compose -f $BTCPAY_DOCKER_COMPOSE up -d postgres
sleep 10
dbcontainer=$(docker ps -a -q -f "name=postgres")
if [ -z "$dbcontainer" ]; then
echo "🚨 Database container could not be started or found."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
fi
} || {
echo "🚨 Starting database container failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
cd $restore_dir
{
printf "\n Restoring database …"
gunzip -c $dbdump_name | docker exec -i $dbcontainer psql -U postgres postgres -a
echo "✅ Database restore done."
} || {
echo "🚨 Restoring database failed. Please check the error message above."
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
exit 1
}
printf "\n Restarting BTCPay Server …\n\n"
cd $btcpay_dir
btcpay_up
printf "\n Cleaning up …\n\n"
rm -rf $restore_dir
printf "✅ Restore done\n\n"