forked from ss/sovereign-stack
Migration updates for vertical scaling of VMs.
This commit is contained in:
parent
bd9a76108b
commit
69d5564e44
23
check_dependencies.sh
Executable file
23
check_dependencies.sh
Executable file
@ -0,0 +1,23 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
check_dependencies () {
|
||||||
|
for cmd in "$@"; do
|
||||||
|
if ! command -v "$cmd" >/dev/null 2>&1; then
|
||||||
|
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check system's dependencies
|
||||||
|
check_dependencies wait-for-it dig rsync sshfs lxc
|
||||||
|
|
||||||
|
# let's check to ensure the management machine is on the Baseline ubuntu 21.04
|
||||||
|
if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
|
||||||
|
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
|
||||||
|
exit 1
|
||||||
|
fi
|
@ -34,7 +34,7 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
|
|||||||
|
|
||||||
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
|
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
|
||||||
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
|
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
|
||||||
export PROJECT_NAME="public"
|
export PROJECT_NAME="regtest"
|
||||||
#export REGISTRY_URL="https://index.docker.io/v1/"
|
#export REGISTRY_URL="https://index.docker.io/v1/"
|
||||||
|
|
||||||
EOL
|
EOL
|
||||||
@ -146,7 +146,7 @@ if ! command -v lxc >/dev/null 2>&1; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
ssh -t "ubuntu@$FQDN" "
|
ssh -t "ubuntu@$FQDN" "
|
||||||
set -ex
|
set -e
|
||||||
|
|
||||||
# install ufw and allow SSH.
|
# install ufw and allow SSH.
|
||||||
sudo apt update
|
sudo apt update
|
||||||
|
12
defaults.sh
12
defaults.sh
@ -38,10 +38,12 @@ export DUPLICITY_BACKUP_PASSPHRASE=
|
|||||||
|
|
||||||
export SSH_HOME="$HOME/.ssh"
|
export SSH_HOME="$HOME/.ssh"
|
||||||
export PASS_HOME="$HOME/.password-store"
|
export PASS_HOME="$HOME/.password-store"
|
||||||
export VLAN_INTERFACE=
|
|
||||||
export VM_NAME="sovereign-stack-base"
|
export VM_NAME="sovereign-stack-base"
|
||||||
export DEV_MEMORY_MB="8096"
|
|
||||||
export DEV_CPU_COUNT="6"
|
export BTCPAY_SERVER_CPU_COUNT="4"
|
||||||
|
export BTCPAY_SERVER_MEMORY_MB="4096"
|
||||||
|
export WWW_SERVER_CPU_COUNT="4"
|
||||||
|
export WWW_SERVER_MEMORY_MB="4096"
|
||||||
|
|
||||||
export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
|
export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
|
||||||
|
|
||||||
@ -68,7 +70,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"
|
|||||||
|
|
||||||
|
|
||||||
# run the docker stack.
|
# run the docker stack.
|
||||||
export GHOST_IMAGE="ghost:5.20.0"
|
export GHOST_IMAGE="ghost:5.23.0"
|
||||||
|
|
||||||
# TODO switch to mysql. May require intricate export work for existing sites.
|
# TODO switch to mysql. May require intricate export work for existing sites.
|
||||||
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
|
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
|
||||||
@ -79,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
|||||||
export NGINX_IMAGE="nginx:1.23.2"
|
export NGINX_IMAGE="nginx:1.23.2"
|
||||||
|
|
||||||
# version of backup is 24.0.3
|
# version of backup is 24.0.3
|
||||||
export NEXTCLOUD_IMAGE="nextcloud:25.0.0"
|
export NEXTCLOUD_IMAGE="nextcloud:25.0.1"
|
||||||
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
||||||
|
|
||||||
# TODO PIN the gitea version number.
|
# TODO PIN the gitea version number.
|
||||||
|
84
deploy.sh
84
deploy.sh
@ -1,44 +1,28 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -exu
|
set -e
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
|
||||||
export RESPOSITORY_PATH="$RESPOSITORY_PATH"
|
export RESPOSITORY_PATH="$RESPOSITORY_PATH"
|
||||||
|
|
||||||
check_dependencies () {
|
./check_dependencies.sh
|
||||||
for cmd in "$@"; do
|
|
||||||
if ! command -v "$cmd" >/dev/null 2>&1; then
|
|
||||||
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
# Check system's dependencies
|
|
||||||
check_dependencies wait-for-it dig rsync sshfs lxc
|
|
||||||
|
|
||||||
# let's check to ensure the management machine is on the Baseline ubuntu 21.04
|
|
||||||
if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
|
|
||||||
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
DOMAIN_NAME=
|
DOMAIN_NAME=
|
||||||
RUN_CERT_RENEWAL=false
|
RUN_CERT_RENEWAL=true
|
||||||
SKIP_WWW=false
|
SKIP_WWW=false
|
||||||
RESTORE_WWW=false
|
RESTORE_WWW=false
|
||||||
BACKUP_CERTS=false
|
BACKUP_CERTS=true
|
||||||
BACKUP_APPS=false
|
BACKUP_APPS=true
|
||||||
BACKUP_BTCPAY=false
|
BACKUP_BTCPAY=true
|
||||||
|
BACKUP_BTCPAY_ARCHIVE_PATH=
|
||||||
RESTORE_BTCPAY=false
|
RESTORE_BTCPAY=false
|
||||||
BTCPAY_RESTORE_ARCHIVE_PATH=
|
|
||||||
BTCPAY_LOCAL_BACKUP_PATH=
|
|
||||||
SKIP_BTCPAY=false
|
SKIP_BTCPAY=false
|
||||||
UPDATE_BTCPAY=false
|
UPDATE_BTCPAY=false
|
||||||
RECONFIGURE_BTCPAY_SERVER=false
|
RECONFIGURE_BTCPAY_SERVER=false
|
||||||
CLUSTER_NAME="$(lxc remote get-default)"
|
CLUSTER_NAME="$(lxc remote get-default)"
|
||||||
STOP_SERVICES=false
|
STOP_SERVICES=false
|
||||||
|
USER_SAYS_YES=false
|
||||||
|
|
||||||
# grab any modifications from the command line.
|
# grab any modifications from the command line.
|
||||||
for i in "$@"; do
|
for i in "$@"; do
|
||||||
@ -59,6 +43,11 @@ for i in "$@"; do
|
|||||||
BACKUP_CERTS=true
|
BACKUP_CERTS=true
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
--no-backup-www)
|
||||||
|
BACKUP_CERTS=false
|
||||||
|
BACKUP_APPS=false
|
||||||
|
shift
|
||||||
|
;;
|
||||||
--stop)
|
--stop)
|
||||||
STOP_SERVICES=true
|
STOP_SERVICES=true
|
||||||
shift
|
shift
|
||||||
@ -67,6 +56,10 @@ for i in "$@"; do
|
|||||||
DOMAIN_NAME="${i#*=}"
|
DOMAIN_NAME="${i#*=}"
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
--backup-archive-path=*)
|
||||||
|
BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
|
||||||
|
shift
|
||||||
|
;;
|
||||||
--update-btcpay)
|
--update-btcpay)
|
||||||
UPDATE_BTCPAY=true
|
UPDATE_BTCPAY=true
|
||||||
shift
|
shift
|
||||||
@ -83,22 +76,18 @@ for i in "$@"; do
|
|||||||
BACKUP_APPS=true
|
BACKUP_APPS=true
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
--backup-btcpay)
|
--no-cert-renew)
|
||||||
BACKUP_BTCPAY=true
|
RUN_CERT_RENEWAL=false
|
||||||
shift
|
|
||||||
;;
|
|
||||||
--restore-archive=*)
|
|
||||||
BTCPAY_RESTORE_ARCHIVE_PATH="${i#*=}"
|
|
||||||
shift
|
|
||||||
;;
|
|
||||||
--renew-certs)
|
|
||||||
RUN_CERT_RENEWAL=true
|
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
--reconfigure-btcpay)
|
--reconfigure-btcpay)
|
||||||
RECONFIGURE_BTCPAY_SERVER=true
|
RECONFIGURE_BTCPAY_SERVER=true
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
-y)
|
||||||
|
USER_SAYS_YES=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
*)
|
*)
|
||||||
echo "Unexpected option: $1"
|
echo "Unexpected option: $1"
|
||||||
exit 1
|
exit 1
|
||||||
@ -106,10 +95,8 @@ for i in "$@"; do
|
|||||||
esac
|
esac
|
||||||
done
|
done
|
||||||
|
|
||||||
|
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
|
||||||
# do some CLI checking.
|
echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set event when the RESTORE_BTCPAY = true. "
|
||||||
if [ "$RESTORE_BTCPAY" = true ] && [ ! -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then
|
|
||||||
echo "ERROR: The restoration archive is not specified. Ensure --restore-archive= is set on the command line."
|
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@ -118,7 +105,6 @@ source ./defaults.sh
|
|||||||
|
|
||||||
export DOMAIN_NAME="$DOMAIN_NAME"
|
export DOMAIN_NAME="$DOMAIN_NAME"
|
||||||
export REGISTRY_DOCKER_IMAGE="registry:2"
|
export REGISTRY_DOCKER_IMAGE="registry:2"
|
||||||
export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH"
|
|
||||||
export RESTORE_WWW="$RESTORE_WWW"
|
export RESTORE_WWW="$RESTORE_WWW"
|
||||||
export STOP_SERVICES="$STOP_SERVICES"
|
export STOP_SERVICES="$STOP_SERVICES"
|
||||||
export BACKUP_CERTS="$BACKUP_CERTS"
|
export BACKUP_CERTS="$BACKUP_CERTS"
|
||||||
@ -128,6 +114,9 @@ export BACKUP_BTCPAY="$BACKUP_BTCPAY"
|
|||||||
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
|
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
|
||||||
export CLUSTER_NAME="$CLUSTER_NAME"
|
export CLUSTER_NAME="$CLUSTER_NAME"
|
||||||
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
|
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
|
||||||
|
export USER_SAYS_YES="$USER_SAYS_YES"
|
||||||
|
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
|
||||||
|
|
||||||
|
|
||||||
# ensure our cluster path is created.
|
# ensure our cluster path is created.
|
||||||
mkdir -p "$CLUSTER_PATH"
|
mkdir -p "$CLUSTER_PATH"
|
||||||
@ -170,6 +159,7 @@ function instantiate_vms {
|
|||||||
VPS_HOSTNAME=
|
VPS_HOSTNAME=
|
||||||
|
|
||||||
for VIRTUAL_MACHINE in www btcpayserver; do
|
for VIRTUAL_MACHINE in www btcpayserver; do
|
||||||
|
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
|
||||||
FQDN=
|
FQDN=
|
||||||
|
|
||||||
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
||||||
@ -251,9 +241,6 @@ function instantiate_vms {
|
|||||||
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
|
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
|
||||||
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
|
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
|
||||||
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
|
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
|
||||||
export BTCPAY_LOCAL_BACKUP_PATH="$SITE_PATH/backups/btcpayserver/$BACKUP_TIMESTAMP"
|
|
||||||
export BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$UNIX_BACKUP_TIMESTAMP.tar.gz"
|
|
||||||
|
|
||||||
./deployment/deploy_vms.sh
|
./deployment/deploy_vms.sh
|
||||||
|
|
||||||
# if the local docker client isn't logged in, do so;
|
# if the local docker client isn't logged in, do so;
|
||||||
@ -358,6 +345,10 @@ export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
|
|||||||
export BTC_CHAIN="regtest|testnet|mainnet"
|
export BTC_CHAIN="regtest|testnet|mainnet"
|
||||||
export PRIMARY_DOMAIN="domain0.tld"
|
export PRIMARY_DOMAIN="domain0.tld"
|
||||||
export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
|
export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
|
||||||
|
export BTCPAY_SERVER_CPU_COUNT="4"
|
||||||
|
export BTCPAY_SERVER_MEMORY_MB="4096"
|
||||||
|
export WWW_SERVER_CPU_COUNT="6"
|
||||||
|
export WWW_SERVER_MEMORY_MB="4096"
|
||||||
|
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
@ -372,7 +363,12 @@ fi
|
|||||||
source "$PROJECT_DEFINITION_PATH"
|
source "$PROJECT_DEFINITION_PATH"
|
||||||
|
|
||||||
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
|
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
|
||||||
export DOMAIN_LIST="${PRIMARY_DOMAIN},${OTHER_SITES_LIST}"
|
DOMAIN_LIST="${PRIMARY_DOMAIN}"
|
||||||
|
if [ -n "$OTHER_SITES_LIST" ]; then
|
||||||
|
DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
export DOMAIN_LIST="$DOMAIN_LIST"
|
||||||
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
|
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
|
||||||
|
|
||||||
# let's provision our primary domain first.
|
# let's provision our primary domain first.
|
||||||
@ -404,6 +400,8 @@ if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
|
|||||||
bash -c "./deployment/www/go.sh"
|
bash -c "./deployment/www/go.sh"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
export DOMAIN_NAME="$PRIMARY_DOMAIN"
|
||||||
|
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
||||||
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
|
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
|
||||||
bash -c "./deployment/btcpayserver/go.sh"
|
bash -c "./deployment/btcpayserver/go.sh"
|
||||||
fi
|
fi
|
||||||
|
@ -9,6 +9,8 @@ cd "$(dirname "$0")"
|
|||||||
|
|
||||||
echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
|
echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
|
||||||
|
|
||||||
|
sleep 5
|
||||||
|
|
||||||
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
|
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
|
||||||
|
|
||||||
# TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services
|
# TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services
|
||||||
@ -25,8 +27,13 @@ ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BT
|
|||||||
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
|
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
|
||||||
ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
|
ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
|
||||||
|
|
||||||
|
# if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
|
||||||
|
BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
|
||||||
|
if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
|
||||||
|
BACKUP_BTCPAY_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$(date +%s).tar.gz"
|
||||||
|
fi
|
||||||
|
|
||||||
mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
|
mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
|
||||||
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH"
|
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
|
||||||
|
|
||||||
echo "INFO: Created backup archive '$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
|
echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
|
||||||
|
@ -28,6 +28,7 @@ elif [ "$RESTORE_BTCPAY" = true ]; then
|
|||||||
|
|
||||||
RUN_SERVICES=true
|
RUN_SERVICES=true
|
||||||
OPEN_URL=true
|
OPEN_URL=true
|
||||||
|
BACKUP_BTCPAY=false
|
||||||
|
|
||||||
elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
|
elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
|
||||||
# the administrator may have indicated a reconfig;
|
# the administrator may have indicated a reconfig;
|
||||||
@ -39,12 +40,12 @@ elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# if the script gets this far, then we grab a regular backup.
|
# if the script gets this far, then we grab a regular backup.
|
||||||
if [ "$BACKUP_BTCPAY" = true ]; then
|
if [ "$BACKUP_BTCPAY" = true ]; then
|
||||||
# we just grab a regular backup
|
# we just grab a regular backup
|
||||||
./backup_btcpay.sh
|
./backup_btcpay.sh
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "$RUN_SERVICES" = true ]; then
|
if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
|
||||||
# The default is to resume services, though admin may want to keep services off (eg., for a migration)
|
# The default is to resume services, though admin may want to keep services off (eg., for a migration)
|
||||||
# we bring the services back up by default.
|
# we bring the services back up by default.
|
||||||
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
|
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
|
||||||
|
@ -3,14 +3,18 @@
|
|||||||
set -e
|
set -e
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
if [ -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then
|
if [ "$RESTORE_BTCPAY" = false ]; then
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
|
||||||
# push the restoration archive to the remote server
|
# push the restoration archive to the remote server
|
||||||
echo "INFO: Restoring BTCPAY Server: $BTCPAY_RESTORE_ARCHIVE_PATH"
|
echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"
|
||||||
|
|
||||||
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
|
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
|
||||||
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
|
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
|
||||||
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
|
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
|
||||||
scp "$BTCPAY_RESTORE_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
|
scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
|
||||||
|
|
||||||
# we clean up any old containers first before restoring.
|
# we clean up any old containers first before restoring.
|
||||||
ssh "$FQDN" docker system prune -f
|
ssh "$FQDN" docker system prune -f
|
||||||
|
@ -70,3 +70,6 @@ scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
|
|||||||
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
|
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
|
||||||
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
|
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
|
||||||
ssh "$BTCPAY_FQDN" "touch $REMOTE_HOME/btcpay.complete"
|
ssh "$BTCPAY_FQDN" "touch $REMOTE_HOME/btcpay.complete"
|
||||||
|
|
||||||
|
# lets give time for the containers to spin up
|
||||||
|
sleep 10
|
@ -17,11 +17,25 @@ YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
|
|||||||
# If we are deploying the www, we attach the vm to the underlay via macvlan.
|
# If we are deploying the www, we attach the vm to the underlay via macvlan.
|
||||||
cat > "$YAML_PATH" <<EOF
|
cat > "$YAML_PATH" <<EOF
|
||||||
config:
|
config:
|
||||||
limits.cpu: "${DEV_CPU_COUNT}"
|
EOF
|
||||||
limits.memory: "${DEV_MEMORY_MB}MB"
|
|
||||||
|
|
||||||
|
if [ "$VIRTUAL_MACHINE" = www ]; then
|
||||||
|
cat >> "$YAML_PATH" <<EOF
|
||||||
|
limits.cpu: "${WWW_SERVER_CPU_COUNT}"
|
||||||
|
limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
|
||||||
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
|
else [ "$VIRTUAL_MACHINE" = btcpayserver ];
|
||||||
|
cat >> "$YAML_PATH" <<EOF
|
||||||
|
limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
|
||||||
|
limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image.
|
# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image.
|
||||||
if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
|
if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
|
||||||
# this is for the base image only...
|
# this is for the base image only...
|
||||||
@ -161,7 +175,7 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
|
|||||||
- sudo apt-get update
|
- sudo apt-get update
|
||||||
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||||
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
|
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
|
||||||
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||||
- sudo chmod +x /usr/local/bin/docker-compose
|
- sudo chmod +x /usr/local/bin/docker-compose
|
||||||
- sudo apt-get install -y openssh-server
|
- sudo apt-get install -y openssh-server
|
||||||
|
|
||||||
@ -259,7 +273,8 @@ fi
|
|||||||
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
|
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
|
||||||
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
|
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
|
||||||
lxc profile create "$LXD_HOSTNAME"
|
lxc profile create "$LXD_HOSTNAME"
|
||||||
fi
|
|
||||||
|
|
||||||
# configure the profile with our generated cloud-init.yml file.
|
# configure the profile with our generated cloud-init.yml file.
|
||||||
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
|
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
|
||||||
|
|
||||||
|
fi
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -ex
|
set -e
|
||||||
|
|
||||||
|
|
||||||
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
|
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -exu
|
set -eu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
# Create the nginx config file which covers all domains.
|
# Create the nginx config file which covers all domains.
|
||||||
@ -84,6 +84,18 @@ done
|
|||||||
|
|
||||||
./stop_docker_stacks.sh
|
./stop_docker_stacks.sh
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# if [ "$DEPLOY_ONION_SITE" = true ]; then
|
# if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
# # ensure the tor image is built
|
# # ensure the tor image is built
|
||||||
# docker build -t tor:latest ./tor
|
# docker build -t tor:latest ./tor
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -eu
|
set -eux
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
|
FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
|
||||||
@ -10,11 +10,16 @@ if [ "$FILE_COUNT" = 0 ]; then
|
|||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
RESPONSE=
|
# if the user said -y at the cli, we can skip this.
|
||||||
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
|
if [ "$USER_SAYS_YES" = false ]; then
|
||||||
if [ "$RESPONSE" != "y" ]; then
|
|
||||||
echo "STOPPING."
|
RESPONSE=
|
||||||
exit 0
|
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
|
||||||
|
if [ "$RESPONSE" != "y" ]; then
|
||||||
|
echo "STOPPING."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# delete the target backup path so we can push restoration files from the management machine.
|
# delete the target backup path so we can push restoration files from the management machine.
|
||||||
@ -27,5 +32,6 @@ ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
|
|||||||
scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
|
scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
|
||||||
|
|
||||||
# now we run duplicity to restore the archive.
|
# now we run duplicity to restore the archive.
|
||||||
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$BACKUP_TIMESTAMP" "$REMOTE_SOURCE_BACKUP_PATH/"
|
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
|
||||||
|
|
||||||
|
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
|
@ -30,7 +30,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
|||||||
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
|
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
|
||||||
|
|
||||||
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
|
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
|
||||||
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP"
|
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
|
||||||
|
|
||||||
# ensure our local backup path exists.
|
# ensure our local backup path exists.
|
||||||
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
|
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
|
||||||
@ -40,7 +40,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
|||||||
if [ "$RESTORE_WWW" = true ]; then
|
if [ "$RESTORE_WWW" = true ]; then
|
||||||
./restore_path.sh
|
./restore_path.sh
|
||||||
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
|
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
|
||||||
elif [ "$BACKUP_APPS" = true ]; then
|
else
|
||||||
# if we're not restoring, then we may or may not back up.
|
# if we're not restoring, then we may or may not back up.
|
||||||
./backup_path.sh
|
./backup_path.sh
|
||||||
fi
|
fi
|
||||||
@ -57,14 +57,13 @@ if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
|
|||||||
# wait for all docker containers to stop.
|
# wait for all docker containers to stop.
|
||||||
# TODO see if there's a way to check for this.
|
# TODO see if there's a way to check for this.
|
||||||
sleep 15
|
sleep 15
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
#
|
#
|
||||||
if [ "$STOP_SERVICES" = true ]; then
|
if [ "$STOP_SERVICES" = true ]; then
|
||||||
echo "STOPPING as indicated by the --stop flag."
|
echo "STOPPING as indicated by the --stop flag."
|
||||||
|
exit 0
|
||||||
|
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# generate the certs and grab a backup
|
# generate the certs and grab a backup
|
||||||
@ -72,19 +71,23 @@ if [ "$RUN_CERT_RENEWAL" = true ]; then
|
|||||||
./generate_certs.sh
|
./generate_certs.sh
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Back each domain's certificates under /home/ubuntu/letsencrypt/domain
|
# let's backup all our letsencrypt certs
|
||||||
|
export APP="letsencrypt"
|
||||||
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
||||||
export DOMAIN_NAME="$DOMAIN_NAME"
|
export DOMAIN_NAME="$DOMAIN_NAME"
|
||||||
|
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
||||||
|
|
||||||
|
# source the site path so we know what features it has.
|
||||||
|
source "$RESPOSITORY_PATH/reset_env.sh"
|
||||||
|
source "$SITE_PATH/site_definition"
|
||||||
source "$RESPOSITORY_PATH/domain_env.sh"
|
source "$RESPOSITORY_PATH/domain_env.sh"
|
||||||
|
|
||||||
# these variable are used by both backup/restore scripts.
|
# these variable are used by both backup/restore scripts.
|
||||||
export APP="letsencrypt"
|
|
||||||
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
|
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
|
||||||
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
|
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
|
||||||
|
|
||||||
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
|
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
|
||||||
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP"
|
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
|
||||||
mkdir -p "$LOCAL_BACKUP_PATH"
|
mkdir -p "$LOCAL_BACKUP_PATH"
|
||||||
|
|
||||||
if [ "$RESTORE_WWW" = true ]; then
|
if [ "$RESTORE_WWW" = true ]; then
|
||||||
@ -95,6 +98,5 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
|||||||
elif [ "$BACKUP_APPS" = true ]; then
|
elif [ "$BACKUP_APPS" = true ]; then
|
||||||
# if we're not restoring, then we may or may not back up.
|
# if we're not restoring, then we may or may not back up.
|
||||||
./backup_path.sh
|
./backup_path.sh
|
||||||
|
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
@ -103,9 +103,10 @@ EOL
|
|||||||
EOL
|
EOL
|
||||||
fi
|
fi
|
||||||
|
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
|
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
|
||||||
sleep 2
|
sleep 2
|
||||||
|
fi
|
||||||
|
|
||||||
done # language code
|
done # language code
|
||||||
|
|
||||||
|
@ -80,9 +80,10 @@ EOL
|
|||||||
${DBNET_NAME}:
|
${DBNET_NAME}:
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
sleep 1
|
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
|
||||||
|
sleep 1
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
done
|
done
|
||||||
|
@ -74,7 +74,9 @@ networks:
|
|||||||
|
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
|
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
|
||||||
|
sleep 1
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -exu
|
set -eu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
@ -129,5 +129,9 @@ EOL
|
|||||||
done
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
|
|
||||||
# iterate over all our domains and create the nginx config file.
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
|
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
|
||||||
|
# iterate over all our domains and create the nginx config file.
|
||||||
|
sleep 1
|
||||||
|
fi
|
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -exu
|
set -eu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
docker pull "$NOSTR_RELAY_IMAGE"
|
docker pull "$NOSTR_RELAY_IMAGE"
|
||||||
@ -75,8 +75,10 @@ messages_per_sec = 3
|
|||||||
#max_event_bytes = 131072
|
#max_event_bytes = 131072
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
sleep 1
|
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
|
||||||
|
sleep 1
|
||||||
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -15,16 +15,11 @@ export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
|
|||||||
export BTC_CHAIN="$BTC_CHAIN"
|
export BTC_CHAIN="$BTC_CHAIN"
|
||||||
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
|
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
|
||||||
|
|
||||||
BACKUP_TIMESTAMP="$(date +"%Y-%m")"
|
|
||||||
UNIX_BACKUP_TIMESTAMP="$(date +%s)"
|
|
||||||
|
|
||||||
SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
|
SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
|
||||||
export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
|
export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
|
||||||
echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"
|
echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"
|
||||||
|
|
||||||
export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP"
|
|
||||||
export UNIX_BACKUP_TIMESTAMP="$UNIX_BACKUP_TIMESTAMP"
|
|
||||||
|
|
||||||
export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1))
|
export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1))
|
||||||
|
|
||||||
STACK_NAME="$DOMAIN_IDENTIFIER-en"
|
STACK_NAME="$DOMAIN_IDENTIFIER-en"
|
||||||
|
@ -1,12 +1,10 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -exu
|
set -eu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
source ./defaults.sh
|
|
||||||
|
|
||||||
# let's check to ensure the management machine is on the baseline Ubuntu 22.04 LTS
|
# let's check to ensure the management machine is on the baseline Ubuntu 22.04 LTS
|
||||||
if ! lsb_release -d | grep "Ubuntu 22.04" | grep -q "LTS"; then
|
if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then
|
||||||
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
|
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
91
migrate.sh
Normal file → Executable file
91
migrate.sh
Normal file → Executable file
@ -1,12 +1,89 @@
|
|||||||
# move all migration logic into this script.
|
#!/bin/bash
|
||||||
|
|
||||||
if machine exists, then
|
set -eu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
get backup.
|
CURRENT_CLUSTER="$(lxc remote get-default)"
|
||||||
don't restart services.
|
|
||||||
|
|
||||||
Then
|
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
|
||||||
|
echo "ERROR: YOU MUST COMMENT THIS OUT BEFORE YOU CAN RUN MIGRATE ON PRODUCTION."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
delete machine.
|
source ./defaults.sh
|
||||||
|
|
||||||
Then re-run the script with the --restore option.
|
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
|
||||||
|
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
|
||||||
|
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
|
||||||
|
|
||||||
|
# ensure the cluster definition exists.
|
||||||
|
if [ ! -f "$CLUSTER_DEFINITION" ]; then
|
||||||
|
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
source "$CLUSTER_DEFINITION"
|
||||||
|
|
||||||
|
# source the project definition.
|
||||||
|
# Now let's load the project definition.
|
||||||
|
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
|
||||||
|
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
|
||||||
|
source "$PROJECT_DEFINITION_PATH"
|
||||||
|
|
||||||
|
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
|
||||||
|
source "$PRIMARY_SITE_DEFINITION_PATH"
|
||||||
|
|
||||||
|
# Check to see if any of the VMs actually don't exist.
|
||||||
|
# (we only migrate instantiated vms)
|
||||||
|
for VM in www btcpayserver; do
|
||||||
|
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
|
||||||
|
|
||||||
|
# if the VM doesn't exist, then we emit an error message and hard quit.
|
||||||
|
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
|
||||||
|
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
|
||||||
|
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
|
||||||
|
|
||||||
|
# first we run ss-deploy --stop
|
||||||
|
# this grabs a backup of all data (backups are on by default) and saves them to the management machine
|
||||||
|
# the --stop flag ensures that services do NOT come back online.
|
||||||
|
# by default, we grab a backup.
|
||||||
|
|
||||||
|
bash -c "./deploy.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
|
||||||
|
|
||||||
|
RESPONSE=
|
||||||
|
read -r -p "Are you sure you want to continue the migration? We have a backup TODO.": RESPONSE
|
||||||
|
if [ "$RESPONSE" != "y" ]; then
|
||||||
|
echo "STOPPING."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
for VM in www btcpayserver; do
|
||||||
|
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
|
||||||
|
lxc delete -f "$LXD_NAME"
|
||||||
|
|
||||||
|
lxc profile delete "$LXD_NAME"
|
||||||
|
done
|
||||||
|
|
||||||
|
|
||||||
|
# delete the base image so it can be created.
|
||||||
|
if lxc list | grep -q sovereign-stack-base; then
|
||||||
|
lxc delete -f sovereign-stack-base
|
||||||
|
fi
|
||||||
|
|
||||||
|
# these only get initialized upon creation, so we MUST delete here so they get recreated.
|
||||||
|
if lxc profile list | grep -q sovereign-stack; then
|
||||||
|
lxc profile delete sovereign-stack
|
||||||
|
fi
|
||||||
|
|
||||||
|
if lxc image list | grep -q "sovereign-stack-base"; then
|
||||||
|
lxc image rm sovereign-stack-base
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Then we can run a restore operation and specify the backup archive at the CLI.
|
||||||
|
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
|
||||||
|
Loading…
Reference in New Issue
Block a user