diff --git a/check_dependencies.sh b/check_dependencies.sh new file mode 100755 index 0000000..a42be9f --- /dev/null +++ b/check_dependencies.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +set -eu +cd "$(dirname "$0")" + + +check_dependencies () { + for cmd in "$@"; do + if ! command -v "$cmd" >/dev/null 2>&1; then + echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'." + exit 1 + fi + done +} + +# Check system's dependencies +check_dependencies wait-for-it dig rsync sshfs lxc + +# let's check to ensure the management machine is on the Baseline Ubuntu 22.04 +if ! lsb_release -d | grep -q "Ubuntu 22.04"; then + echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine." + exit 1 +fi diff --git a/cluster.sh b/cluster.sh index d332d9b..2acea79 100755 --- a/cluster.sh +++ b/cluster.sh @@ -34,7 +34,7 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)" export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED" -export PROJECT_NAME="public" +export PROJECT_NAME="regtest" #export REGISTRY_URL="https://index.docker.io/v1/" EOL @@ -146,7 +146,7 @@ if ! command -v lxc >/dev/null 2>&1; then fi ssh -t "ubuntu@$FQDN" " -set -ex +set -e # install ufw and allow SSH. sudo apt update diff --git a/defaults.sh b/defaults.sh index b79cc47..2109f3f 100755 --- a/defaults.sh +++ b/defaults.sh @@ -38,10 +38,12 @@ export DUPLICITY_BACKUP_PASSPHRASE= export SSH_HOME="$HOME/.ssh" export PASS_HOME="$HOME/.password-store" -export VLAN_INTERFACE= export VM_NAME="sovereign-stack-base" -export DEV_MEMORY_MB="8096" -export DEV_CPU_COUNT="6" + +export BTCPAY_SERVER_CPU_COUNT="4" +export BTCPAY_SERVER_MEMORY_MB="4096" +export WWW_SERVER_CPU_COUNT="4" +export WWW_SERVER_MEMORY_MB="4096" export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io" @@ -68,7 +70,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy" # run the docker stack. 
-export GHOST_IMAGE="ghost:5.20.0" +export GHOST_IMAGE="ghost:5.23.0" # TODO switch to mysql. May require intricate export work for existing sites. # THIS MUST BE COMPLETED BEFORE v1 RELEASE @@ -79,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE" export NGINX_IMAGE="nginx:1.23.2" # version of backup is 24.0.3 -export NEXTCLOUD_IMAGE="nextcloud:25.0.0" +export NEXTCLOUD_IMAGE="nextcloud:25.0.1" export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE" # TODO PIN the gitea version number. diff --git a/deploy.sh b/deploy.sh index a16cfbb..f7378d5 100755 --- a/deploy.sh +++ b/deploy.sh @@ -1,44 +1,28 @@ #!/bin/bash -set -exu +set -e cd "$(dirname "$0")" RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" export RESPOSITORY_PATH="$RESPOSITORY_PATH" -check_dependencies () { - for cmd in "$@"; do - if ! command -v "$cmd" >/dev/null 2>&1; then - echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'." - exit 1 - fi - done -} - -# Check system's dependencies -check_dependencies wait-for-it dig rsync sshfs lxc - -# let's check to ensure the management machine is on the Baseline ubuntu 21.04 -if ! lsb_release -d | grep -q "Ubuntu 22.04"; then - echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine." - exit 1 -fi +./check_dependencies.sh DOMAIN_NAME= -RUN_CERT_RENEWAL=false +RUN_CERT_RENEWAL=true SKIP_WWW=false RESTORE_WWW=false -BACKUP_CERTS=false -BACKUP_APPS=false -BACKUP_BTCPAY=false +BACKUP_CERTS=true +BACKUP_APPS=true +BACKUP_BTCPAY=true +BACKUP_BTCPAY_ARCHIVE_PATH= RESTORE_BTCPAY=false -BTCPAY_RESTORE_ARCHIVE_PATH= -BTCPAY_LOCAL_BACKUP_PATH= SKIP_BTCPAY=false UPDATE_BTCPAY=false RECONFIGURE_BTCPAY_SERVER=false CLUSTER_NAME="$(lxc remote get-default)" STOP_SERVICES=false +USER_SAYS_YES=false # grab any modifications from the command line. 
for i in "$@"; do @@ -59,6 +43,11 @@ for i in "$@"; do BACKUP_CERTS=true shift ;; + --no-backup-www) + BACKUP_CERTS=false + BACKUP_APPS=false + shift + ;; --stop) STOP_SERVICES=true shift ;; @@ -67,6 +56,10 @@ for i in "$@"; do DOMAIN_NAME="${i#*=}" shift ;; + --backup-archive-path=*) + BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}" + shift + ;; --update-btcpay) UPDATE_BTCPAY=true shift @@ -83,22 +76,18 @@ for i in "$@"; do BACKUP_APPS=true shift ;; - --backup-btcpay) - BACKUP_BTCPAY=true - shift - ;; - --restore-archive=*) - BTCPAY_RESTORE_ARCHIVE_PATH="${i#*=}" - shift - ;; - --renew-certs) - RUN_CERT_RENEWAL=true + --no-cert-renew) + RUN_CERT_RENEWAL=false shift ;; --reconfigure-btcpay) RECONFIGURE_BTCPAY_SERVER=true shift ;; + -y) + USER_SAYS_YES=true + shift + ;; *) echo "Unexpected option: $1" exit 1 @@ -106,10 +95,8 @@ for i in "$@"; do esac done - -# do some CLI checking. -if [ "$RESTORE_BTCPAY" = true ] && [ ! -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then - echo "ERROR: The restoration archive is not specified. Ensure --restore-archive= is set on the command line." +if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then + echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set even when RESTORE_BTCPAY = true." exit 1 fi @@ -118,7 +105,6 @@ source ./defaults.sh export DOMAIN_NAME="$DOMAIN_NAME" export REGISTRY_DOCKER_IMAGE="registry:2" -export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH" export RESTORE_WWW="$RESTORE_WWW" export STOP_SERVICES="$STOP_SERVICES" export BACKUP_CERTS="$BACKUP_CERTS" @@ -128,6 +114,9 @@ export BACKUP_BTCPAY="$BACKUP_BTCPAY" export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL" export CLUSTER_NAME="$CLUSTER_NAME" export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME" +export USER_SAYS_YES="$USER_SAYS_YES" +export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH" + # ensure our cluster path is created. 
mkdir -p "$CLUSTER_PATH" @@ -170,6 +159,7 @@ function instantiate_vms { VPS_HOSTNAME= for VIRTUAL_MACHINE in www btcpayserver; do + export VIRTUAL_MACHINE="$VIRTUAL_MACHINE" FQDN= export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" @@ -251,9 +241,6 @@ function instantiate_vms { export VIRTUAL_MACHINE="$VIRTUAL_MACHINE" export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN" export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION" - export BTCPAY_LOCAL_BACKUP_PATH="$SITE_PATH/backups/btcpayserver/$BACKUP_TIMESTAMP" - export BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$UNIX_BACKUP_TIMESTAMP.tar.gz" - ./deployment/deploy_vms.sh # if the local docker client isn't logged in, do so; @@ -358,6 +345,10 @@ export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED" export BTC_CHAIN="regtest|testnet|mainnet" export PRIMARY_DOMAIN="domain0.tld" export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld" +export BTCPAY_SERVER_CPU_COUNT="4" +export BTCPAY_SERVER_MEMORY_MB="4096" +export WWW_SERVER_CPU_COUNT="6" +export WWW_SERVER_MEMORY_MB="4096" EOL @@ -372,7 +363,12 @@ fi source "$PROJECT_DEFINITION_PATH" # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list. -export DOMAIN_LIST="${PRIMARY_DOMAIN},${OTHER_SITES_LIST}" +DOMAIN_LIST="${PRIMARY_DOMAIN}" +if [ -n "$OTHER_SITES_LIST" ]; then + DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}" +fi + +export DOMAIN_LIST="$DOMAIN_LIST" export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1)) # let's provision our primary domain first. 
@@ -404,6 +400,8 @@ if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then bash -c "./deployment/www/go.sh" fi +export DOMAIN_NAME="$PRIMARY_DOMAIN" +export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then bash -c "./deployment/btcpayserver/go.sh" fi diff --git a/deployment/btcpayserver/backup_btcpay.sh b/deployment/btcpayserver/backup_btcpay.sh index 888ac02..342a519 100755 --- a/deployment/btcpayserver/backup_btcpay.sh +++ b/deployment/btcpayserver/backup_btcpay.sh @@ -9,6 +9,8 @@ cd "$(dirname "$0")" echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'." +sleep 5 + ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh" # TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services @@ -25,8 +27,13 @@ ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BT ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz" ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz" +# if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script. +BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver" +if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then + BACKUP_BTCPAY_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$(date +%s).tar.gz" +fi mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH" -scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH" +scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH" -echo "INFO: Created backup archive '$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH' for host '$BTCPAY_FQDN'." +echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'." 
diff --git a/deployment/btcpayserver/go.sh b/deployment/btcpayserver/go.sh index 296822c..bbfcb0f 100755 --- a/deployment/btcpayserver/go.sh +++ b/deployment/btcpayserver/go.sh @@ -28,6 +28,7 @@ elif [ "$RESTORE_BTCPAY" = true ]; then RUN_SERVICES=true OPEN_URL=true + BACKUP_BTCPAY=false elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then # the administrator may have indicated a reconfig; @@ -39,12 +40,12 @@ elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then fi # if the script gets this far, then we grab a regular backup. -if [ "$BACKUP_BTCPAY" = true ]; then +if [ "$BACKUP_BTCPAY" = true ]; then # we just grab a regular backup ./backup_btcpay.sh fi -if [ "$RUN_SERVICES" = true ]; then +if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then # The default is to resume services, though admin may want to keep services off (eg., for a migration) # we bring the services back up by default. ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh" diff --git a/deployment/btcpayserver/restore.sh b/deployment/btcpayserver/restore.sh index ec7382b..7e09e0f 100755 --- a/deployment/btcpayserver/restore.sh +++ b/deployment/btcpayserver/restore.sh @@ -3,14 +3,18 @@ set -e cd "$(dirname "$0")" -if [ -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then +if [ "$RESTORE_BTCPAY" = false ]; then + exit 0 +fi + +if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then # push the restoration archive to the remote server - echo "INFO: Restoring BTCPAY Server: $BTCPAY_RESTORE_ARCHIVE_PATH" + echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH" REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver" ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH" REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz" - scp "$BTCPAY_RESTORE_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH" + scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH" # we clean up any old containers first before restoring. 
ssh "$FQDN" docker system prune -f diff --git a/deployment/btcpayserver/stub_btcpay_setup.sh b/deployment/btcpayserver/stub_btcpay_setup.sh index 7363564..a168fac 100755 --- a/deployment/btcpayserver/stub_btcpay_setup.sh +++ b/deployment/btcpayserver/stub_btcpay_setup.sh @@ -70,3 +70,6 @@ scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh" ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh" ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh" ssh "$BTCPAY_FQDN" "touch $REMOTE_HOME/btcpay.complete" + +# lets give time for the containers to spin up +sleep 10 \ No newline at end of file diff --git a/deployment/stub_lxc_profile.sh b/deployment/stub_lxc_profile.sh index f255fc9..b37bdd0 100755 --- a/deployment/stub_lxc_profile.sh +++ b/deployment/stub_lxc_profile.sh @@ -17,11 +17,25 @@ YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME" # If we are deploying the www, we attach the vm to the underlay via macvlan. cat > "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> /home/ubuntu/.bash_profile - - sudo curl -s -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + - sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - sudo chmod +x /usr/local/bin/docker-compose - sudo apt-get install -y openssh-server @@ -259,7 +273,8 @@ fi # let's create a profile for the BCM TYPE-1 VMs. This is per VM. if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then lxc profile create "$LXD_HOSTNAME" -fi -# configure the profile with our generated cloud-init.yml file. -cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" + # configure the profile with our generated cloud-init.yml file. 
+ cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" + +fi diff --git a/deployment/www/generate_certs.sh b/deployment/www/generate_certs.sh index 9d68b27..5268ea9 100755 --- a/deployment/www/generate_certs.sh +++ b/deployment/www/generate_certs.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -ex +set -e # let's do a refresh of the certificates. Let's Encrypt will not run if it's not time. diff --git a/deployment/www/go.sh b/deployment/www/go.sh index 19863e3..09904f0 100755 --- a/deployment/www/go.sh +++ b/deployment/www/go.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -exu +set -eu cd "$(dirname "$0")" # Create the nginx config file which covers all domains. @@ -84,6 +84,18 @@ done ./stop_docker_stacks.sh + + + + + + + + + + + + # if [ "$DEPLOY_ONION_SITE" = true ]; then # # ensure the tor image is built # docker build -t tor:latest ./tor diff --git a/deployment/www/restore_path.sh b/deployment/www/restore_path.sh index 44aa2a2..0f0c561 100755 --- a/deployment/www/restore_path.sh +++ b/deployment/www/restore_path.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -eu +set -eux cd "$(dirname "$0")" FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)" @@ -10,11 +10,16 @@ if [ "$FILE_COUNT" = 0 ]; then exit 0 fi -RESPONSE= -read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE -if [ "$RESPONSE" != "y" ]; then - echo "STOPPING." - exit 0 +# if the user said -y at the cli, we can skip this. +if [ "$USER_SAYS_YES" = false ]; then + + RESPONSE= + read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE + if [ "$RESPONSE" != "y" ]; then + echo "STOPPING." + exit 0 + fi + fi # delete the target backup path so we can push restoration files from the management machine. 
@@ -27,5 +32,6 @@ ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH" scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH" # now we run duplicity to restore the archive. -ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$BACKUP_TIMESTAMP" "$REMOTE_SOURCE_BACKUP_PATH/" +ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/" +ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH" \ No newline at end of file diff --git a/deployment/www/stop_docker_stacks.sh b/deployment/www/stop_docker_stacks.sh index 8ba0455..a955817 100755 --- a/deployment/www/stop_docker_stacks.sh +++ b/deployment/www/stop_docker_stacks.sh @@ -30,7 +30,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME" # ensure our local backup path exists so we can pull down the duplicity archive to the management machine. - export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP" + export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP" # ensure our local backup path exists. if [ ! -d "$LOCAL_BACKUP_PATH" ]; then @@ -40,7 +40,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do if [ "$RESTORE_WWW" = true ]; then ./restore_path.sh #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP" - elif [ "$BACKUP_APPS" = true ]; then + else # if we're not restoring, then we may or may not back up. ./backup_path.sh fi @@ -57,14 +57,13 @@ if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then # wait for all docker containers to stop. # TODO see if there's a way to check for this. sleep 15 + fi # if [ "$STOP_SERVICES" = true ]; then echo "STOPPING as indicated by the --stop flag." 
- - - exit 1 + exit 0 fi # generate the certs and grab a backup @@ -72,19 +71,23 @@ if [ "$RUN_CERT_RENEWAL" = true ]; then ./generate_certs.sh fi -# Back each domain's certificates under /home/ubuntu/letsencrypt/domain +# let's backup all our letsencrypt certs +export APP="letsencrypt" for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export DOMAIN_NAME="$DOMAIN_NAME" + export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" + # source the site path so we know what features it has. + source "$RESPOSITORY_PATH/reset_env.sh" + source "$SITE_PATH/site_definition" source "$RESPOSITORY_PATH/domain_env.sh" # these variable are used by both backup/restore scripts. - export APP="letsencrypt" export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER" export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME" # ensure our local backup path exists so we can pull down the duplicity archive to the management machine. - export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP" + export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP" mkdir -p "$LOCAL_BACKUP_PATH" if [ "$RESTORE_WWW" = true ]; then @@ -95,6 +98,5 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do elif [ "$BACKUP_APPS" = true ]; then # if we're not restoring, then we may or may not back up. 
./backup_path.sh - fi done diff --git a/deployment/www/stub/ghost_yml.sh b/deployment/www/stub/ghost_yml.sh index dee243f..913a7b5 100755 --- a/deployment/www/stub/ghost_yml.sh +++ b/deployment/www/stub/ghost_yml.sh @@ -103,9 +103,10 @@ EOL EOL fi - docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE" - - sleep 2 + if [ "$STOP_SERVICES" = false ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE" + sleep 2 + fi done # language code diff --git a/deployment/www/stub/gitea_yml.sh b/deployment/www/stub/gitea_yml.sh index 1a6979b..bd4e807 100755 --- a/deployment/www/stub/gitea_yml.sh +++ b/deployment/www/stub/gitea_yml.sh @@ -80,9 +80,10 @@ EOL ${DBNET_NAME}: EOL - docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE" - sleep 1 - + if [ "$STOP_SERVICES" = false ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE" + sleep 1 + fi fi done diff --git a/deployment/www/stub/nextcloud_yml.sh b/deployment/www/stub/nextcloud_yml.sh index 19d7da7..44d1ce6 100755 --- a/deployment/www/stub/nextcloud_yml.sh +++ b/deployment/www/stub/nextcloud_yml.sh @@ -74,7 +74,9 @@ networks: EOL - docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en" - + if [ "$STOP_SERVICES" = false ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en" + sleep 1 + fi fi done \ No newline at end of file diff --git a/deployment/www/stub/nginx_config.sh b/deployment/www/stub/nginx_config.sh index e0ba65a..a8bffdc 100755 --- a/deployment/www/stub/nginx_config.sh +++ b/deployment/www/stub/nginx_config.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -exu +set -eu cd "$(dirname "$0")" diff --git a/deployment/www/stub/nginx_yml.sh b/deployment/www/stub/nginx_yml.sh index e25bf6f..7ea195f 100755 --- a/deployment/www/stub/nginx_yml.sh +++ b/deployment/www/stub/nginx_yml.sh @@ -129,5 +129,9 @@ EOL done done -docker stack deploy -c 
"$DOCKER_YAML_PATH" "reverse-proxy" -# iterate over all our domains and create the nginx config file. + +if [ "$STOP_SERVICES" = false ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy" + # iterate over all our domains and create the nginx config file. + sleep 1 +fi \ No newline at end of file diff --git a/deployment/www/stub/nostr_yml.sh b/deployment/www/stub/nostr_yml.sh index d52e46e..ad6b369 100755 --- a/deployment/www/stub/nostr_yml.sh +++ b/deployment/www/stub/nostr_yml.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -exu +set -eu cd "$(dirname "$0")" docker pull "$NOSTR_RELAY_IMAGE" @@ -75,8 +75,10 @@ messages_per_sec = 3 #max_event_bytes = 131072 EOL - docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE" - sleep 1 + if [ "$STOP_SERVICES" = false ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE" + sleep 1 + fi fi diff --git a/domain_env.sh b/domain_env.sh index 8405e38..963a2df 100755 --- a/domain_env.sh +++ b/domain_env.sh @@ -15,16 +15,11 @@ export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea" export BTC_CHAIN="$BTC_CHAIN" export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES" -BACKUP_TIMESTAMP="$(date +"%Y-%m")" -UNIX_BACKUP_TIMESTAMP="$(date +%s)" SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )" export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}" echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id" -export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP" -export UNIX_BACKUP_TIMESTAMP="$UNIX_BACKUP_TIMESTAMP" - export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1)) STACK_NAME="$DOMAIN_IDENTIFIER-en" diff --git a/install.sh b/install.sh index d8e1716..bdfb6d0 100755 --- a/install.sh +++ b/install.sh @@ -1,12 +1,10 @@ #!/bin/bash -set -exu +set -eu cd "$(dirname "$0")" -source ./defaults.sh - # let's check to ensure the management machine is on the Baseline ubuntu 21.04 -if ! 
lsb_release -d | grep "Ubuntu 22.04" | grep -q "LTS"; then +if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine." exit 1 fi diff --git a/migrate.sh b/migrate.sh old mode 100644 new mode 100755 index bdd4cb5..d837f89 --- a/migrate.sh +++ b/migrate.sh @@ -1,12 +1,89 @@ -# move all migration logic into this script. +#!/bin/bash -if machine exists, then +set -eu +cd "$(dirname "$0")" -get backup. -don't restart services. +CURRENT_CLUSTER="$(lxc remote get-default)" -Then +if echo "$CURRENT_CLUSTER" | grep -q "production"; then + echo "ERROR: YOU MUST COMMENT THIS OUT BEFORE YOU CAN RUN MIGRATE ON PRODUCTION." + exit 1 +fi -delete machine. +source ./defaults.sh -Then re-run script with --restor option. \ No newline at end of file +export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER" +CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" +export CLUSTER_DEFINITION="$CLUSTER_DEFINITION" + +# ensure the cluster definition exists. +if [ ! -f "$CLUSTER_DEFINITION" ]; then + echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'." + exit 1 +fi + +source "$CLUSTER_DEFINITION" + +# source the project definition. +# Now let's load the project definition. +PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME" +PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition" +source "$PROJECT_DEFINITION_PATH" + +export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition" +source "$PRIMARY_SITE_DEFINITION_PATH" + +# Check to see if any of the VMs actually don't exist. +# (we only migrate instantiated vms) +for VM in www btcpayserver; do + LXD_NAME="$VM-${DOMAIN_NAME//./-}" + + # if the VM doesn't exist, then we emit an error message and hard quit. + if ! lxc list --format csv | grep -q "$LXD_NAME"; then + echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again." 
+ exit 1 + fi +done + +BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz" +echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH" + +# first we run ss-deploy --stop +# this grabs a backup of all data (backups are on by default) and saves them to the management machine +# the --stop flag ensures that services do NOT come back online. +# by default, we grab a backup. + +bash -c "./deploy.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" + +RESPONSE= +read -r -p "Are you sure you want to continue the migration? A backup archive was saved to '$BTCPAY_RESTORE_ARCHIVE_PATH' (y/n)": RESPONSE +if [ "$RESPONSE" != "y" ]; then + echo "STOPPING." + exit 0 +fi + + +for VM in www btcpayserver; do + LXD_NAME="$VM-${DOMAIN_NAME//./-}" + lxc delete -f "$LXD_NAME" + + lxc profile delete "$LXD_NAME" +done + + +# delete the base image so it can be re-created. +if lxc list | grep -q sovereign-stack-base; then + lxc delete -f sovereign-stack-base +fi + +# these only get initialized upon creation, so we MUST delete here so they get recreated. +if lxc profile list | grep -q sovereign-stack; then + lxc profile delete sovereign-stack +fi + +if lxc image list | grep -q "sovereign-stack-base"; then + lxc image rm sovereign-stack-base +fi + +# Then we can run a restore operation and specify the backup archive at the CLI. +bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"