From 0361700cf2c4034dad5ca12d534162a9c402e75a Mon Sep 17 00:00:00 2001 From: Derek Smith Date: Mon, 6 Mar 2023 14:30:56 -0500 Subject: [PATCH] Initial Commit. --- btcpayserver/.gitignore | 1 + btcpayserver/backup_btcpay.sh | 39 ++ btcpayserver/bashrc.txt | 6 + btcpayserver/go.sh | 58 ++ btcpayserver/remote_scripts/btcpay-backup.sh | 116 ++++ btcpayserver/remote_scripts/btcpay-restore.sh | 115 ++++ btcpayserver/restore.sh | 33 ++ btcpayserver/stub_btcpay_setup.sh | 112 ++++ create_lxc_base.sh | 42 ++ deploy.sh | 451 ++++++++++++++ deploy_vms.sh | 55 ++ domain_env.sh | 27 + stub_lxc_profile.sh | 226 ++++++++ wait_for_lxc_ip.sh | 45 ++ www/.gitignore | 1 + www/backup_path.sh | 29 + www/generate_certs.sh | 45 ++ www/go.sh | 162 ++++++ www/prepare_clams.sh | 35 ++ www/restore_path.sh | 38 ++ www/stop_docker_stacks.sh | 49 ++ www/stub/ghost_yml.sh | 113 ++++ www/stub/gitea_yml.sh | 89 +++ www/stub/nextcloud_yml.sh | 82 +++ www/stub/nginx_config.sh | 548 ++++++++++++++++++ www/stub/nginx_yml.sh | 154 +++++ www/stub/nostr_yml.sh | 96 +++ www/tor/Dockerfile | 11 + www/tor/torrc | 8 + www/tor/torrc-init | 5 + 30 files changed, 2791 insertions(+) create mode 100644 btcpayserver/.gitignore create mode 100755 btcpayserver/backup_btcpay.sh create mode 100644 btcpayserver/bashrc.txt create mode 100755 btcpayserver/go.sh create mode 100644 btcpayserver/remote_scripts/btcpay-backup.sh create mode 100644 btcpayserver/remote_scripts/btcpay-restore.sh create mode 100755 btcpayserver/restore.sh create mode 100755 btcpayserver/stub_btcpay_setup.sh create mode 100755 create_lxc_base.sh create mode 100755 deploy.sh create mode 100755 deploy_vms.sh create mode 100755 domain_env.sh create mode 100755 stub_lxc_profile.sh create mode 100755 wait_for_lxc_ip.sh create mode 100644 www/.gitignore create mode 100755 www/backup_path.sh create mode 100755 www/generate_certs.sh create mode 100755 www/go.sh create mode 100755 www/prepare_clams.sh create mode 100755 www/restore_path.sh create mode 100755 www/stop_docker_stacks.sh create mode 100755 www/stub/ghost_yml.sh create mode 100755 www/stub/gitea_yml.sh create mode 100755 www/stub/nextcloud_yml.sh create mode 100755 www/stub/nginx_config.sh create mode 100755 www/stub/nginx_yml.sh create mode 100755 www/stub/nostr_yml.sh create mode 100644 www/tor/Dockerfile create mode 100644 www/tor/torrc create mode 100644 www/tor/torrc-init diff --git a/btcpayserver/.gitignore b/btcpayserver/.gitignore new file mode 100644 index 0000000..26e8e7b --- /dev/null +++ b/btcpayserver/.gitignore @@ -0,0 +1 @@ +core-lightning \ No newline at end of file diff --git a/btcpayserver/backup_btcpay.sh b/btcpayserver/backup_btcpay.sh new file mode 100755 index 0000000..a4cf276 --- /dev/null +++ b/btcpayserver/backup_btcpay.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +set -ex +cd "$(dirname "$0")" + +# take the services down, create a backup archive, then pull it down. +# the script executed here from the BTCPAY repo will automatically take services down +# and bring them back up. + +echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'." + +sleep 5 + +ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh" + +# TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services +# that way processes can run shutdown procedures and leave files in the correct state. 
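+# NOTE: the fixed sleep below is a guess; a sketch of a more robust wait,
+# polling until the remote docker daemon reports no running containers
+# (attempt count and interval are arbitrary):
+#   for attempt in $(seq 1 12); do
+#       [ "$(ssh "$BTCPAY_FQDN" docker ps -q | wc -l)" = "0" ] && break
+#       sleep 5
+#   done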
+sleep 10
+
+# TODO enable encrypted archives
+# TODO switch to btcpay-backup.sh when on LXD fully.
+scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+
+# next we pull the resulting backup archive down to our management machine.
+ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
+ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
+
+# if the backup archive path is not set, we set it; it is usually preset only when a migration script is running.
+BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
+if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
+    BACKUP_BTCPAY_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$(date +%s).tar.gz"
+fi
+
+mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
+scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
+
+echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
diff --git a/btcpayserver/bashrc.txt b/btcpayserver/bashrc.txt
new file mode 100644
index 0000000..afbb134
--- /dev/null
+++ b/btcpayserver/bashrc.txt
@@ -0,0 +1,6 @@
+# we append this text to the btcpay server /home/ubuntu/.bashrc so
+# logged-in users have more convenient access to the various node CLIs.
+
+alias bitcoin-cli="bitcoin-cli.sh"
+alias lightning-cli="bitcoin-lightning-cli.sh"
+
diff --git a/btcpayserver/go.sh b/btcpayserver/go.sh
new file mode 100755
index 0000000..c8c3631
--- /dev/null
+++ b/btcpayserver/go.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+set -exu
+cd "$(dirname "$0")"
+
+export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"
+
+# run the btcpay setup script if it hasn't been done before.
+if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
+    ./stub_btcpay_setup.sh
+    BACKUP_BTCPAY=false
+fi
+
+RUN_SERVICES=true
+
+# we re-run the btcpayserver provisioning scripts if directed to do so.
+# if an update does occur, we grab another backup.
+if [ "$UPDATE_BTCPAY" = true ]; then
+    # take services down, then run the update.
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+
+    # btcpay-update.sh brings services back up, but does not take them down.
+    ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
+
+    sleep 20
+
+elif [ "$RESTORE_BTCPAY" = true ]; then
+    # take services down before restoring.
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+    sleep 15
+
+    ./restore.sh
+
+    RUN_SERVICES=true
+    BACKUP_BTCPAY=false
+
+elif [ "$RECONFIGURE_BTCPAY_SERVER" = true ]; then
+    # the administrator may have indicated a reconfig;
+    # if so, we re-run the setup script.
+    ./stub_btcpay_setup.sh
+
+    RUN_SERVICES=true
+    BACKUP_BTCPAY=false
+fi
+
+# if the script gets this far, we grab a regular backup.
+if [ "$BACKUP_BTCPAY" = true ]; then
+    ./backup_btcpay.sh
+fi
+
+if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
+    # by default we bring the services back up; the admin may want to keep
+    # them off (e.g., for a migration).
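+    # NOTE: a sketch of a readiness wait that could follow the btcpay-up.sh
+    # call below; the health endpoint path is an assumption, adjust it to
+    # your BTCPay version:
+    #   until curl -sf "https://$BTCPAY_USER_FQDN/api/v1/health" >/dev/null; do
+    #       sleep 5
+    #   done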
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
+fi
+
+echo "FINISHED btcpayserver/go.sh"
diff --git a/btcpayserver/remote_scripts/btcpay-backup.sh b/btcpayserver/remote_scripts/btcpay-backup.sh
new file mode 100644
index 0000000..1c45858
--- /dev/null
+++ b/btcpayserver/remote_scripts/btcpay-backup.sh
@@ -0,0 +1,116 @@
+#!/bin/bash -e
+
+set -o pipefail -o errexit
+
+# Please be aware of these important issues:
+#
+# - Old channel state is toxic and you can lose all your funds if you or someone
+#   else closes a channel based on a backup with old state - and the state changes
+#   often! If you publish an old state (say, from yesterday's backup) on chain, you
+#   WILL LOSE ALL YOUR FUNDS IN A CHANNEL, because the counterparty will publish a
+#   revocation key!
+
+if [ "$(id -u)" != "0" ]; then
+  echo "ERROR: This script must be run as root."
+  echo "       Use the command 'sudo su -' (include the trailing hyphen) and try again."
+  exit 1
+fi
+
+# preparation
+docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
+dbdump_name=postgres.sql.gz
+btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
+backup_dir="$docker_dir/volumes/backup_datadir/_data"
+dbdump_path="$docker_dir/$dbdump_name"
+backup_path="$backup_dir/backup.tar.gz"
+
+# ensure the backup dir exists
+if [ ! -d "$backup_dir" ]; then
+  mkdir -p "$backup_dir"
+fi
+
+cd "$btcpay_dir"
+. helpers.sh
+
+dbcontainer=$(docker ps -a -q -f "name=postgres_1")
+if [ -z "$dbcontainer" ]; then
+  printf "\n"
+  echo "INFO: Database container is not up and running. Starting BTCPay Server."
+  docker volume create generated_postgres_datadir
+  docker-compose -f "$BTCPAY_DOCKER_COMPOSE" up -d postgres
+
+  printf "\n"
+  dbcontainer=$(docker ps -a -q -f "name=postgres_1")
+  if [ -z "$dbcontainer" ]; then
+    echo "ERROR: Database container could not be started or found."
+    exit 1
+  fi
+fi
+
+printf "\n"
+echo "INFO: Dumping database."
+{
+  docker exec "$dbcontainer" pg_dumpall -c -U postgres | gzip > "$dbdump_path"
+  echo "INFO: Database dump done."
+} || {
+  echo "ERROR: Dumping failed. Please check the error message above."
+  exit 1
+}
+
+echo "INFO: Stopping BTCPay Server."
+btcpay_down
+
+printf "\n"
+cd "$docker_dir"
+echo "INFO: Archiving files in $(pwd)."
+
+{
+  tar \
+    --exclude="volumes/backup_datadir" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/blocks" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/chainstate" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/debug.log" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/testnet3/blocks" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/testnet3/chainstate" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/testnet3/debug.log" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/regtest/blocks" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/regtest/chainstate" \
+    --exclude="volumes/generated_bitcoin_datadir/_data/regtest/debug.log" \
+    --exclude="volumes/generated_postgres_datadir" \
+    --exclude="volumes/generated_tor_relay_datadir" \
+    --exclude="volumes/generated_clightning_bitcoin_datadir/_data/lightning-rpc" \
+    --exclude="**/logs/*" \
+    -cvzf "$backup_path" "$dbdump_name" volumes/generated_*
+  echo "INFO: Archive done."
+
+  if [ -n "$BTCPAY_BACKUP_PASSPHRASE" ]; then
+    printf "\n"
+    echo "INFO: BTCPAY_BACKUP_PASSPHRASE is set, the backup will be encrypted."
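+    # NOTE: a sketch of how an archive encrypted by the block below is
+    # decrypted later with the same symmetric passphrase:
+    #   gpg --batch --passphrase "$BTCPAY_BACKUP_PASSPHRASE" \
+    #       -o backup.tar.gz -d backup.tar.gz.gpg
+    # (gpg 2.1+ may additionally require --pinentry-mode loopback.)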
+    {
+      gpg -o "$backup_path.gpg" --batch --yes -c --passphrase "$BTCPAY_BACKUP_PASSPHRASE" "$backup_path"
+      rm "$backup_path"
+      backup_path="$backup_path.gpg"
+      echo "INFO: Encryption done."
+    } || {
+      echo "ERROR: Encrypting failed. Please check the error message above."
+      echo "INFO: Restarting BTCPay Server."
+      cd "$btcpay_dir"
+
+      exit 1
+    }
+  fi
+} || {
+  echo "ERROR: Archiving failed. Please check the error message above."
+  echo "INFO: Restarting BTCPay Server."
+  cd "$btcpay_dir"
+
+  exit 1
+}
+
+echo "INFO: Restarting BTCPay Server."
+cd "$btcpay_dir"
+
+echo "INFO: Cleaning up."
+rm "$dbdump_path"
+
+echo "INFO: Backup done => $backup_path."
diff --git a/btcpayserver/remote_scripts/btcpay-restore.sh b/btcpayserver/remote_scripts/btcpay-restore.sh
new file mode 100644
index 0000000..e4f9d8c
--- /dev/null
+++ b/btcpayserver/remote_scripts/btcpay-restore.sh
@@ -0,0 +1,115 @@
+#!/bin/bash -e
+
+set -o pipefail -o errexit
+
+if [ "$(id -u)" != "0" ]; then
+  echo "ERROR: This script must be run as root."
+  echo "➡️  Use the command 'sudo su -' (include the trailing hyphen) and try again."
+  exit 1
+fi
+
+backup_path="$1"
+if [ -z "$backup_path" ]; then
+  echo "ERROR: Usage: btcpay-restore.sh /path/to/backup.tar.gz"
+  exit 1
+fi
+
+if [ ! -f "$backup_path" ]; then
+  echo "ERROR: $backup_path does not exist."
+  exit 1
+fi
+
+if [[ "$backup_path" == *.gpg && -z "$BTCPAY_BACKUP_PASSPHRASE" ]]; then
+  echo "INFO: $backup_path is encrypted. Please provide the passphrase to decrypt it."
+  echo "INFO: Usage: BTCPAY_BACKUP_PASSPHRASE=t0pSeCrEt btcpay-restore.sh /path/to/backup.tar.gz.gpg"
+  exit 1
+fi
+
+# preparation
+docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
+restore_dir="$docker_dir/volumes/backup_datadir/_data/restore"
+dbdump_name=postgres.sql.gz
+btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
+
+# ensure a clean restore dir
+echo "INFO: Cleaning restore directory $restore_dir."
+rm -rf "$restore_dir"
+mkdir -p "$restore_dir"
+
+if [[ "$backup_path" == *.gpg ]]; then
+  echo "INFO: Decrypting backup file."
+  {
+    gpg -o "${backup_path%.*}" --batch --yes --passphrase "$BTCPAY_BACKUP_PASSPHRASE" -d "$backup_path"
+    backup_path="${backup_path%.*}"
+    echo "SUCCESS: Decryption done."
+  } || {
+    echo "ERROR: Decryption failed. Please check the error message above."
+    exit 1
+  }
+fi
+
+cd "$restore_dir"
+
+echo "INFO: Extracting files in $(pwd)."
+tar -h -xvf "$backup_path" -C "$restore_dir"
+
+# basic sanity checks
+if [ ! -f "$dbdump_name" ]; then
+  echo "ERROR: '$dbdump_name' does not exist."
+  exit 1
+fi
+
+if [ ! -d "volumes" ]; then
+  echo "ERROR: volumes directory does not exist."
+  exit 1
+fi
+
+cd "$btcpay_dir"
+. helpers.sh
+
+cd "$restore_dir"
+
+{
+  echo "INFO: Restoring volumes."
+  # ensure the volumes dir exists
+  if [ ! -d "$docker_dir/volumes" ]; then
+    mkdir -p "$docker_dir/volumes"
+  fi
+  # copy volume directories over
+  cp -r volumes/* "$docker_dir/volumes/"
+  # ensure datadirs excluded from the backup exist
+  mkdir -p "$docker_dir/volumes/generated_postgres_datadir/_data"
+  echo "INFO: Volume restore done."
+} || {
+  echo "ERROR: Restoring volumes failed. Please check the error message above."
+  exit 1
+}
+
+{
+  echo "INFO: Starting database container."
+  docker-compose -f "$BTCPAY_DOCKER_COMPOSE" up -d postgres
+  dbcontainer=$(docker ps -a -q -f "name=postgres")
+  if [ -z "$dbcontainer" ]; then
+    echo "ERROR: Database container could not be started or found."
+    exit 1
+  fi
+} || {
+  echo "ERROR: Starting database container failed.
Please check the error message above." + exit 1 +} + +cd "$restore_dir" + +{ + echo "INFO: Restoring database..." + gunzip -c $dbdump_name | docker exec -i "$dbcontainer" psql -U postgres postgres -a + echo "SUCCESS: Database restore done." +} || { + echo "ERROR: Restoring database failed. Please check the error message above." + exit 1 +} + +echo "INFO: Cleaning up." +rm -rf "$restore_dir" + +echo "SUCCESS: Restore done" diff --git a/btcpayserver/restore.sh b/btcpayserver/restore.sh new file mode 100755 index 0000000..7e09e0f --- /dev/null +++ b/btcpayserver/restore.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +cd "$(dirname "$0")" + +if [ "$RESTORE_BTCPAY" = false ]; then + exit 0 +fi + +if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then + # push the restoration archive to the remote server + echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH" + + REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver" + ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH" + REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz" + scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH" + + # we clean up any old containers first before restoring. + ssh "$FQDN" docker system prune -f + + # push the modified restore script to the remote directory, set permissions, and execute. + scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh" + ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh" + ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'" + + # now, we're going to take things down because aparently we this needs to be re-exececuted. + ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh" + +else + echo "ERROR: File does not exist." + exit 1 +fi diff --git a/btcpayserver/stub_btcpay_setup.sh b/btcpayserver/stub_btcpay_setup.sh new file mode 100755 index 0000000..06b08ac --- /dev/null +++ b/btcpayserver/stub_btcpay_setup.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +set -e +cd "$(dirname "$0")" + +# default is for regtest +CLIGHTNING_WEBSOCKET_PORT=9736 +if [ "$BITCOIN_CHAIN" = testnet ]; then + CLIGHTNING_WEBSOCKET_PORT=9737 +elif [ "$BITCOIN_CHAIN" = mainnet ]; then + CLIGHTNING_WEBSOCKET_PORT=9738 +fi + +export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT" + +# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar" +# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data" + +# This is the config for a basic proxy to the listening port 127.0.0.1:2368 +# It also supports modern TLS, so SSL certs must be available. +#opt-add-nostr-relay; + +export BTCPAYSERVER_GITREPO="https://github.com/btcpayserver/btcpayserver-docker" + +cat > "$SITE_PATH/btcpay.sh" < ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml </dev/null 2>&1; then + echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'." + exit 1 + fi +done + +# do a spot check; if we are on production warn. +if lxc remote get-default | grep -q "production"; then + echo "WARNING: You are running command against a production system!" + echo "" + + # check if there are any uncommited changes. It's dangerous to + # alter production systems when you have commits to make or changes to stash. 
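+    # NOTE: a sketch of an equivalent check via git's porcelain output,
+    # which covers staged and unstaged changes alike:
+    #   if [ -n "$(git status --porcelain)" ]; then
+    #       echo "ERROR: You have uncommitted changes!"; exit 1
+    #   fi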
+    if git update-index --refresh | grep -q "needs update"; then
+        echo "ERROR: You have uncommitted changes! You MUST commit or stash all changes to continue."
+        exit 1
+    fi
+
+    RESPONSE=
+    read -r -p "Are you sure you want to continue? (y/n): " RESPONSE
+    if [ "$RESPONSE" != "y" ]; then
+        echo "STOPPING."
+        exit 1
+    fi
+
+fi
+
+DOMAIN_NAME=
+RUN_CERT_RENEWAL=true
+SKIP_WWW=false
+RESTORE_WWW=false
+BACKUP_CERTS=true
+BACKUP_APPS=true
+BACKUP_BTCPAY=true
+BACKUP_BTCPAY_ARCHIVE_PATH=
+RESTORE_BTCPAY=false
+SKIP_BTCPAY=false
+UPDATE_BTCPAY=false
+RECONFIGURE_BTCPAY_SERVER=false
+CLUSTER_NAME="$(lxc remote get-default)"
+STOP_SERVICES=false
+USER_SAYS_YES=false
+RESTART_FRONT_END=true
+
+# grab any modifications from the command line.
+for i in "$@"; do
+    case $i in
+        --restore-www)
+            RESTORE_WWW=true
+            BACKUP_APPS=false
+            RUN_CERT_RENEWAL=false
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --restore-btcpay)
+            RESTORE_BTCPAY=true
+            BACKUP_BTCPAY=false
+            RUN_CERT_RENEWAL=false
+            shift
+            ;;
+        --backup-certs)
+            BACKUP_CERTS=true
+            shift
+            ;;
+        --no-backup-www)
+            BACKUP_CERTS=false
+            BACKUP_APPS=false
+            shift
+            ;;
+        --stop)
+            STOP_SERVICES=true
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --restart-front-end)
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --domain=*)
+            DOMAIN_NAME="${i#*=}"
+            shift
+            ;;
+        --backup-archive-path=*)
+            BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
+            shift
+            ;;
+        --update-btcpay)
+            UPDATE_BTCPAY=true
+            shift
+            ;;
+        --skip-www)
+            SKIP_WWW=true
+            shift
+            ;;
+        --skip-btcpay)
+            SKIP_BTCPAY=true
+            shift
+            ;;
+        --backup-ghost)
+            BACKUP_APPS=true
+            shift
+            ;;
+        --no-cert-renew)
+            RUN_CERT_RENEWAL=false
+            shift
+            ;;
+        --reconfigure-btcpay)
+            RECONFIGURE_BTCPAY_SERVER=true
+            shift
+            ;;
+        -y)
+            USER_SAYS_YES=true
+            shift
+            ;;
+        *)
+            echo "Unexpected option: $i"
+            exit 1
+            ;;
+    esac
done
+
+if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
+    echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH must be set when RESTORE_BTCPAY=true."
+    exit 1
+fi
+
+# set up our default paths.
+source ../../defaults.sh
+
+export DOMAIN_NAME="$DOMAIN_NAME"
+export REGISTRY_DOCKER_IMAGE="registry:2"
+export RESTORE_WWW="$RESTORE_WWW"
+export STOP_SERVICES="$STOP_SERVICES"
+export BACKUP_CERTS="$BACKUP_CERTS"
+export BACKUP_APPS="$BACKUP_APPS"
+export RESTORE_BTCPAY="$RESTORE_BTCPAY"
+export BACKUP_BTCPAY="$BACKUP_BTCPAY"
+export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
+export CLUSTER_NAME="$CLUSTER_NAME"
+export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
+export USER_SAYS_YES="$USER_SAYS_YES"
+export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
+export RESTART_FRONT_END="$RESTART_FRONT_END"
+
+# todo convert this to Trezor-T
+SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
+export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
+if [ ! -f "$SSH_PUBKEY_PATH" ]; then
+    # generate a new SSH key for the base vm image.
+    ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
+fi
+
+# ensure our cluster path is created.
+mkdir -p "$CLUSTER_PATH"
+
+CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
+export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+
+#########################################
+if [ ! -f "$CLUSTER_DEFINITION" ]; then
+    echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster'."
+    exit 1
+fi
+
+source "$CLUSTER_DEFINITION"
+
+# this is our password generation mechanism.
Relying on GPG for secure password generation +function new_pass { + gpg --gen-random --armor 1 25 +} + + +function stub_site_definition { + mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites" + + # create a symlink from the CLUSTERPATH/sites/DOMAIN_NAME to the ss-sites/domain name + if [ ! -d "$PROJECT_PATH/sites/$DOMAIN_NAME" ]; then + ln -s "$SITE_PATH" "$PROJECT_PATH/sites/$DOMAIN_NAME" + fi + + if [ ! -f "$SITE_PATH/site_definition" ]; then + # check to see if the enf file exists. exist if not. + SITE_DEFINITION_PATH="$SITE_PATH/site_definition" + if [ ! -f "$SITE_DEFINITION_PATH" ]; then + + # stub out a site_definition with new passwords. + cat >"$SITE_DEFINITION_PATH" <"$PROJECT_DEFINITION_PATH" < /home/ubuntu/.ss-githead" +fi + +export DOMAIN_NAME="$PRIMARY_DOMAIN" +export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" +if [ "$SKIP_BTCPAY" = false ]; then + ./btcpayserver/go.sh + + ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead" +fi diff --git a/deploy_vms.sh b/deploy_vms.sh new file mode 100755 index 0000000..e881186 --- /dev/null +++ b/deploy_vms.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" + + +## This is a weird if clause since we need to LEFT-ALIGN the statement below. +SSH_STRING="Host ${FQDN}" +if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then + +########## BEGIN +cat >> "$SSH_HOME/config" <<-EOF + +${SSH_STRING} + HostName ${FQDN} + User ubuntu +EOF +### + +fi + +ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN" + +# if the machine doesn't exist, we create it. +if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then + + # create a base image if needed and instantiate a VM. + if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then + echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition." + echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS." + exit 1 + fi + + ./stub_lxc_profile.sh "$LXD_VM_NAME" + + lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$LXD_UBUNTU_BASE_VERSION" "$LXD_VM_NAME" + + # now let's create a new VM to work with. + #@lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm + + # let's PIN the HW address for now so we don't exhaust IP + # and so we can set DNS internally. + lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION" + lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB" + + lxc start "$LXD_VM_NAME" + + bash -c "./wait_for_lxc_ip.sh --lxc-name=$LXD_VM_NAME" +fi + +# scan the remote machine and install it's identity in our SSH known_hosts file. +ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts" + +# create a directory to store backup archives. This is on all new vms. 
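+# NOTE: a sketch of a fail-fast reachability check that could precede the
+# remote mkdir below:
+#   if ! ssh -o ConnectTimeout=5 "$FQDN" true; then
+#       echo "ERROR: cannot reach $FQDN over SSH."; exit 1
+#   fi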
+ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups" diff --git a/domain_env.sh b/domain_env.sh new file mode 100755 index 0000000..c2ee5fb --- /dev/null +++ b/domain_env.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -ex + +export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME" +export BTCPAY_FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME" +export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME" +export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME" +export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME" +export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME" +export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME" +export ADMIN_ACCOUNT_USERNAME="info" +export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME" +export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud" +export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea" +export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES" + + +SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )" +export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}" +echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id" + +export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1)) + +STACK_NAME="$DOMAIN_IDENTIFIER-en" +export NEXTCLOUD_STACK_TAG="nextcloud-$STACK_NAME" +export NEXTCLOUD_DB_STACK_TAG="nextclouddb-$STACK_NAME" diff --git a/stub_lxc_profile.sh b/stub_lxc_profile.sh new file mode 100755 index 0000000..d55c080 --- /dev/null +++ b/stub_lxc_profile.sh @@ -0,0 +1,226 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" + +LXD_HOSTNAME="${1:-}" + +# generate the custom cloud-init file. Cloud init installs and configures sshd +SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH") +eval "$(ssh-agent -s)" +ssh-add "$SSH_HOME/id_rsa" +export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY" + +export FILENAME="$LXD_HOSTNAME.yml" +mkdir -p "$PROJECT_PATH/cloud-init" +YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME" + +# If we are deploying the www, we attach the vm to the underlay via macvlan. +cat > "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" < /dev/null 2>&1 +# fi + +# if [ "$DEPLOY_NEXTCLOUD" = true ]; then +# xdg-open "http://$NEXTCLOUD_FQDN" > /dev/null 2>&1 +# fi + +# if [ "$DEPLOY_GITEA" = true ]; then +# xdg-open "http://$GITEA_FQDN" > /dev/null 2>&1 +# fi + diff --git a/www/prepare_clams.sh b/www/prepare_clams.sh new file mode 100755 index 0000000..480dc55 --- /dev/null +++ b/www/prepare_clams.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -ex +cd "$(dirname "$0")" + +# deploy clams wallet. +LOCAL_CLAMS_REPO_PATH="$(pwd)/clams" +CLAMS_APP_DOCKER_REPO_URL="https://github.com/farscapian/clams-app-docker" +if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then + git clone "$CLAMS_APP_DOCKER_REPO_URL" "$LOCAL_CLAMS_REPO_PATH" +else + cd "$LOCAL_CLAMS_REPO_PATH" + git config --global pull.rebase false + git pull + cd - +fi + +# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME" +BROWSER_APP_GIT_TAG="1.5.0" +BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app" +BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG" + +# build the browser-app image. +if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then + docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \ + --build-arg VERSION="$BROWSER_APP_GIT_TAG" \ + -t "$BROWSER_APP_IMAGE_NAME" \ + $(pwd)/clams/frontend/browser-app/ +fi + +# If the clams-root volume doesn't exist, we create and seed it. +if ! 
docker volume list | grep -q clams-root; then
+    docker volume create clams-root
+    docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
+fi
diff --git a/www/restore_path.sh b/www/restore_path.sh
new file mode 100755
index 0000000..c589bca
--- /dev/null
+++ b/www/restore_path.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -eux
+cd "$(dirname "$0")"
+
+FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
+if [ "$FILE_COUNT" = 0 ]; then
+    echo "WARN: there are no files in the local backup path '$LOCAL_BACKUP_PATH'; skipping the restore for this app."
+    exit 0
+fi
+
+# if the user said -y at the cli, we can skip this.
+if [ "$USER_SAYS_YES" = false ]; then
+
+    RESPONSE=
+    read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN'? (y/n): " RESPONSE
+    if [ "$RESPONSE" != "y" ]; then
+        echo "STOPPING."
+        exit 0
+    fi
+
+fi
+
+# delete the target backup path so we can push restoration files from the management machine.
+ssh "$PRIMARY_WWW_FQDN" sudo rm -rf "$REMOTE_SOURCE_BACKUP_PATH"
+
+# scp our local backup directory to the remote machine
+ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_BACKUP_PATH"
+ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
+
+scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
+
+# now we run duplicity to restore the archive.
+ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
+
+# reset folder owner to ubuntu
+ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
\ No newline at end of file
diff --git a/www/stop_docker_stacks.sh b/www/stop_docker_stacks.sh
new file mode 100755
index 0000000..b5f48e6
--- /dev/null
+++ b/www/stop_docker_stacks.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+# bring down ghost instances.
+for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
+    export DOMAIN_NAME="$DOMAIN_NAME"
+    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+
+    # source the site path so we know what features it has.
+    source ../../../defaults.sh
+    source "$SITE_PATH/site_definition"
+    source ../domain_env.sh
+
+    ### Stop all services.
+    for APP in ghost nextcloud gitea nostr; do
+        # back up each language for each app.
+        for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
+            STACK_NAME="$DOMAIN_IDENTIFIER-$APP-$LANGUAGE_CODE"
+
+            if docker stack list --format "{{.Name}}" | grep -q "$STACK_NAME"; then
+                docker stack rm "$STACK_NAME"
+                sleep 2
+            fi
+
+            # these variables are used by both the backup and restore scripts.
+            export APP="$APP"
+            export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
+            export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
+
+            # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
+            export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
+            if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
+                mkdir -p "$LOCAL_BACKUP_PATH"
+            fi
+
+            if [ "$RESTORE_WWW" = true ]; then
+                ./restore_path.sh
+                #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
+            elif [ "$BACKUP_APPS" = true ]; then
+                # if we're not restoring, then we may or may not back up.
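+                # NOTE: a sketch for sanity-checking the duplicity chain after
+                # backup_path.sh below completes (paths illustrative):
+                #   PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity \
+                #       collection-status "file://$LOCAL_BACKUP_PATH"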
+ ./backup_path.sh + fi + done + done +done diff --git a/www/stub/ghost_yml.sh b/www/stub/ghost_yml.sh new file mode 100755 index 0000000..e0683fb --- /dev/null +++ b/www/stub/ghost_yml.sh @@ -0,0 +1,113 @@ +#!/bin/bash + +set -eu +cd "$(dirname "$0")" + +for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do + export DOMAIN_NAME="$DOMAIN_NAME" + export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" + + # source the site path so we know what features it has. + source ../../../../defaults.sh + source "$SITE_PATH/site_definition" + source ../../domain_env.sh + + # for each language specified in the site_definition, we spawn a separate ghost container + # at https://www.domain.com/$LANGUAGE_CODE + for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do + + STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE" + + # ensure directories on remote host exist so we can mount them into the containers. + ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME" + ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db" + + export GHOST_STACK_TAG="ghost-$STACK_NAME" + export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME" + + # todo append domain number or port number. + WEBSTACK_PATH="$SITE_PATH/webstack" + mkdir -p "$WEBSTACK_PATH" + export DOCKER_YAML_PATH="$WEBSTACK_PATH/ghost-$LANGUAGE_CODE.yml" + + # here's the NGINX config. We support ghost and nextcloud. + cat > "$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" < "$DOCKER_YAML_PATH" + cat >>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" < "$DOCKER_YAML_PATH" < "$NGINX_CONF_PATH" + +# iterate over all our domains and create the nginx config file. +iteration=0 + +for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do + export DOMAIN_NAME="$DOMAIN_NAME" + export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" + export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}" + + # source the site path so we know what features it has. + source ../../../../defaults.sh + source "$SITE_PATH/site_definition" + source ../../domain_env.sh + echo "after" + if [ $iteration = 0 ]; then + cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <https redirect + cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" + for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do + STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE" + cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" < 1 )); then + # we only need this clause if we know there is more than once lanuage being rendered. + cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <> "$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" < "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" <> "$DOCKER_YAML_PATH" < "$DOCKER_YAML_PATH" + cat >>"$DOCKER_YAML_PATH" <"$NOSTR_CONFIG_PATH" <
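
NOTE: a usage sketch, assuming the flags parsed in deploy.sh's option loop and
a placeholder domain:

    ./deploy.sh --domain=example.com
    ./deploy.sh --domain=example.com --update-btcpay -y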