diff --git a/Dockerfile b/Dockerfile
index 79fc948..b15cdc8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,17 @@
-FROM ubuntu:21.04
+FROM ubuntu:22.04
 
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync duplicity sshfs snapd lxd-client
 
 RUN mkdir /sovereign-stack
-COPY ./ /sovereign-stack
+COPY ./deployment /sovereign-stack
 WORKDIR /sovereign-stack
 
-RUN mkdir /site
-VOLUME /site
-ENV SITE_PATH=/site
+RUN mkdir /domain
+VOLUME /domain
+ENV SITE_PATH=/domain
 
 COPY ./entrypoint.sh /entrypoint.sh
 RUN chmod 0744 /entrypoint.sh
-
-
-CMD /entrypoint.sh
\ No newline at end of file
+CMD /entrypoint.sh
diff --git a/README.md b/README.md
index 0c1982b..8d83732 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,16 @@
-# Documentation
+# Directions
 
-Please visit the [https://www.sovereign-stack.org](Sovereign Stack) website for documentation related to this repository.
+ALL files at this level SHOULD be executed on the management machine! If you have just downloaded Sovereign Stack, run `install.sh` to install all the dependencies required by Sovereign Stack scripts.
+
+# Dockerfile
+
+If you want to run the Sovereign Stack management machine activities inside a docker container, you can do so by 1) building the image and 2) running the resulting sovereign-stack docker container:
+
+## Building
+
+docker build -t sovereign-stack .
+
+## Running
+
+docker run -it -v "$HOME/.sites/domain.tld:/domain" \
+  sovereign-stack
\ No newline at end of file
diff --git a/deploy.sh b/deploy.sh
index 31905cf..349f3d8 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -3,7 +3,6 @@
 set -exu
 
 cd "$(dirname "$0")"
-
 check_dependencies () {
     for cmd in "$@"; do
         if ! command -v "$cmd" >/dev/null 2>&1; then
@@ -24,7 +23,7 @@ VPS_HOSTING_TARGET=
 RUN_CERT_RENEWAL=true
 USER_NO_BACKUP=false
 USER_RUN_RESTORE=false
-BTC_CHAIN=testnet
+BTC_CHAIN=regtest
 UPDATE_BTCPAY=false
 RECONFIGURE_BTCPAY_SERVER=false
 BTCPAY_ADDITIONAL_HOSTNAMES=
@@ -72,6 +71,10 @@ for i in "$@"; do
         BTC_CHAIN=mainnet
         shift
         ;;
+    --testnet)
+        BTC_CHAIN=testnet
+        shift
+        ;;
     --reconfigure-btcpay)
         RECONFIGURE_BTCPAY_SERVER=true
         shift
@@ -96,6 +99,11 @@ if [ -z "$VPS_HOSTING_TARGET" ]; then
     exit 1
 fi
 
+if [ -z "$DOMAIN_NAME" ]; then
+    echo "ERROR: You MUST specify --domain=domain.tld"
+    exit 1
+fi
+
 export DOMAIN_NAME="$DOMAIN_NAME"
 export VPS_HOSTING_TARGET="$VPS_HOSTING_TARGET"
 export LXD_DISK_TO_USE="$LXD_DISK_TO_USE"
@@ -176,8 +184,6 @@ for APP_TO_DEPLOY in btcpay www umbrel; do
     if [ "$MACHINE_EXISTS" = true ]; then
         # we delete the machine if the user has directed us to
         if [ "$MIGRATE_VPS" = true ]; then
-
-            # run the domain_init based on user input.
 
             if [ "$USER_NO_BACKUP" = true ]; then
                 echo "Machine exists. We don't need to back it up because the user has directed --no-backup."
diff --git a/deployment/README.md b/deployment/README.md
new file mode 100644
index 0000000..0c1982b
--- /dev/null
+++ b/deployment/README.md
@@ -0,0 +1,3 @@
+# Documentation
+
+Please visit the [Sovereign Stack](https://www.sovereign-stack.org) website for documentation related to this repository.
diff --git a/deployment/backup_btcpay.sh b/deployment/backup_btcpay.sh
new file mode 100755
index 0000000..c7f43dd
--- /dev/null
+++ b/deployment/backup_btcpay.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -ex
+
+# take the services down, create a backup archive, then pull it down.
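+# NOTE (assumption): FQDN, REMOTE_HOME, and LOCAL_BACKUP_PATH are expected to be
+# exported by the calling script (deploy.sh), and "$1" is the backup label, e.g.:
+#   ./backup_btcpay.sh "before-update-$UNIX_BACKUP_TIMESTAMP"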
+ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./backup.sh" +ssh "$FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz" +ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz" +scp "$FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$LOCAL_BACKUP_PATH/btcpay-$1.tar.gz" diff --git a/deployment/backup_www.sh b/deployment/backup_www.sh new file mode 100755 index 0000000..d56d1c2 --- /dev/null +++ b/deployment/backup_www.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" + +# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files. +# we could eliminate that and simply save duplicity backups to the management machine running the script +# this could be done by using a local path and mounting it on the remote VPS. +# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs + +# step 1: run duplicity on the remote system to backup all files to the remote system. +ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --exclude "$REMOTE_HOME/backups" "$REMOTE_HOME" "file://$REMOTE_BACKUP_PATH" +ssh "$FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH" + +# now let's pull down the latest files from the backup directory. +# create a temp directory to serve as the mountpoint for the remote machine backups directory +sshfs "$FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH" + +# rsync the files from the remote server to our local backup path. +rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/" + +# step 4: unmount the SSHFS filesystem and cleanup. +umount "$SSHFS_PATH" +rm -rf "$SSHFS_PATH" diff --git a/deployment/command.sh b/deployment/command.sh new file mode 100644 index 0000000..77ba2be --- /dev/null +++ b/deployment/command.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +bash -c ./deploy.sh --domain=bitizen.store --hosting-provider=lxd --macvlan-interface=eno3 --storage-backend=/dev/sda diff --git a/deployment/defaults.sh b/deployment/defaults.sh new file mode 100644 index 0000000..bddab5a --- /dev/null +++ b/deployment/defaults.sh @@ -0,0 +1,141 @@ +#!/bin/bash + +set -eu + +export DEPLOY_WWW_SERVER=false +export DEPLOY_BTCPPAY_SERVER=false +export DEPLOY_UMBREL_VPS=false + +export DEPLOY_GHOST=true +export DEPLOY_NOSTR=false +export DEPLOY_ONION_SITE=false +export DEPLOY_NEXTCLOUD=false +export DEPLOY_GITEA=false + +export WWW_HOSTNAME="www" +export BTCPAY_HOSTNAME="btcpay" +export UMBREL_HOSTNAME="umbrel" +export NEXTCLOUD_HOSTNAME="nextcloud" +export GITEA_HOSTNAME="git" +export NOSTR_HOSTNAME="messages" +export NOSTR_ACCOUNT_PUBKEY= + +export DDNS_PASSWORD= + +# this is where the html is sourced from. +export SITE_HTML_PATH= + +# enter your AWS Access Key and Secret Access Key here. +export AWS_ACCESS_KEY= +export AWS_SECRET_ACCESS_KEY= + +# if overridden, the app will be deployed to proxy $BTCPAY_HOSTNAME.$DOMAIN_NAME requests to the URL specified. +# this is useful when you want to oursource your BTCPAY fullnode/lightning node. +#export BTCPAY_HANDLER_URL= + + +export SMTP_SERVER="smtp.mailgun.org" +export SMTP_PORT="587" + +# default AWS region and AMI (free-tier AMI ubuntu 20.10) +export AWS_REGION="us-east-1" + +# AMI NAME: +# ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20220420 +export AWS_AMI_ID="ami-09d56f8956ab235b3" +WWW_INSTANCE_TYPE="t2.micro" +BTCPAY_INSTANCE_TYPE="t2.medium" + +# goal will be to keep any particular instance to run AT OR BELOW t2.medium. 
+# other options are t2.small, micro, nano; micro is free-tier eligible.
+# [vCPUs, Mem(GiB)]
+# nano [1,0.5], micro [1,1] (free-tier eligible), small [1,2], medium [2,4], large [2,8], xlarge [4,16], 2xlarge [8,32]
+
+export WWW_INSTANCE_TYPE="$WWW_INSTANCE_TYPE"
+export BTCPAY_INSTANCE_TYPE="$BTCPAY_INSTANCE_TYPE"
+
+export SMTP_PASSWORD=
+export GHOST_MYSQL_PASSWORD=
+export GHOST_MYSQL_ROOT_PASSWORD=
+export NEXTCLOUD_MYSQL_PASSWORD=
+export GITEA_MYSQL_PASSWORD=
+export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
+export GITEA_MYSQL_ROOT_PASSWORD=
+export DUPLICITY_BACKUP_PASSPHRASE=
+#opt-add-fireflyiii;opt-add-zammad
+export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-configurator;"
+export SSH_HOME="$HOME/.ssh"
+export VLAN_INTERFACE=
+export CACHE_DIR="$HOME/cache"
+export VM_NAME=
+export DEV_MEMORY_MB="4096"
+export DEV_CPU_COUNT="4"
+export SSHFS_PATH="/tmp/sshfs_temp"
+
+export NEXTCLOUD_SPACE_GB=10
+
+DEV_LXD_REMOTE="$(lxc remote get-default)"
+export DEV_LXD_REMOTE="$DEV_LXD_REMOTE"
+
+export SITE_TITLE=
+
+# we use this later when we create a VM, to annotate which git commit (from a tag) we used.
+LATEST_GIT_TAG="$(git describe --abbrev=0)"
+export LATEST_GIT_TAG="$LATEST_GIT_TAG"
+
+LATEST_GIT_COMMIT="$(git rev-parse HEAD)"
+export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
+
+
+# let's ensure all the tools are installed
+if ! command -v rsync >/dev/null 2>&1; then
+    echo "ERROR: rsync is not installed. You may want to install your dependencies."
+    exit 1
+fi
+
+# shellcheck disable=SC1091
+export SITE_PATH="$HOME/.sites"
+export LXD_DISK_TO_USE=
+
+
+ENABLE_NGINX_CACHING=false
+
+
+
+# TODO
+# 1 add check for ~/.aws/credentials and stub one out
+# 2 ensure install.sh has been run by checking for tor, docker-machine, lxd, wait-for-it, etc.
+# 3 pretty much just run the install script if anything is awry
+# 4 maybe check to ensure all the CNAME and A+ records are there first so we can quit before machine creation.
+
+export SITE_PATH="$SITE_PATH/$DOMAIN_NAME"
+if [ ! -d "$SITE_PATH" ]; then
+    echo "ERROR: '$SITE_PATH' does not exist."
+    exit 1
+fi
+
+export SITE_PATH="$SITE_PATH"
+export BTC_CHAIN="$BTC_CHAIN"
+
+# if we're running aws/public, we enable nginx caching since it's a public site.
+if [ "$VPS_HOSTING_TARGET" = aws ]; then
+    # TODO the correct behavior is to be =true, but cookies aren't working right now.
+    ENABLE_NGINX_CACHING=true
+fi
+
+DEFAULT_DB_IMAGE="mariadb:10.6.5"
+export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"
+
+# run the docker stack.
+export GHOST_IMAGE="ghost:4.44.0"
+export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
+export NGINX_IMAGE="nginx:1.21.6"
+export NEXTCLOUD_IMAGE="nextcloud:23.0.2"
+export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
+
+export GITEA_IMAGE="gitea/gitea:latest"
+export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
+
+export WWW_MAC_ADDRESS=
+export BTCPAY_MAC_ADDRESS=
+export UMBREL_MAC_ADDRESS=
\ No newline at end of file
diff --git a/deployment/domain_init.sh b/deployment/domain_init.sh
new file mode 100755
index 0000000..a2bf3a3
--- /dev/null
+++ b/deployment/domain_init.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+set -eux
+cd "$(dirname "$0")"
+
+# let's make sure we have an ssh keypair. We just use ~/.ssh/id_rsa.
+if [ ! -f "$SSH_HOME/id_rsa" ]; then
+    # generate a new SSH key for the base vm image.
+    ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
+fi
+
+# if an authorized_keys file does not exist, we'll stub one out with the current user.
+# add additional id_rsa.pub entries manually for more administrative logins, e.g.:
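+#   cat /path/to/admin2_id_rsa.pub >> "$SITE_PATH/authorized_keys"   # (hypothetical key path)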
+if [ ! -f "$SITE_PATH/authorized_keys" ]; then + cat "$SSH_HOME/id_rsa.pub" >> "$SITE_PATH/authorized_keys" +fi + +## This is a weird if clause since we need to LEFT-ALIGN the statement below. +SSH_STRING="Host ${FQDN}" +if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then + +########## BEGIN +cat >> "$SSH_HOME/config" <<-EOF + +${SSH_STRING} + HostName ${FQDN} + User ubuntu +EOF +### + +fi + +# when set to true, this flag indicates that a new VPS was created during THIS script run. +if [ "$VPS_HOSTING_TARGET" = aws ]; then + # let's create the remote VPS if needed. + if ! docker-machine ls -q --filter name="$FQDN" | grep -q "$FQDN"; then + RUN_BACKUP=false + + ./provision_vps.sh + + ./prepare_vps_host.sh + fi +elif [ "$VPS_HOSTING_TARGET" = lxd ]; then + ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN" + + #check to ensure the MACVLAN interface has been set by the user + if [ -z "$MACVLAN_INTERFACE" ]; then + echo "ERROR: MACVLAN_INTERFACE has not been defined. Use '--macvlan-interface=eno1' for example." + exit 1 + fi + + # let's first check to ensure there's a cert.tar.gz. We need a valid cert for testing. + if [ ! -f "$SITE_PATH/certs.tar.gz" ]; then + echo "ERROR: We need a valid cert for testing." + exit 1 + fi + + # if the machine doesn't exist, we create it. + if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then + export RUN_BACKUP=false + + # create a base image if needed and instantiate a VM. + if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then + echo "ERROR: You MUST define a MAC Address for all your machines." + exit 1 + fi + + ./provision_lxc.sh + fi + + # prepare the VPS to support our applications and backups and stuff. + ./prepare_vps_host.sh +fi + + +# this tells our local docker client to target the remote endpoint via SSH +export DOCKER_HOST="ssh://ubuntu@$FQDN" + +# the following scripts take responsibility for the rest of the provisioning depending on the app you're deploying. +if [ "$APP_TO_DEPLOY" = www ]; then + ./go_www.sh +elif [ "$APP_TO_DEPLOY" = btcpay ]; then + ./go_btcpay.sh +elif [ "$APP_TO_DEPLOY" = umbrel ]; then + ./go_umbrel.sh +elif [ "$APP_TO_DEPLOY" = certonly ]; then + # renew the certs; certbot takes care of seeing if we need to actually renew. + if [ "$RUN_CERT_RENEWAL" = true ]; then + ./generate_certs.sh + fi + + echo "INFO: Please run 'docker-machine rm -f $FQDN' to remove the remote VPS." + exit +else + echo "ERROR: APP_TO_DEPLOY not set correctly. Please refer to the documentation for allowable values." + exit +fi + +echo "Successfull deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET; Latest git tag is $LATEST_GIT_TAG" >> "$SITE_PATH/debug.log" diff --git a/deployment/down_btcpay_compose.sh b/deployment/down_btcpay_compose.sh new file mode 100644 index 0000000..05a7907 --- /dev/null +++ b/deployment/down_btcpay_compose.sh @@ -0,0 +1,2 @@ +#!/bin/bash + diff --git a/deployment/expensive-relay/Dockerfile b/deployment/expensive-relay/Dockerfile new file mode 100644 index 0000000..e69de29 diff --git a/deployment/expensive-relay/conf.conf b/deployment/expensive-relay/conf.conf new file mode 100644 index 0000000..e69de29 diff --git a/deployment/generate_certs.sh b/deployment/generate_certs.sh new file mode 100755 index 0000000..d1ad1cd --- /dev/null +++ b/deployment/generate_certs.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" + + +if [ "$VPS_HOSTING_TARGET" = aws ]; then + # let's do a refresh of the certificates. Let's Encrypt will not run if it's not time. 
+    docker pull certbot/certbot
+
+    docker run -it --rm \
+        --name certbot \
+        -p 80:80 \
+        -p 443:443 \
+        -v /etc/letsencrypt:/etc/letsencrypt \
+        -v /var/lib/letsencrypt:/var/lib/letsencrypt certbot/certbot \
+        certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"
+
+    # backup the certs to our SITE_PATH/certs.tar.gz so we have them handy (for local development)
+    ssh "$FQDN" sudo tar -zcvf "$REMOTE_HOME/certs.tar.gz" -C /etc ./letsencrypt
+    ssh "$FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/certs.tar.gz"
+
+    # now pull the tarballs down to the local machine.
+    scp "$FQDN:$REMOTE_HOME/certs.tar.gz" "$SITE_PATH/certs.tar.gz"
+else
+    echo "INFO: Skipping certificate renewal since we're on hosting provider=lxd."
+fi
\ No newline at end of file
diff --git a/deployment/go_btcpay.sh b/deployment/go_btcpay.sh
new file mode 100755
index 0000000..3c8bd6c
--- /dev/null
+++ b/deployment/go_btcpay.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+set -exu
+cd "$(dirname "$0")"
+
+if [ "$RUN_BACKUP" = true ]; then
+    # shellcheck disable=SC2029
+    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
+fi
+
+# we will re-run the btcpay provisioning scripts if directed to do so.
+# if an update does occur, we grab another backup.
+if [ "$UPDATE_BTCPAY" = true ]; then
+
+    if [ "$RUN_BACKUP" = true ]; then
+        # grab a backup PRIOR to the update
+        ./backup_btcpay.sh "before-update-$UNIX_BACKUP_TIMESTAMP"
+    fi
+
+    # run the update.
+    # shellcheck disable=SC2029
+    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-update.sh"
+
+else
+    if [ "$RUN_BACKUP" = true ]; then
+        # we just grab a regular backup
+        ./backup_btcpay.sh "regular-backup-$UNIX_BACKUP_TIMESTAMP"
+    fi
+fi
+
+# run a restoration if specified.
+if [ "$RUN_RESTORE" = true ]; then
+    # shellcheck disable=SC2029
+    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
+    ./restore_btcpay.sh
+fi
+
+# the administrator may have indicated a reconfig; if so, re-run the setup (useful for adding alternative names to TLS)
+if [ "$RECONFIGURE_BTCPAY_SERVER" = true ]; then
+    # re-run the setup script.
+    ./run_btcpay_setup.sh
+fi
+
+if [ "$MIGRATE_VPS" = false ]; then
+    # the default is to resume services, though the admin may want to keep services off (e.g., for a migration).
+    # we bring the services back up by default.
+    # shellcheck disable=SC2029
+    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-up.sh"
+
+    # we wait for lightning to come online too.
+    wait-for-it -t 60 "$FQDN:80"
+    wait-for-it -t 60 "$FQDN:443"
+
+    xdg-open "http://$FQDN"
+else
+    echo "WARNING: The '--migrate' flag was specified. BTCPay Server services HAVE NOT BEEN TURNED ON!"
+    echo "NOTE: You can restore your latest backup to a new host that has BTCPay Server installed."
+fi
diff --git a/deployment/go_umbrel.sh b/deployment/go_umbrel.sh
new file mode 100755
index 0000000..7c2af8d
--- /dev/null
+++ b/deployment/go_umbrel.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+set -exu
+
+ssh "$FQDN" "
+    set -x
+
+    cd /home/ubuntu
+
+    # first, let's make sure we have the latest code. We use git over HTTPS and store it in ~/umbrel.
+    # ~/umbrel is the only folder we need to back up.
+    if [ ! -d ./umbrel ]; then
+        git clone https://github.com/getumbrel/umbrel.git ./umbrel
+    else
+
+        if [ -f ./umbrel/scripts/stop ]; then
+            sudo ./umbrel/scripts/stop
+        fi
+    fi
+"
+
+# # DO SOME BACKUP OPERATION
+
+# ssh "$FQDN" "
+#     set -x
+
+#     mkdir -p /home/ubuntu/backup
+
+#     sudo PASSPHRASE=${DUPLICITY_BACKUP_PASSPHRASE} duplicity --exclude ${REMOTE_HOME}/umbrel/bitcoin/blocks ${REMOTE_HOME}/umbrel file://${REMOTE_BACKUP_PATH}
+#     sudo chown -R ubuntu:ubuntu ${REMOTE_BACKUP_PATH}
+# "
+
+# Start services back up.
+ssh "$FQDN" "
+    set -ex
+    cd /home/ubuntu/umbrel
+
+    git config pull.rebase true
+    git fetch --all --tags
+    git checkout master
+    git pull
+    git checkout tags/v0.4.17
+
+    # start Umbrel on the configured network (mainnet/testnet/regtest):
+    sudo NETWORK=$BTC_CHAIN /home/ubuntu/umbrel/scripts/start
+"
+
+# we wait for lightning to come online too.
+wait-for-it -t 60 "$FQDN:80"
+
+xdg-open "http://$FQDN"
diff --git a/deployment/go_www.sh b/deployment/go_www.sh
new file mode 100755
index 0000000..7e02e8d
--- /dev/null
+++ b/deployment/go_www.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+set -exu
+
+TOR_CONFIG_PATH=
+
+ssh "$FQDN" mkdir -p "$REMOTE_HOME/ghost_site" "$REMOTE_HOME/ghost_db"
+
+if [ "$DEPLOY_NEXTCLOUD" = true ]; then
+    ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/db/data"
+    ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/db/logs"
+    ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/html"
+fi
+
+if [ "$DEPLOY_GITEA" = true ]; then
+    ssh "$FQDN" "mkdir -p $REMOTE_GITEA_PATH/data $REMOTE_GITEA_PATH/db"
+fi
+
+# enable docker swarm mode so we can support docker stacks.
+if ! docker info | grep -q "Swarm: active"; then
+    docker swarm init
+fi
+
+# stop services.
+if docker stack list --format "{{.Name}}" | grep -q webstack; then
+    docker stack rm webstack
+    sleep 20
+fi
+
+# this will generate letsencrypt certs and pull them down locally.
+if [ "$VPS_HOSTING_TARGET" != lxd ]; then
+    # really we should change this if clause to something like
+    # "if the perimeter firewall allows port 80/443, then go ahead."
+    if [ "$VPS_HOSTING_TARGET" = aws ] && [ "$RUN_CERT_RENEWAL" = true ]; then
+        ./generate_certs.sh
+    fi
+else
+    # restore the certs. If they don't exist in a backup, we restore from SITE_PATH.
+    if [ -f "$SITE_PATH/certs.tar.gz" ]; then
+        scp "$SITE_PATH/certs.tar.gz" "ubuntu@$FQDN:$REMOTE_HOME/certs.tar.gz"
+        ssh "$FQDN" "sudo tar -xvf $REMOTE_HOME/certs.tar.gz -C /etc"
+    else
+        echo "ERROR: Certificates do not exist locally."
+        exit 1
+    fi
+fi
+
+
+if [ "$RUN_BACKUP" = true ]; then
+    ./backup_www.sh
+fi
+
+if [ "$RUN_RESTORE" = true ]; then
+    ./restore_www.sh
+fi
+
+if [ "$DEPLOY_ONION_SITE" = true ]; then
+    # ensure the tor image is built
+    docker build -t tor:latest ./tor
+
+    # if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
+    # this is how we generate a new torv3 endpoint.
+    if ! ssh "$FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
+        ssh "$FQDN" "mkdir -p $REMOTE_HOME/tor"
+        TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
+        export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
+        docker stack deploy -c ./tor.yml torstack
+        sleep 20
+        docker stack rm torstack
+        sleep 20
+    fi
+
+    ONION_ADDRESS="$(ssh "$FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
+    export ONION_ADDRESS="$ONION_ADDRESS"
+
+    # # Since we run a separate ghost process, we create a new directory and symlink it to the original
+    # if ! ssh "$FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
ssh "$FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then + # ssh "$FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes" + # fi +fi + +if [ "$RUN_SERVICES" = true ]; then + docker stack deploy -c "$DOCKER_YAML_PATH" webstack + + # start a browser session; point it to port 80 to ensure HTTPS redirect. + wait-for-it -t 320 "$FQDN:80" + wait-for-it -t 320 "$FQDN:443" + + # open bowser tabs. + if [ "$DEPLOY_GHOST" = true ]; then + xdg-open "http://$FQDN" + fi + + if [ "$DEPLOY_NEXTCLOUD" = true ]; then + xdg-open "http://$NEXTCLOUD_FQDN" + fi + + if [ "$DEPLOY_GITEA" = true ]; then + xdg-open "http://$GITEA_FQDN" + fi +fi diff --git a/deployment/lxc_profile.yml b/deployment/lxc_profile.yml new file mode 100644 index 0000000..3cc5f4a --- /dev/null +++ b/deployment/lxc_profile.yml @@ -0,0 +1,148 @@ +config: + limits.cpu: "${DEV_CPU_COUNT}" + limits.memory: "${DEV_MEMORY_MB}MB" + user.vendor-data: | + #cloud-config + + apt_mirror: http://us.archive.ubuntu.com/ubuntu/ + package_update: true + package_upgrade: false + package_reboot_if_required: false + + preserve_hostname: false + fqdn: ${FQDN} + + packages: + - curl + - ssh-askpass + - apt-transport-https + - ca-certificates + - gnupg-agent + - software-properties-common + - lsb-release + - net-tools + - htop + - rsync + - duplicity + - sshfs + - fswatch + - jq + - git + - nano + + groups: + - docker + + users: + - name: ubuntu + shell: /bin/bash + lock_passwd: false + groups: + - docker + sudo: + - ALL=(ALL) NOPASSWD:ALL + ssh_authorized_keys: + - ${SSH_AUTHORIZED_KEY} + + write_files: + - path: ${REMOTE_HOME}/docker.asc + content: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + + mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth + lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh + 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq + L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7 + UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N + cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht + ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo + vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD + G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ + XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj + q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB + tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3 + BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO + v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd + tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk + jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m + 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P + XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc + FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8 + g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm + ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh + 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5 + G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW + FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB + EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF + M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx + Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu + w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk + 
+          z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
+          eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
+          VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
+          1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
+          zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
+          pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
+          ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
+          BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
+          1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
+          YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
+          mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
+          KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
+          JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
+          cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
+          6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
+          U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
+          VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
+          irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
+          SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
+          QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
+          9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
+          24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
+          dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
+          Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
+          H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
+          /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
+          M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
+          xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
+          jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
+          YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
+          =0YYh
+          -----END PGP PUBLIC KEY BLOCK-----
+
+      - path: /etc/ssh/sshd_config
+        content: |
+          Port 22
+          ListenAddress 0.0.0.0
+          Protocol 2
+          ChallengeResponseAuthentication no
+          PasswordAuthentication no
+          UsePAM no
+          LogLevel INFO
+
+    runcmd:
+      - cat ${REMOTE_HOME}/docker.asc | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
+      - sudo rm ${REMOTE_HOME}/docker.asc
+      - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
+      - sudo apt-get remove docker docker.io containerd runc
+      - sudo apt-get update
+      - sudo apt-get install -y docker-ce docker-ce-cli containerd.io
+      - echo "alias ll='ls -lah'" >> ${REMOTE_HOME}/.bash_profile
+      - sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+      - sudo chmod +x /usr/local/bin/docker-compose
+      - sudo apt-get install -y openssh-server
+
+description: Default LXD profile for ${DOMAIN_NAME}
+devices:
+  root:
+    path: /
+    pool: default
+    type: disk
+  config:
+    source: cloud-init:config
+    type: disk
+  enp5s0:
+    nictype: macvlan
+    parent: ${MACVLAN_INTERFACE}
+    type: nic
+name: ${LXD_VM_NAME}
diff --git a/deployment/prepare_vps_host.sh b/deployment/prepare_vps_host.sh
new file mode 100755
index 0000000..d05c763
--- /dev/null
+++ b/deployment/prepare_vps_host.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -exu
+
+# scan the remote machine and install its identity in our SSH known_hosts file.
+ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
+
+# create a directory to store backup archives. This is done on all new VMs.
+ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
+
+if [ "$APP_TO_DEPLOY" = btcpay ]; then
+    echo "INFO: new machine detected. Provisioning BTCPay server scripts."
+
+    ./run_btcpay_setup.sh
+    exit
+fi
diff --git a/deployment/provision_lxc.sh b/deployment/provision_lxc.sh
new file mode 100755
index 0000000..75b64f1
--- /dev/null
+++ b/deployment/provision_lxc.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+
+set -eux
+
+# check to ensure the admin has specified a MACVLAN interface
+if [ -z "$MACVLAN_INTERFACE" ]; then
+    echo "ERROR: MACVLAN_INTERFACE not defined in project."
+    exit 1
+fi
+
+# The base VM image.
+BASE_LXC_IMAGE="ubuntu/22.04/cloud"
+
+# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
+if ! lxc profile list --format csv | grep -q "$LXD_VM_NAME"; then
+    lxc profile create "$LXD_VM_NAME"
+fi
+
+# generate the custom cloud-init file. Cloud-init installs and configures sshd.
+SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub")
+eval "$(ssh-agent -s)"
+ssh-add "$SSH_HOME/id_rsa"
+export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
+envsubst < ./lxc_profile.yml > "$SITE_PATH/cloud-init-$APP_TO_DEPLOY.yml"
+
+# configure the profile with our generated cloud-init.yml file.
+lxc profile edit "$LXD_VM_NAME" < "$SITE_PATH/cloud-init-$APP_TO_DEPLOY.yml"
+
+function wait_for_lxc_ip {
+
+LXC_INSTANCE_NAME="$1"
+IP_V4_ADDRESS=
+while true; do
+    IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
+    export IP_V4_ADDRESS="$IP_V4_ADDRESS"
+    if [ -n "$IP_V4_ADDRESS" ]; then
+        # give the machine extra time to spin up.
+        wait-for-it -t 300 "$IP_V4_ADDRESS:22"
+        break
+    else
+        sleep 1
+        printf '.'
+    fi
+done
+
+}
+
+function run_ddns {
+    # now that the VM has an IP, we can update the DNS record. TODO add additional DNS providers here; namecheap only atm.
+    DDNS_STRING="$VPS_HOSTNAME"
+    if [ "$VPS_HOSTNAME" = www ]; then
+        # next update our DDNS record. TODO enable local/remote name provider.
+        DDNS_STRING="@"
+    fi
+
+    # if the DNS record is incorrect, we run DDNS to get it corrected.
+    if [ "$(getent hosts "$FQDN" | awk '{ print $1 }')" != "$IP_V4_ADDRESS" ]; then
+        curl "https://dynamicdns.park-your-domain.com/update?host=$DDNS_STRING&domain=$DOMAIN_NAME&password=$DDNS_PASSWORD&ip=$IP_V4_ADDRESS"
+
+        DDNS_SLEEP_SECONDS=60
+        while true; do
+            # we test the www CNAME here so we can be assured the underlying record has been corrected.
+            if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$IP_V4_ADDRESS" ]]; then
+                echo ""
+                echo "SUCCESS: The DNS appears to be configured correctly."
+
+                echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow stale DNS records to expire."
+                sleep "$DDNS_SLEEP_SECONDS";
+                break;
+            fi
+
+            printf "." && sleep 2;
+        done
+    fi
+}
+
+# create the default storage pool if necessary
+if ! lxc storage list --format csv | grep -q default; then
+    if [ -n "$LXD_DISK_TO_USE" ]; then
+        lxc storage create default zfs source="$LXD_DISK_TO_USE" size="${ROOT_DISK_SIZE_GB}GB"
+    else
+        lxc storage create default zfs size="${ROOT_DISK_SIZE_GB}GB"
+    fi
+fi
+
+# If our template doesn't exist, we create one.
+if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
+
+    # If the lxc VM does exist, then we will delete it (so we can start fresh)
+    if lxc list -q --format csv | grep -q "$VM_NAME"; then
+        lxc delete "$VM_NAME" --force
+
+        # remove the ssh known_hosts entry, else we get warnings.
+        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VM_NAME"
+    fi
+
+    # let's download our base image.
+    if ! lxc image list --format csv --columns l | grep -q "ubuntu-base"; then
+        # if the image doesn't exist, download it from Ubuntu's image server
+        # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
+        # we don't really need to cache this locally since it gets continually updated upstream.
+        lxc image copy "images:$BASE_LXC_IMAGE" "$DEV_LXD_REMOTE": --alias "ubuntu-base" --public --vm
+    fi
+
+    lxc init \
+        --profile="$LXD_VM_NAME" \
+        "ubuntu-base" \
+        "$VM_NAME" --vm
+
+    # let's PIN the HW address for now so we don't exhaust the IP pool
+    # and so we can set DNS internally.
+
+    lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+
+    lxc start "$VM_NAME"
+
+    # let's wait a minimum of 15 seconds before we start checking for an IP address.
+    sleep 15
+
+    # let's wait for the LXC vm remote machine to get an IP address.
+    wait_for_lxc_ip "$VM_NAME"
+
+    # Let's remove any entry in our known_hosts, then add it back.
+    # we are using the IP address here so we don't have to rely on external DNS
+    # configuration for the base image preparation.
+    ssh-keygen -R "$IP_V4_ADDRESS"
+    ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
+    ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu "$REMOTE_HOME"
+
+    # stop the VM and get a snapshot.
+    lxc stop "$VM_NAME"
+    lxc publish "$DEV_LXD_REMOTE:$VM_NAME" --alias "$VM_NAME" --public
+    lxc delete "$VM_NAME"
+fi
+
+# now let's create a new VM to work with.
+lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
+
+# let's PIN the HW address for now so we don't exhaust the IP pool
+# and so we can set DNS internally.
+lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
+
+lxc start "$LXD_VM_NAME"
+
+wait_for_lxc_ip "$LXD_VM_NAME"
+
+run_ddns
+
+# remove any existing SSH identities for the host.
+ssh-keygen -R "$IP_V4_ADDRESS"
\ No newline at end of file
diff --git a/deployment/provision_vps.sh b/deployment/provision_vps.sh
new file mode 100755
index 0000000..92c5beb
--- /dev/null
+++ b/deployment/provision_vps.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+set -eux
+cd "$(dirname "$0")"
+
+if [ ! -f "$HOME/.aws/credentials" ]; then
+
+    # TODO write a credential file baseline
+    echo "ERROR: Please update your '$HOME/.aws/credentials' file before continuing."
+    mkdir -p "$HOME/.aws"
+    touch "$HOME/.aws/credentials"
+
+    # stub out a site_definition with new passwords.
+    cat >"$HOME/.aws/credentials" <> $REMOTE_HOME/.ssh/authorized_keys"
+
+# we have to ensure ubuntu is able to do sudo-less docker commands.
+docker-machine ssh "$FQDN" sudo usermod -aG docker ubuntu
+
+# we restart so dockerd starts with fresh group membership.
+docker-machine ssh "$FQDN" sudo systemctl restart docker
+
+# TODO INSTALL DOCKER COMPOSE
+
+# let's wire up the DNS so subsequent ssh commands resolve to the VPS.
+./run_ddns.sh
+
+# remove the SSH hostname from known_hosts, as we'll re-scan it in prepare_vps_host.sh.
+# todo why do we need this again?
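+# (assumption: docker-machine can regenerate the VPS host key when re-provisioning, so a stale entry would block ssh)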
+ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN" diff --git a/deployment/restore_btcpay.sh b/deployment/restore_btcpay.sh new file mode 100755 index 0000000..74e5d29 --- /dev/null +++ b/deployment/restore_btcpay.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -exu + +# this scripts ASSUMES services have already been taken down. + +# first let's ask the user for the absolute path to the backup file that we want to restore. +FILE_PATH= +read -r -p "Please enter the absolute path of the backup file you want to restore: ": FILE_PATH +if [ -f "$FILE_PATH" ]; then + # then we grab a backup of the existing stuff BEFORE the restoration attempt + ./backup_btcpay.sh "before-restore-$UNIX_BACKUP_TIMESTAMP" + + echo "INFO: Restoring BTCPAY Server: $FILE_PATH" + ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH" + scp "$FILE_PATH" "$FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz" + ssh "$FQDN" "cd /; sudo tar -xzvf $REMOTE_BACKUP_PATH/btcpay.tar.gz" +else + echo "ERROR: File does not exist." + exit 1 +fi diff --git a/deployment/restore_www.sh b/deployment/restore_www.sh new file mode 100755 index 0000000..32ab1eb --- /dev/null +++ b/deployment/restore_www.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -exu + +# first, this is a restore operation. We need to ask the administrator +# if they want to continue because it results in data loss. +# indeed, our first step is the delete the home directory on the remote server. + +# delete the home directory so we know we are restoring all files from the duplicity archive. +ssh "$FQDN" sudo rm -rf "$REMOTE_HOME/*" + +# scp our local backup directory to the remote machine +ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH" + +# TODO instead of scp the files up there, lets' mount the local backup folder to a remote folder then just run a duplicity restore. +scp -r "$LOCAL_BACKUP_PATH/" "$FQDN:$REMOTE_HOME/backups/$APP_TO_DEPLOY" + +# now we run duplicity to restore the archive. +ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/" "$REMOTE_HOME/" diff --git a/deployment/run_btcpay_setup.sh b/deployment/run_btcpay_setup.sh new file mode 100755 index 0000000..ec71977 --- /dev/null +++ b/deployment/run_btcpay_setup.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set -ex + + +# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar" +# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data" + +# This is the config for a basic proxy to the listening port 127.0.0.1:2368 +# It also supports modern TLS, so SSL certs must be available. +cat > "$SITE_PATH/btcpay.sh" <"$SITE_DEFINITION_PATH" < "$DOCKER_YAML_PATH" + +cat >>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <>"$DOCKER_YAML_PATH" <