
Update to latest project head.

Derek Smith 2023-08-20 20:47:27 -04:00
parent 88291a2ee0
commit 68f487a5dc
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
9 changed files with 49 additions and 116 deletions


@@ -1,3 +1,3 @@
# Documentation
All documentation for this project can be found at [sovereign-stack.org](https://www.sovereign-stack.org).
All documentation for this project can be found at [sovereign-stack.org](https://www.sovereign-stack.org). To get started with this code, check out [this post](https://www.sovereign-stack.org/get/).


@@ -2,7 +2,7 @@
# https://www.sovereign-stack.org/ss-down/
set -eu
set -exu
cd "$(dirname "$0")"
if lxc remote get-default -q | grep -q "local"; then
@@ -15,6 +15,7 @@ OTHER_SITES_LIST=
SKIP_BTCPAYSERVER=false
SKIP_WWW=false
SKIP_CLAMSSERVER=false
BACKUP_WWW_APPS=true
# grab any modifications from the command line.
for i in "$@"; do
@@ -77,6 +78,14 @@ for VIRTUAL_MACHINE in $SERVERS; do
if lxc list | grep -q "$LXD_NAME"; then
bash -c "./stop.sh --server=$VIRTUAL_MACHINE"
if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
APP_LIST="letsencrypt ghost nextcloud gitea nostr"
echo "INFO: Backing up WWW apps."
for APP in $APP_LIST; do
bash -c "$(pwd)/project/www/backup_www.sh --app=$APP"
done
fi
lxc stop "$LXD_NAME"
lxc delete "$LXD_NAME"
@@ -119,4 +128,4 @@ done
if lxc network list -q | grep -q ss-ovn; then
lxc network delete ss-ovn
fi
fi
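The "grab any modifications from the command line" loop in this hunk parses flags with the same case/shift pattern visible in the up.sh hunk later in this diff. A minimal sketch of what it presumably looks like for the new BACKUP_WWW_APPS default (the exact flag names handled by down.sh are not shown here and are assumptions):

for i in "$@"; do
    case $i in
        --skip-www)
            SKIP_WWW=true
            shift
            ;;
        --no-backup-www-apps)
            BACKUP_WWW_APPS=false
            shift
            ;;
        *)
            echo "Unexpected option: $i"
            exit 1
            ;;
    esac
done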

@@ -1 +1 @@
Subproject commit 56e0e057a612fce1150c36b29932c2e6ccf64542
Subproject commit 9eceb40dba3946011723321d059614c9d9acf8fe


@@ -4,7 +4,6 @@
set -e
cd "$(dirname "$0")"
PURGE_LXD=false
# grab any modifications from the command line.
@@ -72,9 +71,10 @@ if [ "$PURGE_LXD" = true ]; then
# done
# done
if lxc storage list --format csv | grep -q ss-base; then
lxc storage delete ss-base
fi
echo "WARNING: ss-basae NOT DELETED. NEED TO TEST THIS SCRIPT"
# if lxc storage list --format csv | grep -q ss-base; then
# lxc storage delete ss-base
# fi
CURRENT_REMOTE="$(lxc remote get-default)"
if ! lxc remote get-default | grep -q "local"; then

deployment/restore.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
set -exu
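
The new restore.sh is an empty stub for now; the bash aliases hunk at the end of this diff exposes it as a new ss-restore command.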


@@ -53,7 +53,12 @@ if [ "$SERVER_TO_STOP" = www ]; then
fi
if [ "$SERVER_TO_STOP" = btcpayserver ]; then
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
if wait-for-it -t 5 "$BTCPAY_SERVER_FQDN":22; then
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
echo "ERROR: the remote BTCPAY Server is not available on ssh."
exit 1
fi
fi
if [ "$SERVER_TO_STOP" = clamsserver ]; then


@@ -38,13 +38,13 @@ OTHER_SITES_LIST=
PRIMARY_DOMAIN=
RUN_CERT_RENEWAL=true
SKIP_BASE_IMAGE_CREATION=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=true
BACKUP_BTCPAY=true
SKIP_BTCPAYSERVER=false
SKIP_WWW=false
SKIP_CLAMSSERVER=false
BACKUP_WWW_APPS=true
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
UPDATE_BTCPAY=false
@@ -62,6 +62,10 @@ for i in "$@"; do
RESTORE_CERTS=true
shift
;;
--restore-wwwserver)
RESTORE_WWW=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
shift
@@ -124,6 +128,7 @@ fi
export REGISTRY_DOCKER_IMAGE="registry:2"
export BACKUP_CERTS="$BACKUP_CERTS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export RESTORE_WWW="$RESTORE_WWW"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export REMOTE_NAME="$REMOTE_NAME"
@@ -131,7 +136,6 @@ export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTORE_CERTS="$RESTORE_CERTS"
export BACKUP_WWW_APPS="$BACKUP_WWW_APPS"
# TODO: convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
@@ -430,25 +434,26 @@ if [ "$SKIP_CLAMSSERVER" = false ]; then
if [ -n "$CLAMS_SERVER_MAC_ADDRESS" ]; then
export DOCKER_HOST="ssh://ubuntu@$CLAMS_SERVER_FQDN"
# enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then
docker swarm init
fi
# set the active env to our CLAMS_FQDN
cat >./project/clams-server/active_env.txt <<EOL
${CLAMS_SERVER_FQDN}
EOL
# and we have to set our environment file as well.
cat > ./project/clams-server/environments/"$CLAMS_SERVER_FQDN" <<EOL
CLAMS_ENV_FILE=./project/clams-server/environments/"$CLAMS_SERVER_FQDN"
# only stub out the file if it doesn't exist; otherwise we leave it be.
if [ ! -f "$CLAMS_ENV_FILE" ]; then
# and we have to set our environment file as well.
cat > "$CLAMS_ENV_FILE" <<EOL
DOCKER_HOST=ssh://ubuntu@${CLAMS_SERVER_FQDN}
DOMAIN_NAME=${PRIMARY_DOMAIN}
ENABLE_TLS=true
BTC_CHAIN=${BITCOIN_CHAIN}
CLN_COUNT=5
CHANNEL_SETUP=prism
CLN_COUNT=200
CHANNEL_SETUP=none
CLAMS_SERVER_PATH=${SITES_PATH}/${PRIMARY_DOMAIN}/clamsserver
EOL
fi
bash -c "./project/clams-server/up.sh -y"
fi
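
Because the heredoc now runs only when the environment file is absent, any manual edits an operator makes survive later deployments. For example (a hypothetical edit; the value 10 is arbitrary):

# Raise the node count after the first run; subsequent ss-up runs leave the file intact.
sed -i 's/^CLN_COUNT=.*/CLN_COUNT=10/' ./project/clams-server/environments/"$CLAMS_SERVER_FQDN"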


@@ -1,89 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
. ./target.sh
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
echo "WARNING: this script backs up your existing remote and saves all data locally in the SSME."
echo " Then, all your VMs are destroyed on the remote resulting is destruction of user data."
echo " But then we re-create everything using the new codebase, then restore user data to the"
echo " newly provisioned VMs."
RESPONSE=
read -r -p "Are you sure you want to continue (y/n): " RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
. ./deployment_defaults.sh
. ./remote_env.sh
. ./project_env.sh
# Check that each of the VMs actually exists.
# (we only migrate instantiated VMs)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-up again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
# source the site path so we know what features it has.
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
# now we want to switch the git HEAD of the project subdirectory to the
# version of the code that was last used.
GIT_COMMIT_ON_REMOTE_HOST="$(ssh ubuntu@$WWW_FQDN docker info | grep -o 'PROJECT_COMMIT=[a-zA-Z0-9]*' | cut -d'=' -f2)"
cd project/
echo "INFO: switch the 'project' repo to commit prior commit '$GIT_COMMIT_ON_REMOTE_HOST'"
echo " This allows Sovereign Stack to can grab a backup using the version of the code"
echo " that was used when the deployment was created."
git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
# run deploy, which backs up everything but doesn't restart any services.
bash -c "./up.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH --backup-www --backup-btcpayserver --skip-base-image"
# call the down script (by default it is non-destructive of user data).
./down.sh
# next we switch back to the current version of the Sovereign Stack scripts for bringing up the new version.
cd project/
echo "INFO: switching the 'project' repo back to the most recent commit '$TARGET_PROJECT_GIT_COMMIT'"
echo " That way new deployments will be instantiated using the latest codebase."
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -
# TODO we can add some additional logic here. For example, if the user wants to provide a source/target project/remote,
# we can back up the source remote+project and restore it to the target remote+project. This will facilitate cross-device migrations.
# However, if the source and target project/remote are the same, we don't really
# need to do any restorations (or backups for that matter, though we still grab one);
# we simply mount the existing data. That's the more common case, where the user is simply upgrading the system in-place.
./up.sh
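
The docker info grep above assumes the remote daemon was stamped with a PROJECT_COMMIT label at deploy time. That stamping is not shown in this diff; one hypothetical way to set such a label would be:

# Hypothetical: record the deploying commit in the remote Docker daemon's labels
# so a later migration can recover which code version was used.
# NOTE: this overwrites any existing /etc/docker/daemon.json on the remote.
PROJECT_COMMIT="$(git rev-parse HEAD)"
ssh "ubuntu@$WWW_FQDN" "echo '{\"labels\":[\"PROJECT_COMMIT=$PROJECT_COMMIT\"]}' | sudo tee /etc/docker/daemon.json >/dev/null && sudo systemctl restart docker"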


@@ -1,13 +1,12 @@
#!/bin/bash
alias ss-up='/home/ubuntu/sovereign-stack/deployment/up.sh $@'
alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-update='/home/ubuntu/sovereign-stack/deployment/update.sh $@'
alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-up='/home/ubuntu/sovereign-stack/deployment/up.sh $@'
alias ss-down='/home/ubuntu/sovereign-stack/deployment/down.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-stop='/home/ubuntu/sovereign-stack/deployment/stop.sh $@'
alias ss-start='/home/ubuntu/sovereign-stack/deployment/start.sh $@'
alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt'
alias ss-restore='/home/ubuntu/sovereign-stack/deployment/restore.sh $@'
alias ll='ls -lah'
alias ll='ls -lah'