Update variable names.

Derek Smith 2023-04-02 09:28:42 -04:00
parent ec04b8e274
commit c3c187311e
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
14 changed files with 46 additions and 88 deletions

View File

@@ -11,7 +11,7 @@ echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
sleep 5
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# TODO: not sure if this is necessary, but we want to give the VM additional time to take down all services
# that way processes can run shutdown procedures and leave files in the correct state.
@@ -19,9 +19,9 @@ sleep 10
# TODO enable encrypted archives
# TODO switch to btcpay-backup.sh when on LXD fully.
scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_DATA_PATH/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_DATA_PATH/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
# next we pull the resulting backup archive down to our management machine.
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"

View File

@@ -3,10 +3,6 @@
set -e
cd "$(dirname "$0")"
if [ "$RESTORE_BTCPAY" = false ]; then
exit 0
fi
if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
# push the restoration archive to the remote server
echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"
@@ -16,14 +12,8 @@ if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
# we clean up any old containers first before restoring.
ssh "$FQDN" docker system prune -f
# push the modified restore script to the remote directory, set permissions, and execute.
scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
# now, we're going to take things down because apparently this needs to be re-executed.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_DATA_PATH/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_DATA_PATH/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
fi
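An optional hardening step (not part of this commit) is to confirm the archive is readable before pushing it to the remote host; a minimal sketch using the same variables as above:
if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
    # verify the gzip archive is intact before scp'ing it to the remote host
    tar -tzf "$BACKUP_BTCPAY_ARCHIVE_PATH" >/dev/null || { echo "ERROR: unreadable archive: $BACKUP_BTCPAY_ARCHIVE_PATH" >&2; exit 1; }
fi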

View File

@@ -55,7 +55,7 @@ export BTCPAYGEN_CRYPTO1="btc"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
export BTCPAYGEN_REVERSEPROXY="nginx"
export BTCPAY_ENABLE_SSH=false
export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
export BTCPAY_BASE_DIRECTORY=${REMOTE_DATA_PATH}
export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"
@@ -68,7 +68,7 @@ export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"
# next we create fragments to customize various aspects of the system
# this block customizes clightning to ensure the correct endpoints are being advertised
# We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
cat > ${REMOTE_DATA_PATH}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
services:
clightning_bitcoin:
@@ -86,22 +86,17 @@ EOF
# run the setup script.
. ./btcpay-setup.sh -i
touch ${REMOTE_HOME}/btcpay.complete
touch ${REMOTE_DATA_PATH}/btcpay.complete
EOL
# send an updated ~/.bashrc so we have quicker access to cli tools
scp ./bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
# send the setup script to the remote machine.
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_DATA_PATH/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_DATA_PATH/btcpay_setup.sh"
# script is executed under sudo
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_DATA_PATH/btcpay_setup.sh"
# let's give time for the containers to spin up
sleep 10
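A fixed sleep usually works, but a more robust alternative is to poll the remote docker daemon until the BTCPay containers report as running. A sketch only; the "btcpayserver" name filter and the 30-attempt limit are assumptions, not part of the commit:
for _ in $(seq 1 30); do
    # stop waiting as soon as a container whose name contains "btcpayserver" is up
    if ssh "$BTCPAY_FQDN" "docker ps --format '{{.Names}}'" | grep -q btcpayserver; then
        break
    fi
    sleep 5
done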

View File

@@ -140,7 +140,8 @@ fi
# set up our default paths.
source ../../defaults.sh
export DOMAIN_NAME="$DOMAIN_NAME"
. ../remote_env.sh
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
@@ -150,7 +151,7 @@ export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export REMOTE_NAME="$REMOTE_NAME"
export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
@@ -229,40 +230,22 @@ EOL
}
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
export PROJECT_PREFIX="$PROJECT_PREFIX"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"
export PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
if [ -n "$USER_TARGET_PROJECT" ]; then
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
continue
fi
fi
# create a symlink from ./remotepath/projects/project
if [ ! -d "$REMOTE_PATH/projects/$PROJECT_NAME" ]; then
ln -s "$PROJECT_PATH" "$REMOTE_PATH/projects/$PROJECT_NAME"
fi
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
# create a symlink from ./remotepath/projects/project
if [ ! -d "$REMOTE_PATH/projects/$PROJECT_NAME" ]; then
ln -s "$PROJECT_PATH" "$REMOTE_PATH/projects/$PROJECT_NAME"
fi
# check to see if the project definition file exists; exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# check to see if the project definition file exists; exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# stub out a project.conf
cat >"$PROJECT_DEFINITION_PATH" <<EOL
cat >"$PROJECT_DEFINITION_PATH" <<EOL
# see https://www.sovereign-stack.org/ss-deploy/#projectconf for more info.
PRIMARY_DOMAIN="domain0.tld"
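For reference, the loop above turns each (prefix|chain) entry of DEPLOYMENT_STRING into a project name. A standalone sketch of that parsing run against a sample value (the sample string itself is an assumption):
DEPLOYMENT_STRING="(btcpay|mainnet),(dev|regtest)"
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
    NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"    # strip the surrounding parentheses
    PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
    BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
    echo "$PROJECT_PREFIX-$BITCOIN_CHAIN"                  # prints btcpay-mainnet, then dev-regtest
done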

View File

@@ -11,8 +11,10 @@ export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export REMOTE_GHOST_PATH="$REMOTE_DATA_PATH/ghost"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_DATA_PATH/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_DATA_PATH/gitea"
export REMOTE_CLAMS_PATH="$REMOTE_DATA_PATH/clams"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
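All of these per-service paths hang off REMOTE_DATA_PATH. Its definition is not part of this diff; for illustration only, it is expected to be exported by an earlier environment script along these lines (the value shown is a placeholder, not the project's actual default):
export REMOTE_DATA_PATH="/home/ubuntu/ss-data"   # hypothetical value; the real default lives elsewhere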

View File

@@ -18,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
# with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates
# at a caching reverse proxy that runs nginx.
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs"
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_DATA_PATH/letsencrypt/$DOMAIN_NAME/_logs"
# this is the minimum required: www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
@@ -38,7 +38,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
done
fi
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_DATA_PATH/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_DATA_PATH/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
# execute the certbot command that we dynamically generated.
eval "$GENERATE_CERT_STRING"

View File

@@ -72,7 +72,6 @@ done
./stop_docker_stacks.sh
# TODO check if there are any other stacks that are left running (other than reverse proxy)
# if so, this may mean the user has disabled one or more domains and that existing sites/services
# are still running. We should warn the user about this and quit; they have to remove those stacks manually with docker stack remove.
@@ -82,8 +81,6 @@ if [[ $(docker stack list | wc -l) -gt 2 ]]; then
exit 1
fi
# ok, the backend stacks are stopped.
if [ "$RESTART_FRONT_END" = true ]; then
# remove the nginx stack
@ -117,7 +114,7 @@ if [ "$RESTART_FRONT_END" = true ]; then
# these variables are used by both backup/restore scripts.
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"

View File

@@ -37,4 +37,4 @@ scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
# reset folder owner to ubuntu
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_DATA_PATH/$APP"

View File

@@ -27,7 +27,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
# these variables are used by both backup/restore scripts.
export APP="$APP"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
@@ -39,8 +39,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
if [ "$RESTORE_WWW" = true ]; then
./restore_path.sh
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, then we may or may not back up.
./backup_path.sh
fi

View File

@@ -19,8 +19,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
# ensure directories on remote host exist so we can mount them into the containers.
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/db"
export GHOST_STACK_TAG="ghost-$STACK_NAME"
export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME"
@@ -44,7 +44,7 @@ EOL
- ghostnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
- ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
environment:
EOL
if [ "$LANGUAGE_CODE" = "en" ]; then
@@ -74,7 +74,7 @@ EOL
networks:
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
- ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=ghost
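The fragments above are written into a per-domain compose file; the deploy call itself falls outside these hunks, but presumably the generated file is handed to docker swarm along these lines (the file-path variable and the exact invocation are assumptions):
docker stack deploy -c "$DOCKER_YAML_PATH" "$GHOST_STACK_TAG"   # GHOST_STACK_TAG is exported above; DOCKER_YAML_PATH mirrors the variable used in the nginx script below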

View File

@@ -35,7 +35,7 @@ services:
- nextcloud-${DOMAIN_IDENTIFIER}-en
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
environment:
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
- MYSQL_DATABASE=nextcloud
@@ -55,7 +55,7 @@ services:
networks:
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
environment:
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}

View File

@@ -323,14 +323,6 @@ EOL
EOL
# # add the Onion-Location header if specified.
# if [ "$DEPLOY_ONION_SITE" = true ]; then
# cat >>"$NGINX_CONF_PATH" <<EOL
# add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
# EOL
# fi
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"

View File

@@ -60,11 +60,11 @@ EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
- ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
- ${REMOTE_DATA_PATH}/letsencrypt:/etc/letsencrypt:ro
EOL
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- clams-browser-app:/browser-app:ro
- ${REMOTE_CLAMS_PATH}:/browser-app
EOL
fi

View File

@@ -15,7 +15,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
source ../../domain_env.sh
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"
REMOTE_NOSTR_PATH="$REMOTE_DATA_PATH/nostr"
NOSTR_PATH="$REMOTE_NOSTR_PATH/$DOMAIN_NAME"
NOSTR_CONFIG_PATH="$SITE_PATH/webstack/nostr.config"