Compare commits


5 Commits

10 changed files with 69 additions and 98 deletions

View File

@@ -81,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
 export NGINX_IMAGE="nginx:1.23.2"
 # version of backup is 24.0.3
-export NEXTCLOUD_IMAGE="nextcloud:25.0.1"
+export NEXTCLOUD_IMAGE="nextcloud:25.0.2"
 export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
 # TODO PIN the gitea version number.
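Note: the pinned tags above only take effect where the exports are consumed by the deployment scripts. A minimal illustrative sketch (the variable names come from the env file above; the pre-pull step itself is not part of the project):

# optional pre-pull so a later deploy doesn't block on the registry
docker pull "$NEXTCLOUD_IMAGE"
docker pull "$NGINX_IMAGE"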

View File

@@ -23,7 +23,7 @@ RECONFIGURE_BTCPAY_SERVER=false
 CLUSTER_NAME="$(lxc remote get-default)"
 STOP_SERVICES=false
 USER_SAYS_YES=false
-RESTART_FRONT_END=false
+RESTART_FRONT_END=true
 # grab any modifications from the command line.
 for i in "$@"; do
@@ -32,6 +32,7 @@ for i in "$@"; do
         RESTORE_WWW=true
         BACKUP_APPS=false
         RUN_CERT_RENEWAL=false
+        RESTART_FRONT_END=true
         shift
         ;;
     --restore-btcpay)
@@ -51,6 +52,7 @@ for i in "$@"; do
         ;;
     --stop)
         STOP_SERVICES=true
+        RESTART_FRONT_END=true
         shift
         ;;
     --restart-front-end)
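Note: these flags are read by a plain for/case parser, and the change makes --restore-www and --stop imply a front-end restart on top of the new default. A self-contained sketch of that pattern (flag and variable names mirror the diff; everything else is illustrative):

#!/bin/bash
set -e

STOP_SERVICES=false
RESTART_FRONT_END=true   # now defaults to true

for i in "$@"; do
    case $i in
        --stop)
            # stopping the back end also forces the reverse proxy to be restarted later
            STOP_SERVICES=true
            RESTART_FRONT_END=true
            shift
            ;;
        --restart-front-end)
            RESTART_FRONT_END=true
            shift
            ;;
        *)
            echo "Unexpected option: $i"
            exit 1
            ;;
    esac
done

echo "STOP_SERVICES=$STOP_SERVICES RESTART_FRONT_END=$RESTART_FRONT_END"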

View File

@@ -39,7 +39,7 @@ export NBITCOIN_NETWORK="${BTC_CHAIN}"
 export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
 export BTCPAYGEN_LIGHTNING="clightning"
 export BTCPAYGEN_CRYPTO1="btc"
-export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;opt-add-btctransmuter;bitcoin-clightning.custom;"
+export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
 export BTCPAYGEN_REVERSEPROXY="nginx"
 export BTCPAY_ENABLE_SSH=false
 export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
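Note: BTCPAYGEN_ADDITIONAL_FRAGMENTS is a semicolon-separated list of docker-compose fragments consumed by the BTCPay Server generator; this change simply drops the opt-add-btctransmuter fragment. A hedged sketch of splitting and inspecting such a list in bash (illustrative only, not the generator's own parsing):

#!/bin/bash
BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"

# split on ';' and print each non-empty fragment
IFS=';' read -r -a FRAGMENTS <<< "$BTCPAYGEN_ADDITIONAL_FRAGMENTS"
for FRAGMENT in "${FRAGMENTS[@]}"; do
    [ -n "$FRAGMENT" ] && echo "fragment: $FRAGMENT"
done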

View File

@@ -36,7 +36,7 @@ if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
     # if the image doesn't exist, download it from Ubuntu's image server
     # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
     # we don't really need to cache this locally since it gets continually updated upstream.
-    lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "ubuntu-base" --public --vm
+    lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "ubuntu-base" --public --vm --auto-update
 fi
 # this vm is used temperarily with
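Note: --auto-update asks LXD to keep the copied image refreshed from the upstream images: remote rather than freezing the snapshot taken at copy time. A hedged way to confirm the alias exists on the target remote and inspect its auto-update status (alias and remote names as used in the script):

# list the alias on the target remote
lxc image list "$CLUSTER_NAME": --format csv ubuntu-base

# the image info output includes an "Auto update" field for copies that track upstream
lxc image info "$CLUSTER_NAME":ubuntu-base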

View File

@ -175,8 +175,8 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- sudo apt-get update - sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io - sudo apt-get install -y docker-ce docker-ce-cli containerd.io
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile - echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
- echo "alias bitcoin-cli='bitcoin-cli.sh \$@'" >> /home/ubuntu/.bash_profile - echo "alias bitcoin-cli=\"bitcoin-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- echo "alias lightning-cli='bitcoin-lightning-cli.sh \$@'" >> /home/ubuntu/.bash_profile - echo "alias lightning-cli=\"bitcoin-lightning-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose - sudo chmod +x /usr/local/bin/docker-compose
- sudo apt-get install -y openssh-server - sudo apt-get install -y openssh-server
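Note: the only change here is the quoting. Inside the outer double quotes, \$ and \" are written literally, so each variant appends a different line to .bash_profile. A quick stand-alone illustration:

# old form writes:  alias bitcoin-cli='bitcoin-cli.sh $@'
echo "alias bitcoin-cli='bitcoin-cli.sh \$@'"

# new form writes:  alias bitcoin-cli="bitcoin-cli.sh $@"
echo "alias bitcoin-cli=\"bitcoin-cli.sh \$@\""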

View File

@@ -85,15 +85,66 @@ done
 ./stop_docker_stacks.sh
+# TODO check if there are any other stacks that are left running (other than reverse proxy)
+# if so, this may mean the user has disabled one or more domains and that existing sites/services
+# are still running. We should prompt the user of this and quit. They have to go manually docker stack remove these.
+if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
+    echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to go remove the docker stacks runnong the remote machine."
+    echo "exiting."
+    exit 1
+fi
+# ok, the backend stacks are stopped.
+if [ "$RESTART_FRONT_END" = true ]; then
+    # remove the nginx stack
+    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
+        sleep 2
+        docker stack rm reverse-proxy
+        # wait for all docker containers to stop.
+        # TODO see if there's a way to check for this.
+        sleep 15
+    fi
+    # generate the certs and grab a backup
+    if [ "$RUN_CERT_RENEWAL" = true ]; then
+        ./generate_certs.sh
+    fi
+    # let's backup all our letsencrypt certs
+    export APP="letsencrypt"
+    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
+        export DOMAIN_NAME="$DOMAIN_NAME"
+        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+        # source the site path so we know what features it has.
+        source "$RESPOSITORY_PATH/reset_env.sh"
+        source "$SITE_PATH/site_definition"
+        source "$RESPOSITORY_PATH/domain_env.sh"
+        # these variable are used by both backup/restore scripts.
+        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
+        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
+        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
+        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
+        mkdir -p "$LOCAL_BACKUP_PATH"
+        if [ "$RESTORE_WWW" = true ]; then
+            sleep 5
+            echo "STARTING restore_path.sh for letsencrypt."
+            ./restore_path.sh
+            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
+        elif [ "$BACKUP_APPS" = true ]; then
+            # if we're not restoring, then we may or may not back up.
+            ./backup_path.sh
+        fi
+    done
+fi
 # if [ "$DEPLOY_ONION_SITE" = true ]; then

View File

@@ -47,52 +47,3 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
         done
     done
 done
-if [ "$RESTART_FRONT_END" = true ]; then
-    # remove the nginx stack
-    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
-        sleep 2
-        docker stack rm reverse-proxy
-        # wait for all docker containers to stop.
-        # TODO see if there's a way to check for this.
-        sleep 15
-    fi
-    # generate the certs and grab a backup
-    if [ "$RUN_CERT_RENEWAL" = true ]; then
-        ./generate_certs.sh
-    fi
-    # let's backup all our letsencrypt certs
-    export APP="letsencrypt"
-    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
-        export DOMAIN_NAME="$DOMAIN_NAME"
-        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
-        # source the site path so we know what features it has.
-        source "$RESPOSITORY_PATH/reset_env.sh"
-        source "$SITE_PATH/site_definition"
-        source "$RESPOSITORY_PATH/domain_env.sh"
-        # these variable are used by both backup/restore scripts.
-        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
-        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
-        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
-        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
-        mkdir -p "$LOCAL_BACKUP_PATH"
-        if [ "$RESTORE_WWW" = true ]; then
-            sleep 5
-            echo "STARTING restore_path.sh for letsencrypt."
-            ./restore_path.sh
-            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
-        elif [ "$BACKUP_APPS" = true ]; then
-            # if we're not restoring, then we may or may not back up.
-            ./backup_path.sh
-        fi
-    done
-fi

View File

@ -62,10 +62,10 @@ echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_
# the --stop flag ensures that services do NOT come back online. # the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup. # by default, we grab a backup.
bash -c "./deploy.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
RESPONSE= RESPONSE=
read -r -p "Are you sure you want to continue the migration? We have a backup TODO.": RESPONSE read -r -p "Are you sure you want to continue the migration? ": RESPONSE
if [ "$RESPONSE" != "y" ]; then if [ "$RESPONSE" != "y" ]; then
echo "STOPPING." echo "STOPPING."
exit 0 exit 0
@ -90,9 +90,13 @@ if lxc profile list | grep -q sovereign-stack; then
lxc profile delete sovereign-stack lxc profile delete sovereign-stack
fi fi
if lxc image list | grep -q "sovereign-stack-base"; then if lxc image list | grep -q sovereign-stack-base; then
lxc image rm sovereign-stack-base lxc image rm sovereign-stack-base
fi fi
if lxc image list | grep -q ubuntu-base; then
lxc image rm ubuntu-base
fi
# Then we can run a restore operation and specify the backup archive at the CLI. # Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

View File

@@ -9,10 +9,12 @@ TAG_MESSAGE="Creating tag $TAG_NAME on $(date)."
 git tag -a "$TAG_NAME" -m "$TAG_MESSAGE" -s
-# optional; push to remote
+# push commits and tags to origin
 git push --set-upstream origin --all
 git push --set-upstream origin --tags
+## note this will only work if you have permissions to update HEAD on https://git.sovereign-stack.org/ss/sovereign-stack.git
 RESPONSE=
 read -r -p " Would you like to push this to the main ss repo? (y) ": RESPONSE
 if [ "$RESPONSE" != "y" ]; then

View File

@@ -1,39 +0,0 @@
-#!/bin/bash
-set -e
-SSH_ENDPOINT_HOSTNAME="atlantis"
-SSH_ENDPOINT_DOMAIN_NAME="ancapistan.io"
-TEST_DOMAIN="ancapistan.casa"
-CLUSTER_NAME="development"
-export LXD_VM_NAME="${TEST_DOMAIN//./-}"
-if [ -n "$TEST_DOMAIN" ]; then
-    lxc delete --force www-"$LXD_VM_NAME"
-    lxc delete --force btcpay-"$LXD_VM_NAME"
-    lxc delete --force sovereign-stack
-    lxc delete --force sovereign-stack-base
-    lxc profile delete www-"$LXD_VM_NAME"
-    lxc profile delete btcpay-"$LXD_VM_NAME"
-fi
-lxc profile delete sovereign-stack
-lxc image rm sovereign-stack-base
-lxc image rm ubuntu-base
-lxc network delete lxdbrSS
-lxc storage delete sovereign-stack
-lxc remote switch "local"
-lxc remote remove "$CLUSTER_NAME"
-source "$HOME/.bashrc"
-./cluster.sh create "$CLUSTER_NAME" "$SSH_ENDPOINT_HOSTNAME.$SSH_ENDPOINT_DOMAIN_NAME"
-#--data-plane-interface=enp89s0
-#./deploy.sh