Compare commits
4 Commits: b23d60a6ad ... 42aa3742fc

Author | SHA1 | Date
---|---|---
| 42aa3742fc |
| 104e547a57 |
| 7a08b7cdcf |
| fac6ab0ecf |
@@ -81,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
 export NGINX_IMAGE="nginx:1.23.2"
 
 # version of backup is 24.0.3
-export NEXTCLOUD_IMAGE="nextcloud:25.0.1"
+export NEXTCLOUD_IMAGE="nextcloud:25.0.2"
 export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
 
 # TODO PIN the gitea version number.
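If you want to sanity-check the bumped Nextcloud tag before rolling it out, a quick manual verification (hypothetical; not part of this changeset):

    docker pull nextcloud:25.0.2   # fails fast if the tag doesn't exist upstream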
@@ -23,7 +23,7 @@ RECONFIGURE_BTCPAY_SERVER=false
 CLUSTER_NAME="$(lxc remote get-default)"
 STOP_SERVICES=false
 USER_SAYS_YES=false
-RESTART_FRONT_END=false
+RESTART_FRONT_END=true
 
 # grab any modifications from the command line.
 for i in "$@"; do
@@ -32,6 +32,7 @@ for i in "$@"; do
         RESTORE_WWW=true
         BACKUP_APPS=false
         RUN_CERT_RENEWAL=false
+        RESTART_FRONT_END=true
         shift
         ;;
     --restore-btcpay)
@@ -51,6 +52,7 @@ for i in "$@"; do
         ;;
     --stop)
         STOP_SERVICES=true
+        RESTART_FRONT_END=true
         shift
         ;;
     --restart-front-end)
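Taken together with the earlier default flip of RESTART_FRONT_END to true, these two hunks also re-assert a front-end restart whenever the restore branch or --stop is taken. A hypothetical invocation, assuming the patched file is the repository's deploy entry point (its name is not shown in this compare view):

    ./deploy.sh --stop                # stop services; the reverse proxy stack is now torn down too
    ./deploy.sh --restart-front-end   # sets RESTART_FRONT_END=true explicitly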
@@ -85,15 +85,66 @@ done
 ./stop_docker_stacks.sh
 
 
+# TODO check if there are any other stacks that are left running (other than the reverse proxy).
+# If so, this may mean the user has disabled one or more domains and that existing sites/services
+# are still running. We should warn the user about this and quit; they have to remove those docker stacks manually.
+if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
+    echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to remove the docker stacks running on the remote machine."
+    echo "exiting."
+    exit 1
+fi
+
+
+# ok, the backend stacks are stopped.
+if [ "$RESTART_FRONT_END" = true ]; then
+    # remove the nginx stack
+    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
+        sleep 2
+
+        docker stack rm reverse-proxy
+
+        # wait for all docker containers to stop.
+        # TODO see if there's a way to check for this.
+        sleep 15
+    fi
+
+    # generate the certs and grab a backup
+    if [ "$RUN_CERT_RENEWAL" = true ]; then
+        ./generate_certs.sh
+    fi
+
+    # let's back up all our letsencrypt certs
+    export APP="letsencrypt"
+    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
+        export DOMAIN_NAME="$DOMAIN_NAME"
+        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+
+        # source the site path so we know what features it has.
+        source "$RESPOSITORY_PATH/reset_env.sh"
+        source "$SITE_PATH/site_definition"
+        source "$RESPOSITORY_PATH/domain_env.sh"
+
+        # these variables are used by both backup/restore scripts.
+        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
+        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
+
+        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
+        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
+        mkdir -p "$LOCAL_BACKUP_PATH"
+
+        if [ "$RESTORE_WWW" = true ]; then
+            sleep 5
+            echo "STARTING restore_path.sh for letsencrypt."
+            ./restore_path.sh
+            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
+        elif [ "$BACKUP_APPS" = true ]; then
+            # if we're not restoring, then we may or may not back up.
+            ./backup_path.sh
+        fi
+    done
+fi
+
+
 # if [ "$DEPLOY_ONION_SITE" = true ]; then
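The added block pauses with a fixed sleep 15 after docker stack rm and leaves a TODO about detecting when the containers are actually gone. One way to address it, sketched here as a suggestion rather than part of the changeset: Swarm labels each stack's containers with com.docker.stack.namespace, so you can poll until none remain.

    # hypothetical replacement for the fixed "sleep 15"
    while [ -n "$(docker ps -q --filter label=com.docker.stack.namespace=reverse-proxy)" ]; do
        sleep 1
    done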
@@ -47,52 +47,3 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 done
 done
 done
-
-if [ "$RESTART_FRONT_END" = true ]; then
-    # remove the nginx stack
-    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
-        sleep 2
-
-        docker stack rm reverse-proxy
-
-        # wait for all docker containers to stop.
-        # TODO see if there's a way to check for this.
-        sleep 15
-    fi
-
-    # generate the certs and grab a backup
-    if [ "$RUN_CERT_RENEWAL" = true ]; then
-        ./generate_certs.sh
-    fi
-
-    # let's backup all our letsencrypt certs
-    export APP="letsencrypt"
-    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
-        export DOMAIN_NAME="$DOMAIN_NAME"
-        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
-
-        # source the site path so we know what features it has.
-        source "$RESPOSITORY_PATH/reset_env.sh"
-        source "$SITE_PATH/site_definition"
-        source "$RESPOSITORY_PATH/domain_env.sh"
-
-        # these variable are used by both backup/restore scripts.
-        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
-        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
-
-        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
-        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
-        mkdir -p "$LOCAL_BACKUP_PATH"
-
-        if [ "$RESTORE_WWW" = true ]; then
-            sleep 5
-            echo "STARTING restore_path.sh for letsencrypt."
-            ./restore_path.sh
-            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
-        elif [ "$BACKUP_APPS" = true ]; then
-            # if we're not restoring, then we may or may not back up.
-            ./backup_path.sh
-        fi
-    done
-fi
reset.sh (39 lines deleted)
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-set -e
-
-SSH_ENDPOINT_HOSTNAME="atlantis"
-SSH_ENDPOINT_DOMAIN_NAME="ancapistan.io"
-TEST_DOMAIN="ancapistan.casa"
-CLUSTER_NAME="development"
-
-export LXD_VM_NAME="${TEST_DOMAIN//./-}"
-
-if [ -n "$TEST_DOMAIN" ]; then
-    lxc delete --force www-"$LXD_VM_NAME"
-    lxc delete --force btcpay-"$LXD_VM_NAME"
-    lxc delete --force sovereign-stack
-    lxc delete --force sovereign-stack-base
-
-    lxc profile delete www-"$LXD_VM_NAME"
-    lxc profile delete btcpay-"$LXD_VM_NAME"
-fi
-
-lxc profile delete sovereign-stack
-
-lxc image rm sovereign-stack-base
-lxc image rm ubuntu-base
-
-lxc network delete lxdbrSS
-
-lxc storage delete sovereign-stack
-
-lxc remote switch "local"
-lxc remote remove "$CLUSTER_NAME"
-
-source "$HOME/.bashrc"
-
-./cluster.sh create "$CLUSTER_NAME" "$SSH_ENDPOINT_HOSTNAME.$SSH_ENDPOINT_DOMAIN_NAME"
-#--data-plane-interface=enp89s0
-
-#./deploy.sh