Compare commits
No commits in common. "42aa3742fc004cae2fd79abaac8996ea5db284d0" and "b23d60a6adc09ad246b24d959fae99739424bb7f" have entirely different histories.
42aa3742fc...b23d60a6ad
@@ -81,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NGINX_IMAGE="nginx:1.23.2"

# version of backup is 24.0.3
export NEXTCLOUD_IMAGE="nextcloud:25.0.2"
export NEXTCLOUD_IMAGE="nextcloud:25.0.1"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"

# TODO PIN the gitea version number.
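Note on the version pins above and the "TODO PIN the gitea version number" comment: tags such as nextcloud:25.0.1 are mutable. A minimal sketch, assuming only the standard Docker CLI on the management machine (the workflow is not part of this repository), of resolving a tag to an immutable digest that could be pinned instead:

# resolve the currently published digest for a tag
docker pull nextcloud:25.0.1
docker image inspect --format '{{index .RepoDigests 0}}' nextcloud:25.0.1
# prints something shaped like: nextcloud@sha256:<digest>
# that value could then be exported instead of the bare tag, e.g.:
# export NEXTCLOUD_IMAGE="nextcloud@sha256:<digest>"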
@@ -23,7 +23,7 @@ RECONFIGURE_BTCPAY_SERVER=false
CLUSTER_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false
RESTART_FRONT_END=true
RESTART_FRONT_END=false

# grab any modifications from the command line.
for i in "$@"; do
@@ -32,7 +32,6 @@ for i in "$@"; do
        RESTORE_WWW=true
        BACKUP_APPS=false
        RUN_CERT_RENEWAL=false
        RESTART_FRONT_END=true
        shift
        ;;
    --restore-btcpay)
@@ -52,7 +51,6 @@ for i in "$@"; do
        ;;
    --stop)
        STOP_SERVICES=true
        RESTART_FRONT_END=true
        shift
        ;;
    --restart-front-end)
@@ -85,66 +85,15 @@ done
./stop_docker_stacks.sh


# TODO check if there are any other stacks that are left running (other than the reverse proxy).
# If so, this may mean the user has disabled one or more domains and that existing sites/services
# are still running. We should warn the user and quit; they have to remove those docker stacks manually.
if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
    echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to go remove the docker stacks running on the remote machine."
    echo "exiting."
    exit 1
fi


# ok, the backend stacks are stopped.
if [ "$RESTART_FRONT_END" = true ]; then
    # remove the nginx stack
    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
        sleep 2

        docker stack rm reverse-proxy

        # wait for all docker containers to stop.
        # TODO see if there's a way to check for this.
        sleep 15

    fi

    # generate the certs and grab a backup
    if [ "$RUN_CERT_RENEWAL" = true ]; then
        ./generate_certs.sh
    fi

    # let's back up all our letsencrypt certs
    export APP="letsencrypt"
    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
        export DOMAIN_NAME="$DOMAIN_NAME"
        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

        # source the site path so we know what features it has.
        source "$RESPOSITORY_PATH/reset_env.sh"
        source "$SITE_PATH/site_definition"
        source "$RESPOSITORY_PATH/domain_env.sh"

        # these variables are used by both backup/restore scripts.
        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
        mkdir -p "$LOCAL_BACKUP_PATH"

        if [ "$RESTORE_WWW" = true ]; then
            sleep 5
            echo "STARTING restore_path.sh for letsencrypt."
            ./restore_path.sh
            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
        elif [ "$BACKUP_APPS" = true ]; then
            # if we're not restoring, then we may or may not back up.
            ./backup_path.sh
        fi
    done
fi


# if [ "$DEPLOY_ONION_SITE" = true ]; then
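The $(docker stack ls | wc -l) check above counts the header row plus one row per stack, so the warning fires whenever more than one stack is still deployed. A minimal sketch, assuming only the standard Docker CLI, that instead names any leftover stacks while ignoring the reverse-proxy stack:

# list any stacks other than reverse-proxy that are still deployed
LEFTOVER_STACKS="$(docker stack ls --format '{{.Name}}' | grep -v '^reverse-proxy$' || true)"
if [ -n "$LEFTOVER_STACKS" ]; then
    echo "WARNING! These stacks are still running on the remote machine:"
    echo "$LEFTOVER_STACKS"
    exit 1
fi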
@@ -47,3 +47,52 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
        done
    done
done

if [ "$RESTART_FRONT_END" = true ]; then
    # remove the nginx stack
    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
        sleep 2

        docker stack rm reverse-proxy

        # wait for all docker containers to stop.
        # TODO see if there's a way to check for this.
        sleep 15

    fi

    # generate the certs and grab a backup
    if [ "$RUN_CERT_RENEWAL" = true ]; then
        ./generate_certs.sh
    fi

    # let's back up all our letsencrypt certs
    export APP="letsencrypt"
    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
        export DOMAIN_NAME="$DOMAIN_NAME"
        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

        # source the site path so we know what features it has.
        source "$RESPOSITORY_PATH/reset_env.sh"
        source "$SITE_PATH/site_definition"
        source "$RESPOSITORY_PATH/domain_env.sh"

        # these variables are used by both backup/restore scripts.
        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
        mkdir -p "$LOCAL_BACKUP_PATH"

        if [ "$RESTORE_WWW" = true ]; then
            sleep 5
            echo "STARTING restore_path.sh for letsencrypt."
            ./restore_path.sh
            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
        elif [ "$BACKUP_APPS" = true ]; then
            # if we're not restoring, then we may or may not back up.
            ./backup_path.sh
        fi
    done
fi
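For the "TODO see if there's a way to check for this" above, a minimal sketch of polling instead of sleeping a fixed 15 seconds. It assumes the standard Docker CLI and that swarm names the stack's containers with a reverse-proxy_ prefix:

docker stack rm reverse-proxy

# wait until no containers from the reverse-proxy stack remain
while docker ps --format '{{.Names}}' | grep -q '^reverse-proxy_'; do
    sleep 1
done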
reset.sh (new executable file, 39 lines)
@@ -0,0 +1,39 @@
#!/bin/bash

set -e

SSH_ENDPOINT_HOSTNAME="atlantis"
SSH_ENDPOINT_DOMAIN_NAME="ancapistan.io"
TEST_DOMAIN="ancapistan.casa"
CLUSTER_NAME="development"

export LXD_VM_NAME="${TEST_DOMAIN//./-}"

if [ -n "$TEST_DOMAIN" ]; then
    lxc delete --force www-"$LXD_VM_NAME"
    lxc delete --force btcpay-"$LXD_VM_NAME"
    lxc delete --force sovereign-stack
    lxc delete --force sovereign-stack-base

    lxc profile delete www-"$LXD_VM_NAME"
    lxc profile delete btcpay-"$LXD_VM_NAME"
fi

lxc profile delete sovereign-stack

lxc image rm sovereign-stack-base
lxc image rm ubuntu-base

lxc network delete lxdbrSS

lxc storage delete sovereign-stack

lxc remote switch "local"
lxc remote remove "$CLUSTER_NAME"

source "$HOME/.bashrc"

./cluster.sh create "$CLUSTER_NAME" "$SSH_ENDPOINT_HOSTNAME.$SSH_ENDPOINT_DOMAIN_NAME"
#--data-plane-interface=enp89s0

#./deploy.sh
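Because reset.sh runs with set -e, any of the lxc delete/rm calls above aborts the script as soon as the named resource no longer exists, which makes a partial teardown hard to re-run. A minimal sketch, assuming only the standard LXD CLI, of guarding one of the deletions:

# only delete the VM if LXD still knows about it
if lxc info www-"$LXD_VM_NAME" >/dev/null 2>&1; then
    lxc delete --force www-"$LXD_VM_NAME"
fi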