forked from ss/sovereign-stack

Compare commits: 69d5564e44 ... 3bc86d8aa3 (6 commits)

Commits in this range:
  3bc86d8aa3
  a5ce7ceb4c
  bceec32b66
  8316360c70
  1288962d79
  1bc685fa74
@@ -70,7 +70,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"

 # run the docker stack.
-export GHOST_IMAGE="ghost:5.23.0"
+export GHOST_IMAGE="ghost:5.26.2"

 # TODO switch to mysql. May require intricate export work for existing sites.
 # THIS MUST BE COMPLETED BEFORE v1 RELEASE

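Reviewer note: a quick way to sanity-check an image bump like this one before redeploying (illustrative; only the tag comes from the diff, and GHOST_VERSION is simply the env var the official ghost image sets):

    # pull the newly pinned tag and confirm which Ghost version it packages
    docker pull ghost:5.26.2
    docker image inspect ghost:5.26.2 --format '{{.Config.Env}}' | tr ' ' '\n' | grep GHOST_VERSION
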
@@ -23,6 +23,7 @@ RECONFIGURE_BTCPAY_SERVER=false
 CLUSTER_NAME="$(lxc remote get-default)"
 STOP_SERVICES=false
 USER_SAYS_YES=false
+RESTART_FRONT_END=false

 # grab any modifications from the command line.
 for i in "$@"; do

@@ -52,6 +53,10 @@ for i in "$@"; do
             STOP_SERVICES=true
             shift
             ;;
+        --restart-front-end)
+            RESTART_FRONT_END=true
+            shift
+            ;;
         --domain=*)
             DOMAIN_NAME="${i#*=}"
             shift

@@ -116,6 +121,7 @@ export CLUSTER_NAME="$CLUSTER_NAME"
 export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
 export USER_SAYS_YES="$USER_SAYS_YES"
 export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
+export RESTART_FRONT_END="$RESTART_FRONT_END"


 # ensure our cluster path is created.

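Reviewer note: the three hunks above plumb the new --restart-front-end flag from the argument parser into the exported environment. A hypothetical invocation (the entry-point script name is a placeholder, not something this diff confirms):

    # opt in to tearing down and rebuilding the reverse proxy during a deploy
    ./deploy.sh --domain=example.com --restart-front-end
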
@@ -34,15 +34,16 @@ fi
 cd btcpayserver-docker

 export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
+export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
 export NBITCOIN_NETWORK="${BTC_CHAIN}"
 export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
 export BTCPAYGEN_LIGHTNING="clightning"
 export BTCPAYGEN_CRYPTO1="btc"
-export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;opt-add-btctransmuter;"
+export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;opt-add-btctransmuter;bitcoin-clightning.custom;"
 export BTCPAYGEN_REVERSEPROXY="nginx"
 export BTCPAY_ENABLE_SSH=false
 export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
-export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
+export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
 export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"

 if [ "\$NBITCOIN_NETWORK" != regtest ]; then

@@ -55,9 +56,25 @@ if [ "\$NBITCOIN_NETWORK" != regtest ]; then
 fi
 fi

+
+# next we create fragments to customize various aspects of the system
+# this block customizes clightning to ensure the correct endpoints are being advertised
+# We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
+cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
+
+services:
+  clightning_bitcoin:
+    environment:
+      LIGHTNINGD_OPT: |
+        announce-addr-dns=true
+
+EOF
+
 # run the setup script.
 . ./btcpay-setup.sh -i

+touch ${REMOTE_HOME}/btcpay.complete
+
 EOL

 # send an updated ~/.bashrc so we have quicker access to cli tools

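Reviewer note: once the stack is back up, the effect of the bitcoin-clightning.custom fragment can be checked from the btcpay host. A sketch, assuming btcpayserver-docker's bundled bitcoin-lightning-cli.sh wrapper and jq are both available there:

    # getinfo should show the node alias and the addresses being announced,
    # including the DNS announcement enabled by announce-addr-dns=true
    bitcoin-lightning-cli.sh getinfo | jq '{alias, address}'
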
@@ -68,8 +85,10 @@ ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
 # send the setup script to the remote machine.
 scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
 ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
+
+# script is executed under sudo
 ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
-ssh "$BTCPAY_FQDN" "touch $REMOTE_HOME/btcpay.complete"
+

 # lets give time for the containers to spin up
 sleep 10

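Reviewer note: the btcpay.complete marker is now written by the generated script itself (inside the EOL heredoc above) rather than by a follow-up ssh, so it only appears when setup actually ran to completion. If re-runs ever need to be cheap, a guard along these lines could sit at the top of the generated script (a sketch, not part of this commit):

    # skip setup when a previous run already completed successfully
    if [ -f "${REMOTE_HOME}/btcpay.complete" ]; then
        echo "btcpay.complete found; nothing to do."
        exit 0
    fi
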
@@ -175,10 +175,12 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
   - sudo apt-get update
   - sudo apt-get install -y docker-ce docker-ce-cli containerd.io
   - echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
+  - echo "alias bitcoin-cli='bitcoin-cli.sh \$@'" >> /home/ubuntu/.bash_profile
+  - echo "alias lightning-cli='bitcoin-lightning-cli.sh \$@'" >> /home/ubuntu/.bash_profile
   - sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
   - sudo chmod +x /usr/local/bin/docker-compose
   - sudo apt-get install -y openssh-server


 EOF

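Reviewer note: bash aliases are plain textual substitution, so arguments typed after the alias are appended automatically; the embedded \$@ expands to the interactive shell's positional parameters (normally empty) and adds nothing. The simpler form behaves identically for interactive use:

    # equivalent to the aliased forms above for interactive commands
    alias bitcoin-cli='bitcoin-cli.sh'
    alias lightning-cli='bitcoin-lightning-cli.sh'
    lightning-cli getinfo   # runs: bitcoin-lightning-cli.sh getinfo
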
@@ -34,4 +34,5 @@ scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
 # now we run duplicity to restore the archive.
 ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"

-ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
+# reset folder owner to ubuntu
+ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"

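Reviewer note: the chown now targets the restored application directory rather than the backup staging path. An optional follow-up check (illustrative only):

    # confirm the restored tree ended up owned by ubuntu
    ssh "$PRIMARY_WWW_FQDN" stat -c '%U:%G' "$REMOTE_HOME/$APP"
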
@@ -48,55 +48,51 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
     done
 done

-# remove the nginx stack
-if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
-    sleep 2
-
-    docker stack rm reverse-proxy
-
-    # wait for all docker containers to stop.
-    # TODO see if there's a way to check for this.
-    sleep 15
-
-fi
-
-#
-if [ "$STOP_SERVICES" = true ]; then
-    echo "STOPPING as indicated by the --stop flag."
-    exit 0
-fi
-
-# generate the certs and grab a backup
-if [ "$RUN_CERT_RENEWAL" = true ]; then
-    ./generate_certs.sh
-fi
-
-# let's backup all our letsencrypt certs
-export APP="letsencrypt"
-for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
-    export DOMAIN_NAME="$DOMAIN_NAME"
-    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
-
-    # source the site path so we know what features it has.
-    source "$RESPOSITORY_PATH/reset_env.sh"
-    source "$SITE_PATH/site_definition"
-    source "$RESPOSITORY_PATH/domain_env.sh"
-
-    # these variable are used by both backup/restore scripts.
-    export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
-    export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
-
-    # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
-    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
-    mkdir -p "$LOCAL_BACKUP_PATH"
-
-    if [ "$RESTORE_WWW" = true ]; then
-        sleep 5
-        echo "STARTING restore_path.sh for letsencrypt."
-        ./restore_path.sh
-        #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
-    elif [ "$BACKUP_APPS" = true ]; then
-        # if we're not restoring, then we may or may not back up.
-        ./backup_path.sh
-    fi
-done
+if [ "$RESTART_FRONT_END" = true ]; then
+    # remove the nginx stack
+    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
+        sleep 2
+
+        docker stack rm reverse-proxy
+
+        # wait for all docker containers to stop.
+        # TODO see if there's a way to check for this.
+        sleep 15
+    fi
+
+    # generate the certs and grab a backup
+    if [ "$RUN_CERT_RENEWAL" = true ]; then
+        ./generate_certs.sh
+    fi
+
+    # let's backup all our letsencrypt certs
+    export APP="letsencrypt"
+    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
+        export DOMAIN_NAME="$DOMAIN_NAME"
+        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+
+        # source the site path so we know what features it has.
+        source "$RESPOSITORY_PATH/reset_env.sh"
+        source "$SITE_PATH/site_definition"
+        source "$RESPOSITORY_PATH/domain_env.sh"
+
+        # these variable are used by both backup/restore scripts.
+        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
+        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
+
+        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
+        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
+        mkdir -p "$LOCAL_BACKUP_PATH"
+
+        if [ "$RESTORE_WWW" = true ]; then
+            sleep 5
+            echo "STARTING restore_path.sh for letsencrypt."
+            ./restore_path.sh
+            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
+        elif [ "$BACKUP_APPS" = true ]; then
+            # if we're not restoring, then we may or may not back up.
+            ./backup_path.sh
+        fi
+    done
+fi
+

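Reviewer note: the net effect of this hunk is that the reverse-proxy teardown, cert renewal, and letsencrypt backup loop all run only when RESTART_FRONT_END is true, and the standalone --stop short-circuit is dropped from this script. The post-change shape, heavily abridged as a sketch:

    # everything destructive or cert-related is now gated on the new flag
    if [ "$RESTART_FRONT_END" = true ]; then
        docker stack rm reverse-proxy   # tear down the front end
        sleep 15                        # wait for containers to exit
        ./generate_certs.sh             # renew certs when enabled
        # ... per-domain letsencrypt backup/restore loop ...
    fi
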
migrate.sh (13 changed lines)
@@ -6,8 +6,17 @@ cd "$(dirname "$0")"
 CURRENT_CLUSTER="$(lxc remote get-default)"

 if echo "$CURRENT_CLUSTER" | grep -q "production"; then
-    echo "ERROR: YOU MUST COMMENT THIS OUT BEFORE YOU CAN RUN MIGRATE ON PROUDCTION/."
-    exit 1
+    echo "WARNING: You are running a migration procedure on a production system."
+    echo ""
+
+    RESPONSE=
+    read -r -p " Are you sure you want to continue (y) ": RESPONSE
+    if [ "$RESPONSE" != "y" ]; then
+        echo "STOPPING."
+        exit 1
+    fi
+
 fi

 source ./defaults.sh
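Reviewer note: the hard exit on production is replaced with an interactive confirmation. The same pattern generalizes to a small reusable helper, sketched here (not part of the commit):

    # yes/no gate built on the same read -r -p pattern
    confirm() {
        local response
        read -r -p "$1 (y): " response
        [ "$response" = "y" ]
    }

    confirm "Are you sure you want to continue?" || { echo "STOPPING."; exit 1; }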