Compare commits
No commits in common. "f05daa9bfb11242eab920fdc4dd490d9bfdd6fbb" and "7a710d09162f98fa38e4c60fa00c87c8bcbeb5e9" have entirely different histories.
f05daa9bfb ... 7a710d0916
@@ -11,7 +11,7 @@ echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."

 sleep 5

-ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_BACKUP_PATH; cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"

 # TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services
 # that way processes can run shutdown procedures and leave files in the correct state.
@@ -19,13 +19,13 @@ sleep 10

 # TODO enable encrypted archives
 # TODO switch to btcpay-backup.sh when on LXD fully.
-scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_DATA_PATH/btcpay-backup.sh"
+scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
-ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_DATA_PATH/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
-ssh "$BTCPAY_FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"

 # next we pull the resulting backup archive down to our management machine.
-ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_BACKUP_PATH/btcpay.tar.gz"
+ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
-ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_BACKUP_PATH/btcpay.tar.gz"
+ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"

 # if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
 BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
@@ -34,6 +34,6 @@ if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
 fi

 mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
-scp "$BTCPAY_FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
+scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"

 echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
@@ -1,3 +1,6 @@
-# these aliases are simply calling the btcpay server scripts.
-alias bitcoin-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-cli.sh $@"
-alias lightning-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-lightning-cli.sh $@"
+# we append this text to the btcpay server /home/ubuntu/.bashrc so
+# logged in users have more common access to the variou
+
+alias bitcoin-cli="bitcoin-cli.sh $@"
+alias lightning-cli="bitcoin-lightning-cli.sh $@"
+
@@ -5,13 +5,14 @@ cd "$(dirname "$0")"

 export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"

-if [ "$STOP_SERVICES" = true ]; then
-# run the update.
-ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
-else
+# run the btcpay setup script if it hasn't been done before.
+if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
 ./stub_btcpay_setup.sh
+BACKUP_BTCPAY=false
 fi

+RUN_SERVICES=true

 # we will re-run the btcpayserver provisioning scripts if directed to do so.
 # if an update does occur, we grab another backup.
 if [ "$UPDATE_BTCPAY" = true ]; then
@@ -21,7 +22,7 @@ if [ "$UPDATE_BTCPAY" = true ]; then
 # btcpay-update.sh brings services back up, but does not take them down.
 ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"

-sleep 30
+sleep 20

 elif [ "$RESTORE_BTCPAY" = true ]; then
 # run the update.
@@ -30,6 +31,15 @@ elif [ "$RESTORE_BTCPAY" = true ]; then

 ./restore.sh

+RUN_SERVICES=true
+BACKUP_BTCPAY=false
+
+elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
+# the administrator may have indicated a reconfig;
+# if so, we re-run setup script.
+./stub_btcpay_setup.sh
+
+RUN_SERVICES=true
 BACKUP_BTCPAY=false
 fi

@@ -39,8 +49,10 @@ if [ "$BACKUP_BTCPAY" = true ]; then
 ./backup_btcpay.sh
 fi

-if [ "$STOP_SERVICES" = false ]; then
+if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
 # The default is to resume services, though admin may want to keep services off (eg., for a migration)
 # we bring the services back up by default.
 ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
 fi

+echo "FINISHED btcpayserver/go.sh"
@@ -3,17 +3,27 @@
 set -e
 cd "$(dirname "$0")"

+if [ "$RESTORE_BTCPAY" = false ]; then
+exit 0
+fi
+
 if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
 # push the restoration archive to the remote server
 echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"

-BTCPAY_REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/btcpayserver"
+REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
-ssh "$FQDN" mkdir -p "$BTCPAY_REMOTE_BACKUP_PATH"
+ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
-REMOTE_BTCPAY_ARCHIVE_PATH="$BTCPAY_REMOTE_BACKUP_PATH/btcpay.tar.gz"
+REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
 scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"

+# we clean up any old containers first before restoring.
+ssh "$FQDN" docker system prune -f
+
 # push the modified restore script to the remote directory, set permissions, and execute.
-scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_DATA_PATH/btcpay-restore.sh"
+scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
-ssh "$FQDN" "sudo mv $REMOTE_DATA_PATH/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
+ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
-ssh "$FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
+ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"

+# now, we're going to take things down because aparently we this needs to be re-exececuted.
+ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
 fi
@@ -3,8 +3,6 @@
 set -e
 cd "$(dirname "$0")"

-
-
 # default is for regtest
 CLIGHTNING_WEBSOCKET_PORT=9736
 if [ "$BITCOIN_CHAIN" = testnet ]; then
@@ -15,7 +13,6 @@ fi

 export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT"

-
 # export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
 # BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"

@@ -58,20 +55,24 @@ export BTCPAYGEN_CRYPTO1="btc"
 export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
 export BTCPAYGEN_REVERSEPROXY="nginx"
 export BTCPAY_ENABLE_SSH=false
-export BTCPAY_BASE_DIRECTORY=${REMOTE_DATA_PATH}
+export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
 export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
 export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"

-# if [ "\$NBITCOIN_NETWORK" != regtest ]; then
-# cd ./contrib/FastSync
-# ./load-utxo-set.sh
-# cd -
-# fi
+if [ "\$NBITCOIN_NETWORK" != regtest ]; then
+# run fast_sync if it's not been done before.
+if [ ! -f /home/ubuntu/fast_sync_completed ]; then
+cd ./contrib/FastSync
+./load-utxo-set.sh
+touch /home/ubuntu/fast_sync_completed
+cd -
+fi
+fi

 # next we create fragments to customize various aspects of the system
 # this block customizes clightning to ensure the correct endpoints are being advertised
 # We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
-cat > ${REMOTE_DATA_PATH}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
+cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF

 services:
 clightning_bitcoin:
@@ -89,17 +90,22 @@ EOF
 # run the setup script.
 . ./btcpay-setup.sh -i

-touch ${REMOTE_DATA_PATH}/btcpay.complete
-chown ubuntu:ubuntu ${REMOTE_DATA_PATH}/btcpay.complete
+touch ${REMOTE_HOME}/btcpay.complete
 EOL

+# send an updated ~/.bashrc so we have quicker access to cli tools
+scp ./bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
+ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
+ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
+
 # send the setup script to the remote machine.
-scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_DATA_PATH/btcpay_setup.sh"
+scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
-ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_DATA_PATH/btcpay_setup.sh"
+ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"

 # script is executed under sudo
-ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_DATA_PATH/btcpay_setup.sh"
+ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"

 # lets give time for the containers to spin up
 sleep 10
@@ -5,10 +5,6 @@ cd "$(dirname "$0")"

 bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"

-if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
-lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
-fi
-
 # let's download our base image.
 if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
 # if the image if cached locally, import it from disk, otherwise download it from ubuntu
@@ -32,52 +28,52 @@ if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
 fi
 else
 # the base image is ubuntu:22.04.
-lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project=default
+lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm

 # TODO move this sovereign-stack-base construction VM to separate dedicated IP
-lxc config set "$BASE_IMAGE_VM_NAME" --project=default
+lxc config set "$BASE_IMAGE_VM_NAME"

-# for CHAIN in mainnet testnet; do
-# for DATA in blocks chainstate; do
-# lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
-# done
-# done
+for CHAIN in mainnet testnet; do
+for DATA in blocks chainstate; do
+lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+done
+done

-lxc start "$BASE_IMAGE_VM_NAME" --project=default
+lxc start "$BASE_IMAGE_VM_NAME"

 sleep 15
-while lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+while lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
 sleep 1
 done

 # ensure the ssh service is listening at localhost
-lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- wait-for-it -t 100 127.0.0.1:22
+lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22

-# # If we have any chaninstate or blocks in our SSME, let's push them to the
-# # remote host as a zfs volume that way deployments can share a common history
-# # of chainstate/blocks.
-# for CHAIN in testnet mainnet; do
-# for DATA in blocks chainstate; do
-# # if the storage snapshot doesn't yet exist, create it.
-# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-# DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
-# if [ -d "$DATA_PATH" ]; then
-# COMPLETE_FILE_PATH="$DATA_PATH/complete"
-# if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
-# lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
-# lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
-# lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
-# else
-# echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
-# fi
-# fi
-# fi
-# done
-# done
+# If we have any chaninstate or blocks in our SSME, let's push them to the
+# remote host as a zfs volume that way deployments can share a common history
+# of chainstate/blocks.
+for CHAIN in testnet mainnet; do
+for DATA in blocks chainstate; do
+# if the storage snapshot doesn't yet exist, create it.
+if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+if [ -d "$DATA_PATH" ]; then
+COMPLETE_FILE_PATH="$DATA_PATH/complete"
+if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
+lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
+lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
+lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
+else
+echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
+fi
+fi
+fi
+done
+done

 # stop the VM and get a snapshot.
-lxc stop "$BASE_IMAGE_VM_NAME" --project=default
+lxc stop "$BASE_IMAGE_VM_NAME"
-lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project=default
+lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME"

 fi

@@ -85,14 +81,14 @@ echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'.
 lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none

 echo "INFO: Success creating the base image. Deleting artifacts from the build process."
-lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
+lxc delete -f "$BASE_IMAGE_VM_NAME"

-# # now let's get a snapshot of each of the blocks/chainstate directories.
-# for CHAIN in testnet mainnet; do
-# for DATA in blocks chainstate; do
-# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-# echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
-# lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
-# fi
-# done
-# done
+# now let's get a snapshot of each of the blocks/chainstate directories.
+for CHAIN in testnet mainnet; do
+for DATA in blocks chainstate; do
+if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
+lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
+fi
+done
+done
117 deploy.sh
@@ -35,23 +35,29 @@ if lxc remote get-default | grep -q "production"; then

 fi

+# switch to the defult project. We will switch to something more specific later.
+if ! lxc info | grep "project:" | grep -q default; then
+lxc project switch default
+fi
+
+DOMAIN_NAME=
 RUN_CERT_RENEWAL=true
 SKIP_WWW=false
 RESTORE_WWW=false
 RESTORE_CERTS=false
-BACKUP_CERTS=false
-BACKUP_BTCPAY=false
-BACKUP_CERTS=false
-BACKUP_APPS=false
-BACKUP_BTCPAY=false
+BACKUP_CERTS=true
+BACKUP_APPS=true
+BACKUP_BTCPAY=true
 BACKUP_BTCPAY_ARCHIVE_PATH=
 RESTORE_BTCPAY=false
 SKIP_BTCPAY=false
 UPDATE_BTCPAY=false
+RECONFIGURE_BTCPAY_SERVER=false
 REMOTE_NAME="$(lxc remote get-default)"
 STOP_SERVICES=false
 USER_SAYS_YES=false
 RESTART_FRONT_END=true
+USER_TARGET_PROJECT=

 # grab any modifications from the command line.
 for i in "$@"; do
@@ -63,31 +69,39 @@ for i in "$@"; do
 --restore-www)
 RESTORE_WWW=true
 RESTORE_CERTS=true
+BACKUP_APPS=false
+RUN_CERT_RENEWAL=false
+RESTART_FRONT_END=true
 shift
 ;;
 --restore-btcpay)
 RESTORE_BTCPAY=true
+BACKUP_BTCPAY=false
+RUN_CERT_RENEWAL=false
 shift
 ;;
---backup-www)
-BACKUP_CERTS=true
-BACKUP_APPS=true
-shift
-;;
---backup-btcpayserver)
-BACKUP_BTCPAY=true
+--no-backup-www)
+BACKUP_CERTS=false
+BACKUP_APPS=false
 shift
 ;;
 --stop)
 STOP_SERVICES=true
-RESTART_FRONT_END=false
+RESTART_FRONT_END=true
+shift
+;;
+--restart-front-end)
+RESTART_FRONT_END=true
 shift
 ;;
 --backup-archive-path=*)
 BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
 shift
 ;;
+--project=*)
+USER_TARGET_PROJECT="${i#*=}"
+shift
+;;
 --update-btcpay)
 UPDATE_BTCPAY=true
 shift
@@ -96,7 +110,7 @@ for i in "$@"; do
 SKIP_WWW=true
 shift
 ;;
---skip-btcpayserver)
+--skip-btcpay)
 SKIP_BTCPAY=true
 shift
 ;;
@@ -104,6 +118,10 @@ for i in "$@"; do
 RUN_CERT_RENEWAL=false
 shift
 ;;
+--reconfigure-btcpay)
+RECONFIGURE_BTCPAY_SERVER=true
+shift
+;;
 -y)
 USER_SAYS_YES=true
 shift
@@ -128,8 +146,7 @@ fi
 # set up our default paths.
 source ../../defaults.sh

-. ../remote_env.sh
+export DOMAIN_NAME="$DOMAIN_NAME"

 export REGISTRY_DOCKER_IMAGE="registry:2"
 export RESTORE_WWW="$RESTORE_WWW"
 export STOP_SERVICES="$STOP_SERVICES"
@@ -139,7 +156,7 @@ export RESTORE_BTCPAY="$RESTORE_BTCPAY"
 export BACKUP_BTCPAY="$BACKUP_BTCPAY"
 export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
 export REMOTE_NAME="$REMOTE_NAME"
-export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
+export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
 export USER_SAYS_YES="$USER_SAYS_YES"
 export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
 export RESTART_FRONT_END="$RESTART_FRONT_END"
@@ -198,7 +215,6 @@ DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
 DEPLOY_GHOST=true
 DEPLOY_CLAMS=false
 DEPLOY_NEXTCLOUD=false
-DEPLOY_NOSTR=false
 NOSTR_ACCOUNT_PUBKEY=
 DEPLOY_GITEA=false
 GHOST_MYSQL_PASSWORD="$(new_pass)"
@@ -219,9 +235,26 @@ EOL

 }

-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
+NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
+PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
+BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
+export PROJECT_PREFIX="$PROJECT_PREFIX"
+export BITCOIN_CHAIN="$BITCOIN_CHAIN"
+
+PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
+PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
+
+# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
+if [ -n "$USER_TARGET_PROJECT" ]; then
+if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
+continue
+fi
+fi
+
 export PROJECT_NAME="$PROJECT_NAME"
-export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
+export PROJECT_PATH="$PROJECT_PATH"

 mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"

@@ -242,7 +275,6 @@ PRIMARY_DOMAIN="domain0.tld"
 # OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"

 WWW_SERVER_MAC_ADDRESS=
-# WWW_SSDATA_DISK_SIZE_GB=100
 # WWW_SERVER_CPU_COUNT="6"
 # WWW_SERVER_MEMORY_MB="4096"

@@ -259,7 +291,8 @@ EOL
 exit 1
 fi

-. ../project_env.sh
+# source project defition.
+source "$PROJECT_DEFINITION_PATH"

 if [ -z "$PRIMARY_DOMAIN" ]; then
 echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project.conf."
@@ -292,14 +325,12 @@ export DOMAIN_NAME="$PRIMARY_DOMAIN"
 export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
 export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"

-
-
-
 stub_site_definition

 # bring the VMs up under the primary domain name.

 export UPDATE_BTCPAY="$UPDATE_BTCPAY"
+export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"

 # iterate over all our server endpoints and provision them if needed.
 # www
@@ -311,16 +342,6 @@ if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
 fi

 for VIRTUAL_MACHINE in www btcpayserver; do

-if [ "$VIRTUAL_MACHINE" = btcpayserver ] && [ "$SKIP_BTCPAY" = true ]; then
-continue
-fi
-
-if [ "$VIRTUAL_MACHINE" = www ] && [ "$SKIP_WWW" = true ]; then
-continue
-fi
-
 export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
 FQDN=

@@ -335,6 +356,12 @@ for VIRTUAL_MACHINE in www btcpayserver; do
 exit 1
 fi

+# create the lxc project as specified by PROJECT_NAME
+if ! lxc project list | grep -q "$PROJECT_NAME"; then
+lxc project create "$PROJECT_NAME"
+lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=false
+fi
+
 # Goal is to get the macvlan interface.
 LXD_SS_CONFIG_LINE=
 if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
@@ -353,6 +380,7 @@ for VIRTUAL_MACHINE in www btcpayserver; do

 # Now let's switch to the new project to ensure new resources are created under the project scope.
 if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
+echo "INFO: switch to lxd project '$PROJECT_NAME'."
 lxc project switch "$PROJECT_NAME"
 fi

@@ -374,24 +402,29 @@ for VIRTUAL_MACHINE in www btcpayserver; do
 FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
 VPS_HOSTNAME="$WWW_HOSTNAME"
 MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
+ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"

 elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
 FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
 VPS_HOSTNAME="$BTCPAY_HOSTNAME"
 MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
+if [ "$BITCOIN_CHAIN" = mainnet ]; then
+ROOT_DISK_SIZE_GB=150
+elif [ "$BITCOIN_CHAIN" = testnet ]; then
+ROOT_DISK_SIZE_GB=70
+fi

 elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
 export FQDN="$BASE_IMAGE_VM_NAME"
+ROOT_DISK_SIZE_GB=8
 else
 echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
 exit
 fi


 export FQDN="$FQDN"
 export LXD_VM_NAME="${FQDN//./-}"
+export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
 export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
 export PROJECT_PATH="$PROJECT_PATH"

@@ -423,8 +456,6 @@ done
 if [ "$SKIP_WWW" = false ]; then
 ./www/go.sh
 ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
-else
-echo "INFO: Skipping www VM."
 fi

 export DOMAIN_NAME="$PRIMARY_DOMAIN"
@@ -433,6 +464,6 @@ if [ "$SKIP_BTCPAY" = false ]; then
 ./btcpayserver/go.sh

 ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
-else
-echo "INFO: Skipping the btcpayserver VM."
 fi

+done
63 deploy_vm.sh
@@ -30,51 +30,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
 exit 1
 fi

-# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
-VM_ID=
-BACKUP_DISK_SIZE_GB=
-SSDATA_DISK_SIZE_GB=
-DOCKER_DISK_SIZE_GB=
-if [ "$VIRTUAL_MACHINE" = www ]; then
-VM_ID="w"
-BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
-SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
-DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
-fi
-
-if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
-VM_ID="b"
-BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
-SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
-DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
-fi
-
-DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
-if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
-lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
-fi
-
-# TODO ensure we are only GROWING the volume--never shrinking
-lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
-
-SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
-if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
-lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
-fi
-
-# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
-lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
-
-BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
-if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
-lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
-fi
-
-lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
-
-bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
+bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME"

 # now let's create a new VM to work with.
 #lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
@@ -83,9 +39,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
 # let's PIN the HW address for now so we don't exhaust IP
 # and so we can set DNS internally.
 lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
-# attack the docker block device.
-lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"

 # if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
 # # attach any volumes
@@ -101,17 +55,10 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
 sleep 10

 bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
+fi

 # scan the remote machine and install it's identity in our SSH known_hosts file.
 ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"

-ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
-ssh "$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
-
-if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
-# send an updated ~/.bashrc so we have quicker access to cli tools
-scp ./btcpayserver/bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
-ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
-ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
-fi
-fi
+# create a directory to store backup archives. This is on all new vms.
+ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
@@ -11,10 +11,8 @@ export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
 export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
 export ADMIN_ACCOUNT_USERNAME="info"
 export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
-export REMOTE_GHOST_PATH="$REMOTE_DATA_PATH/ghost"
-export REMOTE_NEXTCLOUD_PATH="$REMOTE_DATA_PATH/nextcloud"
-export REMOTE_GITEA_PATH="$REMOTE_DATA_PATH/gitea"
-export REMOTE_CLAMS_PATH="$REMOTE_DATA_PATH/clams"
+export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
+export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
 export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"

@@ -5,8 +5,6 @@ cd "$(dirname "$0")"

 VIRTUAL_MACHINE=base
 LXD_HOSTNAME=
-SSDATA_VOLUME_NAME=
-BACKUP_VOLUME_NAME=

 # grab any modifications from the command line.
 for i in "$@"; do
@@ -19,14 +17,6 @@ for i in "$@"; do
 VIRTUAL_MACHINE="${i#*=}"
 shift
 ;;
---ss-volume-name=*)
-SSDATA_VOLUME_NAME="${i#*=}"
-shift
-;;
---backup-volume-name=*)
-BACKUP_VOLUME_NAME="${i#*=}"
-shift
-;;
 *)
 echo "Unexpected option: $1"
 exit 1
@@ -141,9 +131,7 @@ EOF

 fi

-fi
-
-if [ "$VIRTUAL_MACHINE" = base ]; then
 cat >> "$YAML_PATH" <<EOF
 runcmd:
 - sudo mkdir -m 0755 -p /etc/apt/keyrings
@@ -152,12 +140,44 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
 - sudo apt-get update
 - sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
 - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
+- sudo chown -R ubuntu:ubuntu /home/ubuntu/
+
 EOF

-fi
-
-if [ "$VIRTUAL_MACHINE" != base ]; then
+# write_files:
+# - path: /etc/ssh/sshd_config
+# content: |
+# Port 22
+# ListenAddress 0.0.0.0
+# Protocol 2
+# ChallengeResponseAuthentication no
+# PasswordAuthentication no
+# UsePAM no
+# LogLevel INFO
+
+# - path: /etc/docker/daemon.json
+# content: |
+# {
+# "registry-mirrors": "${REGISTRY_URL}",
+# "labels": "githead=${LATEST_GIT_COMMIT}"
+# }
+#"labels": [githead="${LATEST_GIT_COMMIT}"]
+
+# apt:
+# sources:
+# docker.list:
+# source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
+# keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+
+# - sudo apt-get update
+#- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+
+else
 # all other machines that are not the base image
 cat >> "$YAML_PATH" <<EOF
 user.vendor-data: |
@@ -170,30 +190,6 @@ if [ "$VIRTUAL_MACHINE" != base ]; then
 preserve_hostname: true
 fqdn: ${FQDN}

-resize_rootfs: false
-
-disk_setup:
-/dev/sdb:
-table_type: 'gpt'
-layout: true
-overwrite: false
-
-fs_setup:
-- label: docker-data
-filesystem: 'ext4'
-device: '/dev/sdb1'
-overwrite: false
-
-mounts:
-- [ sdb, /var/lib/docker ]
-
-mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
-
-EOF
-fi
-
-if [ "$VIRTUAL_MACHINE" != base ]; then
-cat >> "$YAML_PATH" <<EOF
 user.network-config: |
 version: 2
 ethernets:
@@ -208,6 +204,7 @@ if [ "$VIRTUAL_MACHINE" != base ]; then
 enp6s0:
 dhcp4: true

+
 EOF

 fi
@@ -225,21 +222,6 @@ devices:
 type: disk
 EOF

-if [ "$VIRTUAL_MACHINE" != base ]; then
-cat >> "$YAML_PATH" <<EOF
-ss-data:
-path: ${REMOTE_DATA_PATH}
-pool: ss-base
-source: ${SSDATA_VOLUME_NAME}
-type: disk
-ss-backup:
-path: ${REMOTE_BACKUP_PATH}
-pool: ss-base
-source: ${BACKUP_VOLUME_NAME}
-type: disk
-EOF
-fi
-
 # Stub out the network piece for the base image.
 if [ "$VIRTUAL_MACHINE" = base ]; then
 cat >> "$YAML_PATH" <<EOF
@@ -268,19 +250,9 @@ EOF
 fi

 # let's create a profile for the BCM TYPE-1 VMs. This is per VM.
-if [ "$VIRTUAL_MACHINE" = base ]; then
-if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
-lxc profile create "$LXD_HOSTNAME" --project default
-fi
-
-# configure the profile with our generated cloud-init.yml file.
-cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
-else
 if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
 lxc profile create "$LXD_HOSTNAME"
 fi

 # configure the profile with our generated cloud-init.yml file.
 cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
-fi

@@ -18,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 # with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates
 # at a cachehing reverse proxy that runs nginx.

-ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs"
+ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs"

 # this is minimum required; www and btcpay.
 DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
@@ -26,7 +26,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
 if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
 if [ "$DEPLOY_CLAMS" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
-if [ "$DEPLOY_NOSTR" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi
+if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi

 # if BTCPAY_ALT_NAMES has been set by the admin, iterate over the list
@@ -38,7 +38,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 done
 fi

-GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
+GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"

 # execute the certbot command that we dynamically generated.
 eval "$GENERATE_CERT_STRING"
43 www/go.sh
@@ -7,10 +7,12 @@ cd "$(dirname "$0")"
 DOCKER_HOST="ssh://ubuntu@$WWW_FQDN"
 export DOCKER_HOST="$DOCKER_HOST"

+# prepare clams images and such
+#./prepare_clams.sh
+
 # Create the nginx config file which covers all domainys.
 bash -c ./stub/nginx_config.sh

-BUILD_CLAMS=false
 for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 export DOMAIN_NAME="$DOMAIN_NAME"
 export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
@@ -20,10 +22,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 source "$SITE_PATH/site.conf"
 source ../domain_env.sh

-if [ "$DEPLOY_CLAMS" = true ]; then
-BUILD_CLAMS=true
-fi
-
 ### Let's check to ensure all the requiredsettings are set.
 if [ "$DEPLOY_GHOST" = true ]; then
 if [ -z "$GHOST_MYSQL_PASSWORD" ]; then
@@ -60,15 +58,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 fi
 fi

-
-if [ "$DEPLOY_NOSTR" = true ]; then
-if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
-echo "ERROR: When deploying nostr, you MUST specify NOSTR_ACCOUNT_PUBKEY."
-exit 1
-fi
-fi
-
-
 if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
 echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site.conf."
 exit 1
@@ -83,6 +72,7 @@ done

 ./stop_docker_stacks.sh

+
 # TODO check if there are any other stacks that are left running (other than reverse proxy)
 # if so, this may mean the user has disabled one or more domains and that existing sites/services
 # are still running. We should prompt the user of this and quit. They have to go manually docker stack remove these.
@@ -92,8 +82,20 @@ if [[ $(docker stack list | wc -l) -gt 2 ]]; then
 exit 1
 fi

 # ok, the backend stacks are stopped.
 if [ "$RESTART_FRONT_END" = true ]; then
+# remove the nginx stack
+if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
+sleep 2
+
+docker stack rm reverse-proxy
+
+# wait for all docker containers to stop.
+# TODO see if there's a way to check for this.
+sleep 20
+fi
+
 # generate the certs and grab a backup
 if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
@@ -114,8 +116,8 @@ if [ "$RESTART_FRONT_END" = true ]; then
 source ../domain_env.sh

 # these variable are used by both backup/restore scripts.
-export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER"
+export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
-export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
+export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

 # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
 export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
@@ -128,13 +130,6 @@ if [ "$RESTART_FRONT_END" = true ]; then
 ./backup_path.sh
 fi
 done
-else
-exit 0
-fi
-
-# build the clams docker image
-if [ "$BUILD_CLAMS" = true ]; then
-./clams/build.sh
 fi

 # nginx gets deployed first since it "owns" the docker networks of downstream services.
@@ -146,8 +141,6 @@ fi
 ./stub/gitea_yml.sh
 ./stub/nostr_yml.sh

-
-
 # # start a browser session; point it to port 80 to ensure HTTPS redirect.
 # # WWW_FQDN is in our certificate, so we resolve to that.
 # wait-for-it -t 320 "$WWW_FQDN:80"
35 www/prepare_clams.sh (Executable file)
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+set -e
+cd "$(dirname "$0")"
+
+# deploy clams wallet.
+LOCAL_CLAMS_REPO_PATH="$(pwd)/clams"
+CLAMS_APP_DOCKER_REPO_URL="https://github.com/farscapian/clams-app-docker"
+if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
+git clone "$CLAMS_APP_DOCKER_REPO_URL" "$LOCAL_CLAMS_REPO_PATH"
+else
+cd "$LOCAL_CLAMS_REPO_PATH"
+git config --global pull.rebase false
+git pull
+cd -
+fi
+
+# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME"
+BROWSER_APP_GIT_TAG="1.5.0"
+BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app"
+BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG"
+
+# build the browser-app image.
+if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then
+docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \
+--build-arg VERSION="$BROWSER_APP_GIT_TAG" \
+-t "$BROWSER_APP_IMAGE_NAME" \
+"$(pwd)/clams/frontend/browser-app/"
+fi
+
+# If the clams-root volume doesn't exist, we create and seed it.
+if ! docker volume list | grep -q clams-root; then
+docker volume create clams-root
+docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
+fi
@ -37,4 +37,4 @@ scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"

# reset folder owner to ubuntu
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_DATA_PATH/$APP"
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
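Before a forced restore like the one above, it can be worth confirming the archive chain duplicity sees at the target; collection-status works against the same file:// URL (a sketch, same variables as the surrounding script):

# sketch: list the backup sets available at the restore source
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity \
    collection-status "file://$REMOTE_BACKUP_PATH/$APP"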
@ -26,8 +26,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do

# these variables are used by both backup/restore scripts.
export APP="$APP"
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
@ -39,19 +39,11 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do

if [ "$RESTORE_WWW" = true ]; then
    ./restore_path.sh
fi
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
if [ "$BACKUP_APPS" = true ]; then
    # if we're not restoring, then we may or may not back up.
    ./backup_path.sh
fi
done
done
done

# remove the nginx stack
if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
    docker stack rm reverse-proxy
    sleep 5
fi

@ -3,7 +3,6 @@
set -eu
cd "$(dirname "$0")"

DEPLOY_STACK=false
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
@ -13,10 +12,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

if [ "$DEPLOY_GHOST" = true ]; then
    DEPLOY_STACK=true
fi

# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
@ -24,8 +19,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"

# ensure directories on remote host exist so we can mount them into the containers.
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/db"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"

export GHOST_STACK_TAG="ghost-$STACK_NAME"
export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME"
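These mkdir calls matter because the generated compose file bind-mounts the same paths into the ghost and MySQL containers; if a directory is missing on the host, the swarm task fails to start. A defensive variant (a sketch, not taken from the diff) could verify them after creation:

# sketch: fail fast if the remote bind-mount directories are missing
for REMOTE_DIR in "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" \
                  "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"; do
    if ! ssh "$PRIMARY_WWW_FQDN" test -d "$REMOTE_DIR"; then
        echo "ERROR: $REMOTE_DIR does not exist on $PRIMARY_WWW_FQDN." >&2
        exit 1
    fi
done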
@ -49,7 +44,7 @@ EOL
      - ghostnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
      - ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
    volumes:
      - ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
      - ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
    environment:
EOL
if [ "$LANGUAGE_CODE" = "en" ]; then
@ -79,7 +74,7 @@ EOL
    networks:
      - ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
    volumes:
      - ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
      - ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
      - MYSQL_DATABASE=ghost
@ -108,7 +103,7 @@ EOL
EOL
fi

if [ "$DEPLOY_STACK" = true ] && [ "$STOP_SERVICES" = false ]; then
if [ "$STOP_SERVICES" = false ]; then
    docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
    sleep 2
fi
@ -35,7 +35,7 @@ services:
      - nextcloud-${DOMAIN_IDENTIFIER}-en
      - nextclouddb-${DOMAIN_IDENTIFIER}-en
    volumes:
      - ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
      - ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
    environment:
      - MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
      - MYSQL_DATABASE=nextcloud
@ -55,7 +55,7 @@ services:
    networks:
      - nextclouddb-${DOMAIN_IDENTIFIER}-en
    volumes:
      - ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
      - ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
    environment:
      - MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
      - MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
@ -31,8 +31,6 @@ events {
http {
    client_max_body_size 100m;
    server_tokens off;
    sendfile on;
    include mime.types;

    # the next two set commands and the connection_upgrade block come from https://docs.btcpayserver.org/FAQ/Deployment/#can-i-use-an-existing-nginx-server-as-a-reverse-proxy-with-ssl-termination
    # Needed to allow very long URLs to prevent issues while signing PSBTs
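The directives the BTCPay FAQ has in mind here are typically a larger header buffer plus the connection_upgrade map for proxied websockets; the exact values belong to the linked document, so treat the following as an illustrative sketch appended the same way the script appends its other nginx config:

cat >>"$NGINX_CONF_PATH" <<EOL
    # illustrative only: allow very long request URIs (e.g. PSBT signing links)
    large_client_header_buffers 4 32k;

    # map used by websocket locations further down
    map \$http_upgrade \$connection_upgrade {
        default upgrade;
        ''      close;
    }
EOL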
@ -58,24 +56,6 @@ http {
EOL
fi

if [ "$DEPLOY_CLAMS" = true ]; then
    # clams-browser-app server
    cat >>"$NGINX_CONF_PATH" <<EOL
    # https server block for https://${CLAMS_FQDN}
    server {
        listen 80;
        server_name ${CLAMS_FQDN};

        location / {
            return 301 https://${CLAMS_FQDN}\$request_uri;
        }
    }

EOL
fi

# ghost http to https redirects.
cat >>"$NGINX_CONF_PATH" <<EOL
    # http://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
@ -200,7 +180,7 @@ EOL

EOL

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
    # We return a JSON object with name/pubkey mapping per NIP05.
    # https://www.reddit.com/r/nostr/comments/rrzk76/nip05_mapping_usernames_to_dns_domains_by_fiatjaf/sssss
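The hunk cuts off before the block the script appends, but a NIP-05 responder generally reduces to serving /.well-known/nostr.json with a name-to-pubkey map; a minimal sketch in the same heredoc style (the CORS header and the "_" catch-all name are assumptions, not taken from the diff):

cat >>"$NGINX_CONF_PATH" <<EOL
    location = /.well-known/nostr.json {
        default_type application/json;
        add_header Access-Control-Allow-Origin '*';
        return 200 '{ "names": { "_": "${NOSTR_ACCOUNT_PUBKEY}" } }';
    }
EOL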
@ -227,7 +207,7 @@ EOL

EOL

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
    # wss://$NOSTR_FQDN server block
    server {
@ -283,32 +263,36 @@ EOL

EOL

if [ "$DEPLOY_CLAMS" = true ]; then
    # clams-browser-app server
    cat >>"$NGINX_CONF_PATH" <<EOL
    # https server block for https://${CLAMS_FQDN}
    server {
        listen 443 ssl;

        ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
        ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
        ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;

        server_name ${CLAMS_FQDN};

        server_tokens off;
        autoindex off;
        gzip_static on;

        root /browser-app;
        index 200.html;
    }

EOL
fi

# Clams server entry
# cat >>"$NGINX_CONF_PATH" <<EOL
# # https server block for https://${CLAMS_FQDN}
# server {
# listen 443 ssl http2;

# ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
# ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;

# server_name ${CLAMS_FQDN};
# index index.js;

# root /apps/clams;
# index 200.htm;

# location / {
# try_files \$uri \$uri/ /200.htm;
# }

# location ~* \.(?:css|js|jpg|svg)$ {
# expires 30d;
# add_header Cache-Control "public";
# }

# }

# EOL

if [ "$DEPLOY_GHOST" = true ]; then
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
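The loop body falls outside this hunk, but a per-stack cache definition in nginx is normally a proxy_cache_path line keyed by the stack name; a hedged sketch of what each appended line might look like (path and sizing are assumptions):

# illustrative only: one cache zone per ghost stack
echo "    proxy_cache_path /tmp/${STACK_NAME} levels=1:2 keys_zone=${STACK_NAME}:10m max_size=100m inactive=24h;" >>"$NGINX_CONF_PATH"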
@ -339,6 +323,14 @@ EOL

EOL

# # add the Onion-Location header if specified.
# if [ "$DEPLOY_ONION_SITE" = true ]; then
# cat >>"$NGINX_CONF_PATH" <<EOL
# add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;

# EOL
# fi

for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"

@ -440,7 +432,7 @@ EOL
}

EOL
fi

if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
@ -522,6 +514,31 @@ EOL
EOL
fi

# deploy Clams browser app under the primary domain.
if [ $iteration = 0 ]; then

    cat >> "$NGINX_CONF_PATH" <<EOF

    # server block for the clams browser-app; just a static website
    server {
        listen 443 ssl;

        server_name ${CLAMS_FQDN};

        autoindex off;
        server_tokens off;

        gzip_static on;

        root /browser-app;
        index 200.html;
    }

EOF

fi

iteration=$((iteration+1))
done

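One design note on this static server block: gzip_static on only serves a pre-compressed file when a matching .gz already sits next to the asset under /browser-app, so it relies on the browser-app build emitting those files. A quick smoke test from the management machine once the reverse proxy is up (a sketch):

# sketch: the clams FQDN should answer over TLS with the static 200.html entry point
curl -sSI "https://$CLAMS_FQDN" | head -n 1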
@ -30,12 +30,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do

for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# We create another ghost instance under /

if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
EOL
fi

if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = "true" ]; then
@ -50,7 +47,7 @@ EOL
EOL
fi

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - nostrnet-$DOMAIN_IDENTIFIER-en
EOL
@ -63,11 +60,11 @@ done

cat >> "$DOCKER_YAML_PATH" <<EOL
    volumes:
      - ${REMOTE_DATA_PATH_LETSENCRYPT}:/etc/letsencrypt:ro
      - ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
EOL
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - ${REMOTE_CLAMS_PATH}:/browser-app
      - clams-browser-app:/browser-app:ro
EOL
fi

@ -85,11 +82,15 @@ configs:

EOL

################ NETWORKS SECTION

cat >> "$DOCKER_YAML_PATH" <<EOL
networks:
EOL

for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
@ -99,24 +100,21 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE:
    attachable: true

EOL
fi

if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  giteanet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi

@ -124,25 +122,33 @@ EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
  nextcloudnet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  nostrnet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi
fi
done
done

if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
  clams-browser-app:
    external: true
    name: clams-root
EOL
fi
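Because the volume is declared external, the proxy task can only start if clams-root already exists on the node, which is what www/prepare_clams.sh sets up earlier. A small guard that could run before the deploy (a sketch, not in the diff):

# sketch: fail early if the seeded clams-root volume is missing
if [ "$DEPLOY_CLAMS" = true ] && ! docker volume inspect clams-root >/dev/null 2>&1; then
    echo "ERROR: docker volume 'clams-root' not found; run www/prepare_clams.sh first." >&2
    exit 1
fi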

if [ "$STOP_SERVICES" = false ]; then
# for some reason we need to wait here. See if there's a fix; poll for service readiness?
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
sleep 5
docker stack deploy -c "$DOCKER_YAML_PATH" reverse-proxy
# iterate over all our domains and create the nginx config file.
sleep 3
sleep 1
fi
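On the open question in the old comment, a fixed sleep can be replaced by polling the swarm until every service in the stack reports all replicas running; a hedged sketch (the 60-second cap is an arbitrary choice):

# sketch: wait until every reverse-proxy service shows replicas like "1/1", or give up after ~60s
for _ in $(seq 1 60); do
    PENDING="$(docker service ls --filter label=com.docker.stack.namespace=reverse-proxy --format '{{.Replicas}}' | awk -F/ '$1 != $2')"
    if [ -z "$PENDING" ]; then
        break
    fi
    sleep 1
done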
@ -14,8 +14,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_DATA_PATH/nostr"
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"
NOSTR_PATH="$REMOTE_NOSTR_PATH/$DOMAIN_NAME"
NOSTR_CONFIG_PATH="$SITE_PATH/webstack/nostr.config"

@ -25,6 +25,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOCKER_YAML_PATH="$SITE_PATH/webstack/nostr.yml"

NET_NAME="nostrnet-$DOMAIN_IDENTIFIER"
DBNET_NAME="nostrdbnet-$DOMAIN_IDENTIFIER"

# here's the NGINX config. We support ghost and nextcloud.
echo "" > "$DOCKER_YAML_PATH"
@ -89,5 +90,7 @@ EOL
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
sleep 1
fi

fi

done