Compare commits

...

2 Commits

Author SHA1 Message Date
2f16d2de0a  Wire up project repo.  2023-03-06 14:44:36 -05:00
0510a36522  Delete project for repo refactor.  2023-03-06 14:33:25 -05:00
33 changed files with 23 additions and 2795 deletions

deployment/.gitignore

@@ -0,0 +1,2 @@
# this is tracked in a distinct git repo.
project

@@ -1 +0,0 @@
core-lightning

@@ -1,39 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
# take the services down, create a backup archive, then pull it down.
# the script executed here from the BTCPAY repo will automatically take services down
# and bring them back up.
echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
sleep 5
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# TODO: not sure if this is necessary, but we want to give the VM additional time to take down all services
# that way processes can run shutdown procedures and leave files in the correct state.
sleep 10
# TODO enable encrypted archives
# TODO switch to btcpay-backup.sh when on LXD fully.
scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
# next we pull the resulting backup archive down to our management machine.
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
# if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
BACKUP_BTCPAY_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$(date +%s).tar.gz"
fi
mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."

@@ -1,6 +0,0 @@
# we append this text to the btcpay server /home/ubuntu/.bashrc so
# logged-in users have more convenient access to the various CLI tools.
# (note: trailing arguments are appended to aliases automatically, so "$@" is unneeded.)
alias bitcoin-cli="bitcoin-cli.sh"
alias lightning-cli="bitcoin-lightning-cli.sh"

@@ -1,58 +0,0 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"
# run the btcpay setup script if it hasn't been done before.
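# note: the [[ ! -f ... ]] test executes on the remote host, while 'echo $?'
# runs locally and captures ssh's exit status (which mirrors the remote test),
# so 0 here means the btcpay.complete flag file does NOT exist yet.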
if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
./stub_btcpay_setup.sh
BACKUP_BTCPAY=false
fi
RUN_SERVICES=true
# we will re-run the btcpayserver provisioning scripts if directed to do so.
# if an update does occur, we grab another backup.
if [ "$UPDATE_BTCPAY" = true ]; then
# run the update.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# btcpay-update.sh brings services back up, but does not take them down.
ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
sleep 20
elif [ "$RESTORE_BTCPAY" = true ]; then
# take the services down before we restore.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
sleep 15
./restore.sh
RUN_SERVICES=true
BACKUP_BTCPAY=false
elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
# the administrator may have indicated a reconfig;
# if so, we re-run setup script.
./stub_btcpay_setup.sh
RUN_SERVICES=true
BACKUP_BTCPAY=false
fi
# if the script gets this far, then we grab a regular backup.
if [ "$BACKUP_BTCPAY" = true ]; then
# we just grab a regular backup
./backup_btcpay.sh
fi
if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
# the default is to bring services back up, though the admin may want to keep
# them off (e.g., for a migration).
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
fi
echo "FINISHED btcpayserver/go.sh"

@@ -1,116 +0,0 @@
#!/bin/bash -e
set -o pipefail -o errexit
# Please be aware of these important issues:
#
# - Old channel state is toxic and you can lose all your funds, if you or someone
# else closes a channel based on the backup with old state - and the state changes
# often! If you publish an old state (say from yesterday's backup) on chain, you
# WILL LOSE ALL YOUR FUNDS IN A CHANNEL, because the counterparty will publish a
# revocation key!
if [ "$(id -u)" != "0" ]; then
echo "INFO: This script must be run as root."
echo " Use the command 'sudo su -' (include the trailing hypen) and try again."
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
backup_dir="$docker_dir/volumes/backup_datadir/_data"
dbdump_path="$docker_dir/$dbdump_name"
backup_path="$backup_dir/backup.tar.gz"
# ensure backup dir exists
if [ ! -d "$backup_dir" ]; then
mkdir -p "$backup_dir"
fi
cd "$btcpay_dir"
. helpers.sh
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
printf "\n"
echo "INFO: Database container is not up and running. Starting BTCPay Server."
docker volume create generated_postgres_datadir
docker-compose -f "$BTCPAY_DOCKER_COMPOSE" up -d postgres
printf "\n"
dbcontainer=$(docker ps -a -q -f "name=postgres_1")
if [ -z "$dbcontainer" ]; then
echo "INFO: Database container could not be started or found."
exit 1
fi
fi
printf "\n"
echo "INFO: Dumping database."
{
docker exec "$dbcontainer" pg_dumpall -c -U postgres | gzip > "$dbdump_path"
echo "INFO: Database dump done."
} || {
echo "ERROR: Dumping failed. Please check the error message above."
exit 1
}
echo "Stopping BTCPay Server..."
btcpay_down
printf "\n"
cd "$docker_dir"
echo "Archiving files in $(pwd)."
{
tar \
--exclude="volumes/backup_datadir" \
--exclude="volumes/generated_bitcoin_datadir/_data/blocks" \
--exclude="volumes/generated_bitcoin_datadir/_data/chainstate" \
--exclude="volumes/generated_bitcoin_datadir/_data/debug.log" \
--exclude="volumes/generated_bitcoin_datadir/_data/testnet3/blocks" \
--exclude="volumes/generated_bitcoin_datadir/_data/testnet3/chainstate" \
--exclude="volumes/generated_bitcoin_datadir/_data/testnet3/debug.log" \
--exclude="volumes/generated_bitcoin_datadir/_data/regtest/blocks" \
--exclude="volumes/generated_bitcoin_datadir/_data/regtest/chainstate" \
--exclude="volumes/generated_bitcoin_datadir/_data/regtest/debug.log" \
--exclude="volumes/generated_postgres_datadir" \
--exclude="volumes/generated_tor_relay_datadir" \
--exclude="volumes/generated_clightning_bitcoin_datadir/_data/lightning-rpc" \
--exclude="**/logs/*" \
-cvzf "$backup_path" "$dbdump_name" volumes/generated_*
echo "INFO: Archive done."
if [ -n "$BTCPAY_BACKUP_PASSPHRASE" ]; then
printf "\n"
echo "INFO: BTCPAY_BACKUP_PASSPHRASE is set, the backup will be encrypted."
{
gpg -o "$backup_path.gpg" --batch --yes -c --passphrase "$BTCPAY_BACKUP_PASSPHRASE" "$backup_path"
rm "$backup_path"
backup_path="$backup_path.gpg"
echo "INFO: Encryption done."
} || {
echo "INFO: Encrypting failed. Please check the error message above."
echo "INFO: Restarting BTCPay Server."
cd "$btcpay_dir"
exit 1
}
fi
} || {
echo "INFO: Archiving failed. Please check the error message above."
echo "Restarting BTCPay Server"
cd "$btcpay_dir"
exit 1
}
printf "Restarting BTCPay Server."
cd "$btcpay_dir"
echo "Cleaning up."
rm "$dbdump_path"
echo "INFO: Backup done => $backup_path."

@@ -1,115 +0,0 @@
#!/bin/bash -e
set -o pipefail -o errexit
if [ "$(id -u)" != "0" ]; then
echo "ERROR: This script must be run as root."
echo "➡️ Use the command 'sudo su -' (include the trailing hypen) and try again."
exit 1
fi
backup_path="$1"
if [ -z "$backup_path" ]; then
echo "ERROR: Usage: btcpay-restore.sh /path/to/backup.tar.gz"
exit 1
fi
if [ ! -f "$backup_path" ]; then
echo "ERROR: $backup_path does not exist."
exit 1
fi
if [[ "$backup_path" == *.gpg && -z "$BTCPAY_BACKUP_PASSPHRASE" ]]; then
echo "INFO: $backup_path is encrypted. Please provide the passphrase to decrypt it."
echo "INFO: Usage: BTCPAY_BACKUP_PASSPHRASE=t0pSeCrEt btcpay-restore.sh /path/to/backup.tar.gz.gpg"
exit 1
fi
# preparation
docker_dir=$(docker volume inspect generated_btcpay_datadir --format="{{.Mountpoint}}" | sed -e "s%/volumes/.*%%g")
restore_dir="$docker_dir/volumes/backup_datadir/_data/restore"
dbdump_name=postgres.sql.gz
btcpay_dir="$BTCPAY_BASE_DIRECTORY/btcpayserver-docker"
# ensure clean restore dir
echo "INFO: Cleaning restore directory $restore_dir."
rm -rf "$restore_dir"
mkdir -p "$restore_dir"
if [[ "$backup_path" == *.gpg ]]; then
echo "INFO: Decrypting backup file."
{
gpg -o "${backup_path%.*}" --batch --yes --passphrase "$BTCPAY_BACKUP_PASSPHRASE" -d "$backup_path"
backup_path="${backup_path%.*}"
echo "SUCESS: Decryption done."
} || {
echo "INFO: Decryption failed. Please check the error message above."
exit 1
}
fi
cd "$restore_dir"
echo "INFO: Extracting files in $(pwd)."
tar -h -xvf "$backup_path" -C "$restore_dir"
# basic control checks
if [ ! -f "$dbdump_name" ]; then
echo "ERROR: '$dbdump_name' does not exist."
exit 1
fi
if [ ! -d "volumes" ]; then
echo "ERROR: volumes directory does not exist."
exit 1
fi
cd "$btcpay_dir"
. helpers.sh
cd "$restore_dir"
{
echo "INFO: Restoring volumes."
# ensure volumes dir exists
if [ ! -d "$docker_dir/volumes" ]; then
mkdir -p "$docker_dir/volumes"
fi
# copy volume directories over
cp -r volumes/* "$docker_dir/volumes/"
# ensure datadirs excluded in backup exist
mkdir -p "$docker_dir/volumes/generated_postgres_datadir/_data"
echo "INFO: Volume restore done."
} || {
echo "INFO: Restoring volumes failed. Please check the error message above."
exit 1
}
{
echo "INFO: Starting database container"
docker-compose -f "$BTCPAY_DOCKER_COMPOSE" up -d postgres
dbcontainer=$(docker ps -a -q -f "name=postgres")
if [ -z "$dbcontainer" ]; then
echo "ERROR: Database container could not be started or found."
exit 1
fi
} || {
echo "ERROR: Starting database container failed. Please check the error message above."
exit 1
}
cd "$restore_dir"
{
echo "INFO: Restoring database..."
gunzip -c "$dbdump_name" | docker exec -i "$dbcontainer" psql -U postgres postgres -a
echo "SUCCESS: Database restore done."
} || {
echo "ERROR: Restoring database failed. Please check the error message above."
exit 1
}
echo "INFO: Cleaning up."
rm -rf "$restore_dir"
echo "SUCCESS: Restore done"

@@ -1,33 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
if [ "$RESTORE_BTCPAY" = false ]; then
exit 0
fi
if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
# push the restoration archive to the remote server
echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
# we clean up any old containers first before restoring.
ssh "$FQDN" docker system prune -f
# push the modified restore script to the remote directory, set permissions, and execute.
scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
# now, we take the services down again, because apparently this needs to be re-executed after a restore.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
echo "ERROR: File does not exist."
exit 1
fi

@@ -1,112 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
# default is for regtest
CLIGHTNING_WEBSOCKET_PORT=9736
if [ "$BITCOIN_CHAIN" = testnet ]; then
CLIGHTNING_WEBSOCKET_PORT=9737
elif [ "$BITCOIN_CHAIN" = mainnet ]; then
CLIGHTNING_WEBSOCKET_PORT=9738
fi
export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT"
# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"
# This is the config for a basic proxy to the listening port 127.0.0.1:2368
# It also supports modern TLS, so SSL certs must be available.
#opt-add-nostr-relay;
export BTCPAYSERVER_GITREPO="https://github.com/btcpayserver/btcpayserver-docker"
cat > "$SITE_PATH/btcpay.sh" <<EOL
#!/bin/bash
set -e
cd "\$(dirname "\$0")"
# wait for cloud-init to complete.
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
if [ ! -d "btcpayserver-docker" ]; then
echo "cloning btcpayserver-docker";
git clone -b master ${BTCPAYSERVER_GITREPO} btcpayserver-docker;
git config --global --add safe.directory /home/ubuntu/btcpayserver-docker
else
cd ./btcpayserver-docker
git pull
git pull --all --tags
cd -
fi
cd btcpayserver-docker
export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
export NBITCOIN_NETWORK="${BITCOIN_CHAIN}"
export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
export BTCPAYGEN_REVERSEPROXY="nginx"
export BTCPAY_ENABLE_SSH=false
export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"
if [ "\$NBITCOIN_NETWORK" != regtest ]; then
# run fast_sync if it's not been done before.
if [ ! -f /home/ubuntu/fast_sync_completed ]; then
cd ./contrib/FastSync
./load-utxo-set.sh
touch /home/ubuntu/fast_sync_completed
cd -
fi
fi
# next we create fragments to customize various aspects of the system
# this block customizes clightning to ensure the correct endpoints are being advertised
# We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
services:
clightning_bitcoin:
environment:
LIGHTNINGD_OPT: |
announce-addr-dns=true
experimental-websocket-port=9736
ports:
- "${CLIGHTNING_WEBSOCKET_PORT}:9736"
expose:
- "9736"
EOF
# run the setup script.
. ./btcpay-setup.sh -i
touch ${REMOTE_HOME}/btcpay.complete
EOL
# send an updated ~/.bashrc so we have quicker access to cli tools
scp ./bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
# send the setup script to the remote machine.
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
# script is executed under sudo
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
# let's give the containers time to spin up
sleep 10
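# note: a quick way to confirm the stack actually came up after the sleep
# (an illustrative check, not part of the original flow):
#   ssh "$BTCPAY_FQDN" "docker ps --format '{{.Names}}: {{.Status}}'"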

@@ -1,42 +0,0 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
./stub_lxc_profile.sh "$BASE_IMAGE_VM_NAME"
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
# if the base-image VM already exists without the expected snapshot, delete it so we can start fresh.
if lxc list -q --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$LXD_UBUNTU_BASE_VERSION"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME"
lxc start "$BASE_IMAGE_VM_NAME"
sleep 30
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it 127.0.0.1:22 -t 120
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME"
lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$LXD_UBUNTU_BASE_VERSION"
fi
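# note: the snapshot taken above is what deploy_vms.sh later instantiates new
# VMs from, e.g.:
#   lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$LXD_UBUNTU_BASE_VERSION" "$LXD_VM_NAME"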

@@ -1,451 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
LATEST_GIT_COMMIT="$(cat ../../.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
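# note: reading .git/refs/heads/master directly fails once refs are packed;
# LATEST_GIT_COMMIT="$(git rev-parse master)" would be the more robust equivalent.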
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs lxc; do
if ! command -v "$cmd" >/dev/null 2>&1; then
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
exit 1
fi
done
# do a spot check; if we are on production warn.
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running command against a production system!"
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommitted changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
DOMAIN_NAME=
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
BACKUP_CERTS=true
BACKUP_APPS=true
BACKUP_BTCPAY=true
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
SKIP_BTCPAY=false
UPDATE_BTCPAY=false
RECONFIGURE_BTCPAY_SERVER=false
CLUSTER_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false
RESTART_FRONT_END=true
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-www)
RESTORE_WWW=true
BACKUP_APPS=false
RUN_CERT_RENEWAL=false
RESTART_FRONT_END=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
BACKUP_BTCPAY=false
RUN_CERT_RENEWAL=false
shift
;;
--backup-certs)
BACKUP_CERTS=true
shift
;;
--no-backup-www)
BACKUP_CERTS=false
BACKUP_APPS=false
shift
;;
--stop)
STOP_SERVICES=true
RESTART_FRONT_END=true
shift
;;
--restart-front-end)
RESTART_FRONT_END=true
shift
;;
--domain=*)
DOMAIN_NAME="${i#*=}"
shift
;;
--backup-archive-path=*)
BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
shift
;;
--update-btcpay)
UPDATE_BTCPAY=true
shift
;;
--skip-www)
SKIP_WWW=true
shift
;;
--skip-btcpay)
SKIP_BTCPAY=true
shift
;;
--backup-ghost)
BACKUP_APPS=true
shift
;;
--no-cert-renew)
RUN_CERT_RENEWAL=false
shift
;;
--reconfigure-btcpay)
RECONFIGURE_BTCPAY_SERVER=true
shift
;;
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
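# for illustration, a typical invocation of this script (however it is named in
# your checkout) might be: ./deploy.sh --domain=example.com --update-btcpay -y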
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set event when the RESTORE_BTCPAY = true. "
exit 1
fi
# set up our default paths.
source ../../defaults.sh
export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS"
export BACKUP_APPS="$BACKUP_APPS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export CLUSTER_NAME="$CLUSTER_NAME"
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
if [ ! -f "$SSH_PUBKEY_PATH" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
# ensure our cluster path is created.
mkdir -p "$CLUSTER_PATH"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
#########################################
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster'."
exit 1
fi
source "$CLUSTER_DEFINITION"
# this is our password generation mechanism; we rely on GPG for secure random generation.
function new_pass {
gpg --gen-random --armor 1 25
}
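# new_pass emits 25 random bytes as a single base64/armored line (~36 characters).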
function stub_site_definition {
mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites"
# create a symlink from the CLUSTERPATH/sites/DOMAIN_NAME to the ss-sites/domain name
if [ ! -d "$PROJECT_PATH/sites/$DOMAIN_NAME" ]; then
ln -s "$SITE_PATH" "$PROJECT_PATH/sites/$DOMAIN_NAME"
fi
if [ ! -f "$SITE_PATH/site_definition" ]; then
# check to see if the env file exists; exit if not.
SITE_DEFINITION_PATH="$SITE_PATH/site_definition"
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
# stub out a site_definition with new passwords.
cat >"$SITE_DEFINITION_PATH" <<EOL
#!/bin/bash
export DOMAIN_NAME="${DOMAIN_NAME}"
#export BTCPAY_ALT_NAMES="tip,store,pay,send"
export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
export GITEA_MYSQL_PASSWORD="$(new_pass)"
export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1
fi
fi
}
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects"
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"
# create a symlink from ./clusterpath/projects/project
if [ ! -d "$CLUSTER_PATH/projects/$PROJECT_NAME" ]; then
ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$PROJECT_NAME"
fi
# create the lxc project as specified by PROJECT_NAME
if ! lxc project list | grep -q "$PROJECT_NAME"; then
echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
lxc project create "$PROJECT_NAME"
lxc project set "$PROJECT_NAME" features.networks=true
fi
# # check if we need to provision a new lxc project.
# if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
# echo "INFO: switch to lxd project '$PROJECT_NAME'."
# lxc project switch "$PROJECT_NAME"
# fi
# check to see if the env file exists; exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# stub out a project_definition
cat >"$PROJECT_DEFINITION_PATH" <<EOL
#!/bin/bash
# see https://www.sovereign-stack.org/project-definition for more info.
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
export PRIMARY_DOMAIN=
#export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="6"
export WWW_SERVER_MEMORY_MB="4096"
EOL
chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/projects/"
exit 1
fi
# source the project definition.
source "$PROJECT_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project_definition."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
fi
export DOMAIN_LIST="$DOMAIN_LIST"
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
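# e.g. DOMAIN_LIST="a.tld,b.tld,c.tld" contains two commas, so DOMAIN_COUNT=3.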
# let's provision our primary domain first.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
stub_site_definition
# bring the VMs up under the primary domain name.
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"
# iterate over all our server endpoints and provision them if needed.
# www
VPS_HOSTNAME=
for VIRTUAL_MACHINE in www btcpayserver; do
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
FQDN=
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site_definition"
source ./domain_env.sh
# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: DOMAIN_NAME not specified. Use the --domain-name= option."
exit 1
fi
# # switch to the default project
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv | grep lxdbr0 | grep -q ss-config; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
echo "ERROR: the MACVLAN interface has not been specified. You may need to run ss-cluster again."
exit 1
fi
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
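# the csv row is assumed to look roughly like: lxdbr0,...,"ss-config,eno1,...",...
# awk -F'"' extracts the quoted ss-config blob, and cut takes its second
# comma-separated field as the macvlan parent interface.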
# # switch to the default project to ensure the base image is created.
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# create the lxd base image.
./create_lxc_base.sh
# # now switch to the current chain project.
# if ! lxc project list --format csv | grep -a "$BITCOIN_CHAIN"; then
# lxc project switch "$BITCOIN_CHAIN"
# fi
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
DDNS_HOST=
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$SKIP_WWW" = true ]; then
echo "INFO: Skipping WWW due to command line argument."
continue
fi
VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
DDNS_HOST="$WWW_HOSTNAME"
ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
DDNS_HOST="$BTCPAY_HOSTNAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
if [ "$BITCOIN_CHAIN" = mainnet ]; then
ROOT_DISK_SIZE_GB=150
elif [ "$BITCOIN_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=70
fi
elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
DDNS_HOST="$BASE_IMAGE_VM_NAME"
ROOT_DISK_SIZE_GB=8
else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
exit
fi
export DDNS_HOST="$DDNS_HOST"
export FQDN="$DDNS_HOST.$DOMAIN_NAME"
export LXD_VM_NAME="${FQDN//./-}"
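# e.g. an FQDN of "btcpay.example.com" yields the LXD_VM_NAME "btcpay-example-com".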
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
./deploy_vms.sh
if [ "$VIRTUAL_MACHINE" = www ]; then
# this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0
fi
fi
done
# let's stub out the rest of our site definitions, if any.
for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# stub out the site_definition if it doesn't exist.
stub_site_definition
done
# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ]; then
./www/go.sh
ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
fi
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ]; then
./btcpayserver/go.sh
ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
fi

@@ -1,55 +0,0 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
## note: this if clause looks odd because the heredoc below must stay left-aligned (<<- only strips leading tabs).
SSH_STRING="Host ${FQDN}"
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
########## BEGIN
cat >> "$SSH_HOME/config" <<-EOF
${SSH_STRING}
HostName ${FQDN}
User ubuntu
EOF
###
fi
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
# if the machine doesn't exist, we create it.
if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# create a base image if needed and instantiate a VM.
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
exit 1
fi
./stub_lxc_profile.sh "$LXD_VM_NAME"
lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$LXD_UBUNTU_BASE_VERSION" "$LXD_VM_NAME"
# now let's create a new VM to work with.
#@lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
lxc start "$LXD_VM_NAME"
bash -c "./wait_for_lxc_ip.sh --lxc-name=$LXD_VM_NAME"
fi
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
# create a directory to store backup archives. This is on all new vms.
ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"

@@ -1,27 +0,0 @@
#!/bin/bash
set -ex
export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
export BTCPAY_FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
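# e.g. for example.com this is the last six hex characters of the domain's
# sha256 digest, something like "3f9a1c" (value illustrative).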
echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"
export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1))
STACK_NAME="$DOMAIN_IDENTIFIER-en"
export NEXTCLOUD_STACK_TAG="nextcloud-$STACK_NAME"
export NEXTCLOUD_DB_STACK_TAG="nextclouddb-$STACK_NAME"

@@ -1,226 +0,0 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
LXD_HOSTNAME="${1:-}"
# generate the custom cloud-init file. Cloud init installs and configures sshd
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
mkdir -p "$PROJECT_PATH/cloud-init"
YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
# start the profile YAML with an empty config block; we fill it in below.
cat > "$YAML_PATH" <<EOF
config:
EOF
if [ "$VIRTUAL_MACHINE" = www ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${WWW_SERVER_CPU_COUNT}"
limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
EOF
else [ "$VIRTUAL_MACHINE" = btcpayserver ];
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
EOF
fi
if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
# this is for the base image only...
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
package_update: true
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: false
fqdn: ${BASE_IMAGE_VM_NAME}
packages:
- curl
- ssh-askpass
- apt-transport-https
- ca-certificates
- gnupg-agent
- software-properties-common
- lsb-release
- net-tools
- htop
- rsync
- duplicity
- sshfs
- fswatch
- jq
- git
- nano
- wait-for-it
- dnsutils
- wget
groups:
- docker
users:
- name: ubuntu
groups: docker
shell: /bin/bash
lock_passwd: false
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
write_files:
- path: /etc/ssh/sshd_config
content: |
Port 22
ListenAddress 0.0.0.0
Protocol 2
ChallengeResponseAuthentication no
PasswordAuthentication no
UsePAM no
LogLevel INFO
- path: /etc/docker/daemon.json
content: |
{
"registry-mirrors": ["${REGISTRY_URL}"]
}
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo apt-get install -y openssh-server
EOF
# - path: /etc/docker/daemon.json
# content: |
# {
# "registry-mirrors": "${REGISTRY_URL}",
# "labels": "githead=${LATEST_GIT_COMMIT}"
# }
#"labels": [githead="${LATEST_GIT_COMMIT}"]
# apt:
# sources:
# docker.list:
# source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
# keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
# - sudo apt-get update
#- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
# all other machines.
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
package_update: false
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: true
fqdn: ${FQDN}
user.network-config: |
version: 2
ethernets:
enp5s0:
dhcp4: true
match:
macaddress: ${MAC_ADDRESS_TO_PROVISION}
set-name: enp5s0
enp6s0:
dhcp4: false
EOF
if [[ "$LXD_HOSTNAME" = $WWW_HOSTNAME-* ]]; then
cat >> "$YAML_PATH" <<EOF
addresses: [10.139.144.5/24]
nameservers:
addresses: [10.139.144.1]
EOF
fi
if [[ "$LXD_HOSTNAME" = $BTCPAY_HOSTNAME-* ]]; then
cat >> "$YAML_PATH" <<EOF
addresses: [10.139.144.10/24]
nameservers:
addresses: [10.139.144.1]
EOF
fi
fi
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat >> "$YAML_PATH" <<EOF
description: Default LXD profile for ${FILENAME}
devices:
root:
path: /
pool: ss-base
type: disk
config:
source: cloud-init:config
type: disk
EOF
# Stub out the network piece for the base image.
if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ] ; then
#
cat >> "$YAML_PATH" <<EOF
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${PRIMARY_DOMAIN}
EOF
fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"

@@ -1,45 +0,0 @@
#!/bin/bash
set -ex
LXC_INSTANCE_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxc-name=*)
LXC_INSTANCE_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
exit 1
fi
if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
exit 1
fi
IP_V4_ADDRESS=
while true; do
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
if [ -n "$IP_V4_ADDRESS" ]; then
# give the machine extra time to spin up.
wait-for-it -t 300 "$IP_V4_ADDRESS:22"
echo ""
break
else
sleep 1
printf '.'
fi
done

@@ -1 +0,0 @@
clams

@@ -1,29 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
# this script backs up a source path to a destination folder on the remote VM,
# then pulls that data down to the management environment.
# if the source files to back up don't exist on the remote host, we return.
if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_SOURCE_BACKUP_PATH ]"; then
exit 0
fi
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity "$REMOTE_SOURCE_BACKUP_PATH" "file://$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
SSHFS_PATH="/tmp/sshfs_temp"
mkdir -p "$SSHFS_PATH"
# now let's pull down the latest files from the backup directory.
# create a temp directory to serve as the mountpoint for the remote machine backups directory
sshfs "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH"
# rsync the files from the remote server to our local backup path.
rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"
# finally, unmount the SSHFS filesystem and clean up.
umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH"

@@ -1,45 +0,0 @@
#!/bin/bash
set -ex
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
docker pull certbot/certbot:latest
# iterate over each domain and call certbot
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../domain_env.sh
# on the lxd side, we expose ALL our services from one IP address, which terminates
# at a caching reverse proxy running nginx.
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs"
# this is the minimum required: www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
if [ "$DEPLOY_CLAMS" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi
# if BTCPAY_ALT_NAMES has been set by the admin, iterate over the list
# and append the domain names to the certbot request
if [ -n "$BTCPAY_ALT_NAMES" ]; then
# let's stub out the rest of our site definitions, if any.
for ALT_NAME in ${BTCPAY_ALT_NAMES//,/ }; do
DOMAIN_STRING="$DOMAIN_STRING -d $ALT_NAME.$DOMAIN_NAME"
done
fi
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
# execute the certbot command that we dynamically generated.
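# (eval is required so the -d flags packed into DOMAIN_STRING are word-split
# back into separate arguments rather than passed as one quoted string.)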
eval "$GENERATE_CERT_STRING"
done

@@ -1,162 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
# redirect all docker commands to the remote host.
DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
export DOCKER_HOST="$DOCKER_HOST"
# prepare clams images and such
./prepare_clams.sh
# Create the nginx config file which covers all domains.
bash -c ./stub/nginx_config.sh
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../domain_env.sh
### Let's check to ensure all the required settings are set.
if [ "$DEPLOY_GHOST" = true ]; then
if [ -z "$GHOST_MYSQL_PASSWORD" ]; then
echo "ERROR: Ensure GHOST_MYSQL_PASSWORD is configured in your site_definition."
exit 1
fi
if [ -z "$GHOST_MYSQL_ROOT_PASSWORD" ]; then
echo "ERROR: Ensure GHOST_MYSQL_ROOT_PASSWORD is configured in your site_definition."
exit 1
fi
fi
if [ "$DEPLOY_GITEA" = true ]; then
if [ -z "$GITEA_MYSQL_PASSWORD" ]; then
echo "ERROR: Ensure GITEA_MYSQL_PASSWORD is configured in your site_definition."
exit 1
fi
if [ -z "$GITEA_MYSQL_ROOT_PASSWORD" ]; then
echo "ERROR: Ensure GITEA_MYSQL_ROOT_PASSWORD is configured in your site_definition."
exit 1
fi
fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
if [ -z "$NEXTCLOUD_MYSQL_ROOT_PASSWORD" ]; then
echo "ERROR: Ensure NEXTCLOUD_MYSQL_ROOT_PASSWORD is configured in your site_definition."
exit 1
fi
if [ -z "$NEXTCLOUD_MYSQL_PASSWORD" ]; then
echo "ERROR: Ensure NEXTCLOUD_MYSQL_PASSWORD is configured in your site_definition."
exit 1
fi
fi
if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site_definition."
exit 1
fi
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: Ensure DOMAIN_NAME is configured in your site_definition."
exit 1
fi
done
./stop_docker_stacks.sh
# TODO check if there are any other stacks that are left running (other than reverse proxy)
# if so, this may mean the user has disabled one or more domains and that existing sites/services
# are still running. We should prompt the user of this and quit. They have to go manually docker stack remove these.
if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to go remove the docker stacks runnong the remote machine."
echo "exiting."
exit 1
fi
# ok, the backend stacks are stopped.
if [ "$RESTART_FRONT_END" = true ]; then
# remove the nginx stack
if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
sleep 2
docker stack rm reverse-proxy
# wait for all docker containers to stop.
# TODO see if there's a way to check for this.
sleep 20
fi
# generate the certs and grab a backup
if [ "$RUN_CERT_RENEWAL" = true ]; then
./generate_certs.sh
fi
# let's backup all our letsencrypt certs
export APP="letsencrypt"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../domain_env.sh
# these variables are used by both backup/restore scripts.
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
mkdir -p "$LOCAL_BACKUP_PATH"
if [ "$RESTORE_WWW" = true ]; then
sleep 5
echo "STARTING restore_path.sh for letsencrypt."
./restore_path.sh
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, then we may or may not back up.
./backup_path.sh
fi
done
fi
# nginx gets deployed first since it "owns" the docker networks of downstream services.
./stub/nginx_yml.sh
# next run our application stub logic. These deploy the apps too if configured to do so.
./stub/ghost_yml.sh
./stub/nextcloud_yml.sh
./stub/gitea_yml.sh
./stub/nostr_yml.sh
# # start a browser session; point it to port 80 to ensure HTTPS redirect.
# # WWW_FQDN is in our certificate, so we resolve to that.
# wait-for-it -t 320 "$WWW_FQDN:80"
# wait-for-it -t 320 "$WWW_FQDN:443"
# # open bowser tabs.
# if [ "$DEPLOY_GHOST" = true ]; then
# xdg-open "http://$WWW_FQDN" > /dev/null 2>&1
# fi
# if [ "$DEPLOY_NEXTCLOUD" = true ]; then
# xdg-open "http://$NEXTCLOUD_FQDN" > /dev/null 2>&1
# fi
# if [ "$DEPLOY_GITEA" = true ]; then
# xdg-open "http://$GITEA_FQDN" > /dev/null 2>&1
# fi

@@ -1,33 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
# deploy clams wallet.
LOCAL_CLAMS_REPO_PATH="$(pwd)/clams"
BROWSER_APP_GIT_TAG="1.5.0"
BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app"
if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
git clone "$BROWSER_APP_GIT_REPO_URL" "$LOCAL_CLAMS_REPO_PATH"
else
cd "$LOCAL_CLAMS_REPO_PATH"
git config --global pull.rebase false
git pull
cd -
fi
# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME"
BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG"
# build the browser-app image.
if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then
docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \
--build-arg VERSION="$BROWSER_APP_GIT_TAG" \
-t "$BROWSER_APP_IMAGE_NAME" \
"$(pwd)/clams/frontend/browser-app/"
fi
# If the clams-root volume doesn't exist, we create and seed it.
if ! docker volume list | grep -q clams-root; then
docker volume create clams-root
docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
fi

@@ -1,38 +0,0 @@
#!/bin/bash
set -eux
cd "$(dirname "$0")"
FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
if [ "$FILE_COUNT" = 0 ]; then
echo "ERROR: there are no files in the local backup path '$LOCAL_BACKUP_PATH'."
echo "We're going to continue with execution."
exit 0
fi
# if the user said -y at the cli, we can skip this.
if [ "$USER_SAYS_YES" = false ]; then
RESPONSE=
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
fi
# delete the target backup path so we can push restoration files from the management machine.
ssh "$PRIMARY_WWW_FQDN" sudo rm -rf "$REMOTE_SOURCE_BACKUP_PATH"
# scp our local backup directory to the remote machine
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
# now we run duplicity to restore the archive.
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
# reset folder owner to ubuntu
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"

@@ -1,49 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
# bring down all the application stacks (ghost, nextcloud, gitea, nostr).
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../domain_env.sh
### Stop all services.
for APP in ghost nextcloud gitea nostr; do
# backup each language for each app.
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$APP-$LANGUAGE_CODE"
if docker stack list --format "{{.Name}}" | grep -q "$STACK_NAME"; then
docker stack rm "$STACK_NAME"
sleep 2
fi
# these variables are used by both backup/restore scripts.
export APP="$APP"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
# ensure our local backup path exists.
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
mkdir -p "$LOCAL_BACKUP_PATH"
fi
if [ "$RESTORE_WWW" = true ]; then
./restore_path.sh
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, then we may or may not back up.
./backup_path.sh
fi
done
done
done

@@ -1,113 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
# for each language specified in the site_definition, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
# ensure directories on remote host exist so we can mount them into the containers.
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"
export GHOST_STACK_TAG="ghost-$STACK_NAME"
export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME"
# todo append domain number or port number.
WEBSTACK_PATH="$SITE_PATH/webstack"
mkdir -p "$WEBSTACK_PATH"
export DOCKER_YAML_PATH="$WEBSTACK_PATH/ghost-$LANGUAGE_CODE.yml"
# here's the docker stack definition for ghost and its database.
cat > "$DOCKER_YAML_PATH" <<EOL
version: "3.8"
services:
EOL
# This is the ghost for HTTPS (not over Tor)
cat >>"$DOCKER_YAML_PATH" <<EOL
${GHOST_STACK_TAG}:
image: ${GHOST_IMAGE}
networks:
- ghostnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
environment:
EOL
if [ "$LANGUAGE_CODE" = "en" ]; then
cat >>"$DOCKER_YAML_PATH" <<EOL
- url=https://${WWW_FQDN}
EOL
else
cat >>"$DOCKER_YAML_PATH" <<EOL
- url=https://${WWW_FQDN}/${LANGUAGE_CODE}
EOL
fi
cat >>"$DOCKER_YAML_PATH" <<EOL
- database__client=mysql
- database__connection__host=${GHOST_DB_STACK_TAG}
- database__connection__user=ghost
- database__connection__password=\${GHOST_MYSQL_PASSWORD}
- database__connection__database=ghost
- database__pool__min=0
- privacy__useStructuredData=true
deploy:
restart_policy:
condition: on-failure
${GHOST_DB_STACK_TAG}:
image: ${GHOST_DB_IMAGE}
networks:
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=ghost
- MYSQL_USER=ghost
- MYSQL_PASSWORD=\${GHOST_MYSQL_PASSWORD}
deploy:
restart_policy:
condition: on-failure
EOL
cat >>"$DOCKER_YAML_PATH" <<EOL
networks:
EOL
if [ "$DEPLOY_GHOST" = true ]; then
GHOSTNET_NAME="ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
GHOSTDBNET_NAME="ghostdbnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
cat >>"$DOCKER_YAML_PATH" <<EOL
${GHOSTNET_NAME}:
name: "reverse-proxy_ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
external: true
${GHOSTDBNET_NAME}:
EOL
fi
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
sleep 2
fi
done # language code
done # domain list

@@ -1,89 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
if [ "$DEPLOY_GITEA" = true ]; then
GITEA_PATH="$REMOTE_GITEA_PATH/$DOMAIN_NAME/${LANGUAGE_CODE}"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$GITEA_PATH/data" "$GITEA_PATH/db"
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export STACK_TAG="gitea-$STACK_NAME"
export DB_STACK_TAG="giteadb-$STACK_NAME"
export DOCKER_YAML_PATH="$SITE_PATH/webstack/gitea-en.yml"
NET_NAME="giteanet-$DOMAIN_IDENTIFIER"
DBNET_NAME="giteadbnet-$DOMAIN_IDENTIFIER"
# here's the docker stack definition for gitea and its database.
echo "" > "$DOCKER_YAML_PATH"
cat >>"$DOCKER_YAML_PATH" <<EOL
version: "3.8"
services:
${STACK_TAG}:
image: ${GITEA_IMAGE}
volumes:
- ${GITEA_PATH}/data:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
environment:
- USER_UID=1000
- USER_GID=1000
- ROOT_URL=https://${GITEA_FQDN}
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=${DB_STACK_TAG}:3306
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=\${GITEA_MYSQL_PASSWORD}
networks:
- ${NET_NAME}
- ${DBNET_NAME}
deploy:
restart_policy:
condition: on-failure
${DB_STACK_TAG}:
image: ${GITEA_DB_IMAGE}
networks:
- ${DBNET_NAME}
volumes:
- ${GITEA_PATH}/db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=\${GITEA_MYSQL_ROOT_PASSWORD}
- MYSQL_PASSWORD=\${GITEA_MYSQL_PASSWORD}
- MYSQL_DATABASE=gitea
- MYSQL_USER=gitea
deploy:
restart_policy:
condition: on-failure
networks:
EOL
cat >>"$DOCKER_YAML_PATH" <<EOL
${NET_NAME}:
name: "reverse-proxy_${NET_NAME}-${LANGUAGE_CODE}"
external: true
${DBNET_NAME}:
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
sleep 1
fi
fi
done

@@ -1,82 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
# ensure remote directories exist
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/$DOMAIN_NAME/en/db"
ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/$DOMAIN_NAME/en/html"
sleep 2
WEBSTACK_PATH="$SITE_PATH/webstack"
mkdir -p "$WEBSTACK_PATH"
export DOCKER_YAML_PATH="$WEBSTACK_PATH/nextcloud-en.yml"
# here's the docker stack definition for nextcloud and its database.
cat > "$DOCKER_YAML_PATH" <<EOL
version: "3.8"
services:
${NEXTCLOUD_STACK_TAG}:
image: ${NEXTCLOUD_IMAGE}
networks:
- nextcloud-${DOMAIN_IDENTIFIER}-en
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
environment:
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
- MYSQL_HOST=${NEXTCLOUD_DB_STACK_TAG}
- NEXTCLOUD_TRUSTED_DOMAINS=${DOMAIN_NAME}
- OVERWRITEHOST=${NEXTCLOUD_FQDN}
- OVERWRITEPROTOCOL=https
- SERVERNAME=${NEXTCLOUD_FQDN}
deploy:
restart_policy:
condition: on-failure
${NEXTCLOUD_DB_STACK_TAG}:
image: ${NEXTCLOUD_DB_IMAGE}
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb_read_only_compressed=OFF
networks:
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
environment:
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
- MYSQL_DATABASE=nextcloud
- MYSQL_USER=nextcloud
deploy:
restart_policy:
condition: on-failure
networks:
nextcloud-${DOMAIN_IDENTIFIER}-en:
name: "reverse-proxy_nextcloudnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
external: true
nextclouddb-${DOMAIN_IDENTIFIER}-en:
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
sleep 1
fi
fi
done

@@ -1,548 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
# here's the NGINX config. We support ghost and nextcloud.
NGINX_CONF_PATH="$PROJECT_PATH/nginx.conf"
# clear the existing nginx config.
echo "" > "$NGINX_CONF_PATH"
# iterate over all our domains and create the nginx config file.
iteration=0
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
echo "after"
if [ $iteration = 0 ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
events {
worker_connections 1024;
}
http {
client_max_body_size 100m;
server_tokens off;
# next two sets commands and connection_upgrade block come from https://docs.btcpayserver.org/FAQ/Deployment/#can-i-use-an-existing-nginx-server-as-a-reverse-proxy-with-ssl-termination
# Needed to allow very long URLs to prevent issues while signing PSBTs
server_names_hash_bucket_size 128;
proxy_buffer_size 128k;
proxy_buffers 4 256k;
proxy_busy_buffers_size 256k;
client_header_buffer_size 500k;
large_client_header_buffers 4 500k;
# Needed for websocket support (used by Ledger hardware wallets)
map \$http_upgrade \$connection_upgrade {
default upgrade;
'' close;
}
# redirect requests for all non-explicit hostnames to https://${WWW_FQDN}
server {
listen 80 default_server;
return 301 https://${WWW_FQDN}\$request_uri;
}
EOL
fi
# ghost http to https redirects.
cat >>"$NGINX_CONF_PATH" <<EOL
# http://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
server {
listen 80;
server_name ${DOMAIN_NAME};
location / {
# request MAY get another redirect at https://domain.tld for www.
return 301 https://${DOMAIN_NAME}\$request_uri;
}
}
EOL
cat >>"$NGINX_CONF_PATH" <<EOL
# http://${WWW_FQDN} redirect to https://${WWW_FQDN}
server {
listen 80;
server_name ${WWW_FQDN};
return 301 https://${WWW_FQDN}\$request_uri;
}
EOL
# nextcloud http-to-https redirect
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# http://${NEXTCLOUD_FQDN} redirect to https://${NEXTCLOUD_FQDN}
server {
listen 80;
server_name ${NEXTCLOUD_FQDN};
return 301 https://${NEXTCLOUD_FQDN}\$request_uri;
}
EOL
fi
# gitea http to https redirect.
if [ "$DEPLOY_GITEA" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# http://${GITEA_FQDN} redirect to https://${GITEA_FQDN}
server {
listen 80;
server_name ${GITEA_FQDN};
return 301 https://${GITEA_FQDN}\$request_uri;
}
EOL
fi
# let's iterate over BTCPAY_ALT_NAMES and generate our SERVER_NAMES for btcpay server.
BTCPAY_SERVER_NAMES="$BTCPAY_USER_FQDN"
if [ -n "$BTCPAY_ALT_NAMES" ]; then
# append each alternate name to the server_name list.
for ALT_NAME in ${BTCPAY_ALT_NAMES//,/ }; do
BTCPAY_SERVER_NAMES="$BTCPAY_SERVER_NAMES $ALT_NAME.$DOMAIN_NAME"
done
fi
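# e.g. with BTCPAY_ALT_NAMES="pay,invoices" (values illustrative), this yields
# BTCPAY_SERVER_NAMES="btcpay.domain.tld pay.domain.tld invoices.domain.tld"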
# BTCPAY server http->https redirect
cat >>"$NGINX_CONF_PATH" <<EOL
# http://${BTCPAY_USER_FQDN} redirect to https://${BTCPAY_USER_FQDN}
server {
listen 80;
server_name ${BTCPAY_SERVER_NAMES};
return 301 https://\$host\$request_uri;
}
EOL
if [ $iteration = 0 ]; then
# TLS config for ghost.
cat >>"$NGINX_CONF_PATH" <<EOL
# global TLS settings
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.3;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
add_header Strict-Transport-Security "max-age=63072000" always;
ssl_stapling on;
ssl_stapling_verify on;
# default server if hostname not specified.
server {
listen 443 default_server;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
return 403;
}
# maybe helps with Twitter cards.
#map \$http_user_agent \$og_prefix {
# ~*(googlebot|twitterbot)/ /open-graph;
#}
# this map allows us to route the client's request to the correct Ghost instance
# based on the client's browser language setting.
map \$http_accept_language \$lang {
default "";
~es es;
}
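# e.g. a request carrying "Accept-Language: es-ES,es;q=0.9" matches ~es and
# sets \$lang to "es"; anything else leaves \$lang empty, which the ghost
# location blocks below treat as English.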
EOL
fi
cat >>"$NGINX_CONF_PATH" <<EOL
# https://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
server {
listen 443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
server_name ${DOMAIN_NAME};
EOL
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# We return a JSON object with name/pubkey mapping per NIP05.
# https://www.reddit.com/r/nostr/comments/rrzk76/nip05_mapping_usernames_to_dns_domains_by_fiatjaf/sssss
# TODO I'm not sure about the security of this Access-Control-Allow-Origin. Read up and restrict it if possible.
location = /.well-known/nostr.json {
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '{ "names": { "_": "${NOSTR_ACCOUNT_PUBKEY}" }, "relays": { "${NOSTR_ACCOUNT_PUBKEY}": [ "wss://${NOSTR_FQDN}" ] } }';
}
EOL
fi
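# Sketch: once deployed, the NIP-05 mapping can be spot-checked with, e.g.:
#   curl -s "https://$DOMAIN_NAME/.well-known/nostr.json?name=_"
# which should return the JSON document emitted above.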
cat >>"$NGINX_CONF_PATH" <<EOL
# catch all; send request to ${WWW_FQDN}
location / {
return 301 https://${WWW_FQDN}\$request_uri;
}
}
#access_log /var/log/nginx/ghost-access.log;
#error_log /var/log/nginx/ghost-error.log;
EOL
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# wss://$NOSTR_FQDN server block
server {
listen 443 ssl;
server_name ${NOSTR_FQDN};
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
keepalive_timeout 70;
location / {
# proxy all traffic (including websocket upgrades) to the nostr relay
proxy_pass http://nostr-${DOMAIN_IDENTIFIER}:8080;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host \$host;
}
}
EOL
fi
cat >>"$NGINX_CONF_PATH" <<EOL
# https server block for https://${BTCPAY_SERVER_NAMES}
server {
listen 443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
server_name ${BTCPAY_SERVER_NAMES};
# Route everything to the real BTCPay server
location / {
# URL of BTCPay Server
proxy_pass http://10.139.144.10:80;
proxy_set_header Host \$http_host;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
# For websockets (used by Ledger hardware wallets)
proxy_set_header Upgrade \$http_upgrade;
}
}
EOL
# Clams server entry
# cat >>"$NGINX_CONF_PATH" <<EOL
# # https server block for https://${CLAMS_FQDN}
# server {
# listen 443 ssl http2;
# ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
# ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# server_name ${CLAMS_FQDN};
# index index.js;
# root /apps/clams;
# index 200.htm;
# location / {
# try_files \$uri \$uri/ /200.htm;
# }
# location ~* \.(?:css|js|jpg|svg)$ {
# expires 30d;
# add_header Cache-Control "public";
# }
# }
# EOL
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_cache_path /tmp/${STACK_NAME} levels=1:2 keys_zone=${STACK_NAME}:600m max_size=100m inactive=24h;
EOL
done
# the opening of the server block for the ghost HTTPS listener
cat >>"$NGINX_CONF_PATH" <<EOL
# Main HTTPS listener for https://${WWW_FQDN}
server {
listen 443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
server_name ${WWW_FQDN};
# Set the crawler policy.
location = /robots.txt {
add_header Content-Type text/plain;
return 200 "User-Agent: *\\nAllow: /\\n";
}
EOL
# # add the Onion-Location header if specified.
# if [ "$DEPLOY_ONION_SITE" = true ]; then
# cat >>"$NGINX_CONF_PATH" <<EOL
# add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
# EOL
# fi
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
location ~ ^/(ghost/|p/|private/) {
EOL
else
cat >>"$NGINX_CONF_PATH" <<EOL
location ~ ^/${LANGUAGE_CODE}/(ghost/|p/|private/) {
EOL
fi
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_intercept_errors on;
proxy_pass http://ghost-${STACK_NAME}:2368;
}
EOL
done
ROOT_SITE_LANGUAGE_CODES="$SITE_LANGUAGE_CODES"
for LANGUAGE_CODE in ${ROOT_SITE_LANGUAGE_CODES//,/ }; do
cat >>"$NGINX_CONF_PATH" <<EOL
# Location block to back https://${WWW_FQDN}/${LANGUAGE_CODE} or https://${WWW_FQDN}/ if english.
EOL
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
location / {
EOL
if (( "$LANGUAGE_CODE_COUNT" > 1 )); then
# we only need this clause if more than one language is being rendered.
cat >>"$NGINX_CONF_PATH" <<EOL
# Redirect the user to the correct language using the map above.
if ( \$http_accept_language !~* '^en(.*)\$' ) {
#rewrite (.*) \$1/\$lang;
return 302 https://${WWW_FQDN}/\$lang;
}
EOL
fi
else
cat >>"$NGINX_CONF_PATH" <<EOL
location /${LANGUAGE_CODE} {
EOL
fi
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_intercept_errors on;
proxy_pass http://ghost-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}:2368;
# https://stanislas.blog/2019/08/ghost-nginx-cache/ for nginx caching instructions
# Remove cookies which are useless for anonymous visitors and would prevent caching
proxy_ignore_headers Set-Cookie Cache-Control;
proxy_hide_header Set-Cookie;
# Add header for cache status (miss or hit)
add_header X-Cache-Status \$upstream_cache_status;
proxy_cache ${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE};
# Default TTL: 5 seconds
proxy_cache_valid 5s;
# Cache 404 pages for 1h
proxy_cache_valid 404 1h;
# use conditional GET requests to refresh the content from origin servers
proxy_cache_revalidate on;
proxy_buffering on;
# Allows starting a background subrequest to update an expired cache item,
# while a stale cached response is returned to the client.
proxy_cache_background_update on;
# Serve stale cached content when the origin errors or is updating
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
}
EOL
done
# this closes the server block for the ghost HTTPS listener
cat >>"$NGINX_CONF_PATH" <<EOL
}
EOL
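# Sketch: cache behavior can be verified later by requesting a page twice and
# watching the X-Cache-Status header go from MISS to HIT:
#   curl -sI "https://$WWW_FQDN/" | grep -i x-cache-status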
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# TLS listener for ${NEXTCLOUD_FQDN}
server {
listen 443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
server_name ${NEXTCLOUD_FQDN};
location / {
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 64;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://${NEXTCLOUD_STACK_TAG}:80;
}
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/reverse_proxy_configuration.html
location /.well-known/carddav {
return 301 \$scheme://\$host/remote.php/dav;
}
location /.well-known/caldav {
return 301 \$scheme://\$host/remote.php/dav;
}
}
EOL
fi
# TODO this MIGHT be part of the solution for Twitter Cards.
# location /contents {
# resolver 127.0.0.11 ipv6=off valid=5m;
# proxy_set_header X-Real-IP \$remote_addr;
# proxy_set_header Host \$http_host;
# proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto \$scheme;
# proxy_intercept_errors on;
# proxy_pass http://ghost-${DOMAIN_IDENTIFIER}-${SITE_LANGUAGE_CODES}:2368\$og_prefix\$request_uri;
# }
# this piece is for GITEA.
if [ "$DEPLOY_GITEA" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# TLS listener for ${GITEA_FQDN}
server {
listen 443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
server_name ${GITEA_FQDN};
location / {
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 64;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-NginX-Proxy true;
proxy_pass http://gitea-${DOMAIN_IDENTIFIER}-en:3000;
}
}
EOL
fi
# deploy Clams browser app under the primary domain.
if [ $iteration = 0 ]; then
cat >> "$NGINX_CONF_PATH" <<EOF
# server block for the clams browser-app; just a static website
server {
listen 443 ssl;
server_name ${CLAMS_FQDN};
autoindex off;
server_tokens off;
gzip_static on;
root /browser-app;
index 200.html;
}
EOF
fi
iteration=$((iteration+1))
done
# add the closing brace.
cat >>"$NGINX_CONF_PATH" <<EOL
}
EOL
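# Sketch: the generated config can be syntax-checked before deployment by
# mounting it into a throwaway nginx container on the host that holds the
# certificates (nginx -t resolves the ssl_certificate paths):
#   docker run --rm -v "$NGINX_CONF_PATH:/etc/nginx/nginx.conf:ro" \
#     -v "$REMOTE_HOME/letsencrypt:/etc/letsencrypt:ro" "$NGINX_IMAGE" nginx -t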

View File

@ -1,154 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
#https://github.com/fiatjaf/expensive-relay
# NOSTR RELAY WHICH REQUIRES PAYMENTS.
DOCKER_YAML_PATH="$PROJECT_PATH/nginx.yml"
cat > "$DOCKER_YAML_PATH" <<EOL
version: "3.8"
services:
nginx:
image: ${NGINX_IMAGE}
ports:
- 0.0.0.0:443:443
- 0.0.0.0:80:80
networks:
EOL
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# attach nginx to this domain/language's ghost network.
cat >> "$DOCKER_YAML_PATH" <<EOL
- ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
EOL
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- giteanet-$DOMAIN_IDENTIFIER-en
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nextcloudnet-$DOMAIN_IDENTIFIER-en
EOL
fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nostrnet-$DOMAIN_IDENTIFIER-en
EOL
fi
fi
done
done
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
- ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
EOL
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- clams-browser-app:/browser-app:ro
EOL
fi
cat >> "$DOCKER_YAML_PATH" <<EOL
configs:
- source: nginx-config
target: /etc/nginx/nginx.conf
deploy:
restart_policy:
condition: on-failure
configs:
nginx-config:
file: ${PROJECT_PATH}/nginx.conf
EOL
################ NETWORKS SECTION
cat >> "$DOCKER_YAML_PATH" <<EOL
networks:
EOL
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
# for each language specified in the site_definition, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
cat >> "$DOCKER_YAML_PATH" <<EOL
ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE:
attachable: true
EOL
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
giteanet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nextcloudnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nostrnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
fi
done
done
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
clams-browser-app:
external: true
name: clams-root
EOL
fi
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
# give the swarm a moment to start converging.
sleep 1
fi
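# Sketch: confirm the proxy is up and publishing ports 80/443 with:
#   docker stack services reverse-proxy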

View File

@ -1,96 +0,0 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
docker pull "$NOSTR_RELAY_IMAGE"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site_definition"
source ../../domain_env.sh
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"
NOSTR_PATH="$REMOTE_NOSTR_PATH/$DOMAIN_NAME"
NOSTR_CONFIG_PATH="$SITE_PATH/webstack/nostr.config"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$NOSTR_PATH/data" "$NOSTR_PATH/db"
export STACK_TAG="nostr-$DOMAIN_IDENTIFIER"
export DOCKER_YAML_PATH="$SITE_PATH/webstack/nostr.yml"
NET_NAME="nostrnet-$DOMAIN_IDENTIFIER"
DBNET_NAME="nostrdbnet-$DOMAIN_IDENTIFIER"
# here's the docker stack config for the nostr relay.
echo "" > "$DOCKER_YAML_PATH"
cat >>"$DOCKER_YAML_PATH" <<EOL
version: "3.8"
services:
${STACK_TAG}:
image: ${NOSTR_RELAY_IMAGE}
volumes:
- ${NOSTR_PATH}/data:/usr/src/app/db
# environment:
# - USER_UID=1000
networks:
- ${NET_NAME}
configs:
- source: nostr-config
target: /usr/src/app/config.toml
deploy:
restart_policy:
condition: on-failure
networks:
${NET_NAME}:
name: "reverse-proxy_${NET_NAME}-en"
external: true
configs:
nostr-config:
file: ${NOSTR_CONFIG_PATH}
EOL
# documentation: https://git.sr.ht/~gheartsfield/nostr-rs-relay/tree/0.7.0/item/config.toml
cat >"$NOSTR_CONFIG_PATH" <<EOL
[info]
relay_url = "wss://${NOSTR_FQDN}/"
name = "${NOSTR_FQDN}"
description = "A nostr relay for ${DOMAIN_NAME} whitelisted for pubkey ${NOSTR_ACCOUNT_PUBKEY}."
pubkey = "${NOSTR_ACCOUNT_PUBKEY}"
contact = "mailto:${CERTIFICATE_EMAIL_ADDRESS}"
[options]
reject_future_seconds = 1800
[limits]
#messages_per_sec = 3
#max_event_bytes = 131072
#max_ws_message_bytes = 131072
#max_ws_frame_bytes = 131072
#broadcast_buffer = 16384
#event_persist_buffer = 4096
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted, if the variable
# is set.
pubkey_whitelist = [ "${NOSTR_ACCOUNT_PUBKEY}" ]
domain_whitelist = [ "${DOMAIN_NAME}" ]
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-en"
sleep 1
fi
fi
done
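# Sketch: nostr-rs-relay should serve a NIP-11 information document over
# HTTPS, so a freshly deployed relay can be spot-checked with:
#   curl -s -H "Accept: application/nostr+json" "https://$NOSTR_FQDN"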

View File

@ -1,11 +0,0 @@
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y tor
#COPY ./torrc /etc/tor/torrc
#RUN chown root:root /etc/tor/torrc
#RUN chmod 0644 /etc/tor/torrc
#RUN mkdir /data
#VOLUME /data
# RUN chown 1000:1000 -R /data
#USER 1000:1000
CMD tor -f /etc/tor/torrc
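# Sketch (tag and mount are illustrative): build and run this image with a
# torrc from this directory:
#   docker build -t ss-tor .
#   docker run --rm -v "$(pwd)/torrc:/etc/tor/torrc:ro" ss-tor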

View File

@ -1,8 +0,0 @@
# we configure a hidden service that listens on onion:443 and forwards to nginx:443 at the torv3 onion address
SocksPort 0
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 nginx:443
Log info file /var/log/tor/tor.log
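# Sketch: once tor bootstraps, the v3 onion hostname can be read from the
# hidden service directory, e.g. "cat /var/lib/tor/www/hostname" in the container.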

View File

@ -1,5 +0,0 @@
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 127.0.0.1:443
Log info file /var/log/tor/tor.log

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
# see https://www.sovereign-stack.org/management/
@ -137,3 +137,16 @@ lxc restart ss-mgmt
if [ "$ADDED_COMMAND" = true ]; then
echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
fi
# clone or update the project scripts repo.
PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
else
cd "$PROJECTS_SCRIPTS_PATH"
git config --global pull.rebase false
git pull
cd -
fi

View File

@ -41,20 +41,22 @@ if ! snap list | grep -q lxd; then
sudo lxd init --auto
fi
echo "Your management machine has been provisioned!"
# run an lxd command so we don't see a warning upon first invocation
lxc list > /dev/null 2>&1
# create the docker group and add the ubuntu user to the docker and lxd groups
if ! groups ubuntu | grep -q docker; then
sudo addgroup docker
sudo usermod -aG docker ubuntu
sudo usermod -aG lxd ubuntu
fi
# if an SSH pubkey does not exist, we create one.
if [ ! -f /home/ubuntu/.ssh/id_rsa.pub ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f /home/ubuntu/.ssh/id_rsa -t ecdsa -b 521 -N ""
fi
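# Sketch: this pubkey is what later gets pushed to deployment hosts, e.g.
# (hostname illustrative):
#   ssh-copy-id -i /home/ubuntu/.ssh/id_rsa.pub ubuntu@example-host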
echo "Your management machine has been provisioned!"