Compare commits
No commits in common. "main" and "volume-mounting" have entirely different histories.
main ... volume-mounting
3  .gitmodules  (vendored)
@@ -1,3 +0,0 @@
-[submodule "lnplay"]
-    path = lnplay
-    url = https://github.com/farscapian/lnplay
37  .vscode/settings.json  (vendored)
@@ -1,37 +0,0 @@
-{
-    "editor.renderWhitespace": "boundary",
-    "editor.tabSize": 4,
-    "editor.detectIndentation": false,
-    "editor.insertSpaces": true,
-    "editor.cursorBlinking": "phase",
-    "editor.cursorWidth": 3,
-    "editor.formatOnSave": true,
-    "shellcheck.enable": true,
-    "shellcheck.enableQuickFix": true,
-    "shellcheck.run": "onType",
-    "shellcheck.executablePath": "shellcheck",
-    "shellcheck.customArgs": [
-        "-x"
-    ],
-    "shellcheck.ignorePatterns": {},
-    // "shellcheck.exclude": [
-    // "SC1090",
-    // "SC1091",
-    // "SC2029"
-    // ],
-    "terminal.integrated.fontFamily": "monospace",
-    "workbench.colorCustomizations": {
-        "activityBar.background": "#1900a565",
-        "activityBar.foreground": "#e7e7e7",
-        "activityBar.inactiveForeground": "#e7e7e799",
-        "activityBarBadge.background": "#7143fc",
-        "activityBarBadge.foreground": "#e7e7e7",
-        "titleBar.activeBackground": "#029727",
-        "titleBar.inactiveBackground": "#02972799",
-        "titleBar.activeForeground": "#e7e7e7",
-        "titleBar.inactiveForeground": "#e7e7e799",
-        "statusBar.background": "#f3ad43",
-        "statusBarItem.hoverBackground": "#ffbc59",
-        "statusBar.foreground": "#000000"
-    }
-}
9  LICENSE  (Normal file)
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -7,25 +7,25 @@ cd "$(dirname "$0")"
 # the script executed here from the BTCPAY repo will automatically take services down
 # and bring them back up.
 
-echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_SERVER_FQDN'."
+echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
 
 sleep 5
 
-ssh "$BTCPAY_SERVER_FQDN" "mkdir -p $REMOTE_BACKUP_PATH; cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
 
 # TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services
 # that way processes can run shutdown procedures and leave files in the correct state.
 sleep 10
 
 # TODO enable encrypted archives
-# TODO switch to btcpay-backup.sh
-scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_SERVER_FQDN:$REMOTE_DATA_PATH/btcpay-backup.sh"
-ssh "$BTCPAY_SERVER_FQDN" "sudo cp $REMOTE_DATA_PATH/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
-ssh "$BTCPAY_SERVER_FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+# TODO switch to btcpay-backup.sh when on LXD fully.
+scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
+ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
 
 # next we pull the resulting backup archive down to our management machine.
-ssh "$BTCPAY_SERVER_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_BACKUP_PATH/btcpay.tar.gz"
-ssh "$BTCPAY_SERVER_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_BACKUP_PATH/btcpay.tar.gz"
+ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
+ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
 
 # if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
 BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
@@ -34,6 +34,6 @@ if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
 fi
 
 mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
-scp "$BTCPAY_SERVER_FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
+scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
 
-echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_SERVER_FQDN'."
+echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
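
The backup hunk above follows one remote-execution pattern throughout: stop the services over SSH, run the vendored backup script under sudo with the environment it needs, then copy the resulting archive back to the management machine. A minimal sketch of that round trip, with hypothetical host and path values standing in for the project's own variables:

    # illustrative sketch only; HOST, REMOTE_DIR and LOCAL_ARCHIVE are made-up stand-ins
    HOST="btcpay.example.com"
    REMOTE_DIR="/home/ubuntu/backups"
    LOCAL_ARCHIVE="$HOME/backups/btcpay.tar.gz"

    ssh "$HOST" "mkdir -p $REMOTE_DIR"                        # ensure the archive directory exists
    ssh "$HOST" "sudo bash -c /path/to/btcpay-down.sh"        # stop services before archiving
    ssh "$HOST" "sudo bash -c /path/to/btcpay-backup.sh"      # produce the archive on the remote host
    scp "$HOST:$REMOTE_DIR/btcpay.tar.gz" "$LOCAL_ARCHIVE"    # pull the archive down locally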
@@ -1,3 +1,6 @@
-# these aliases are simply calling the btcpay server scripts.
-alias bitcoin-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-cli.sh $@"
-alias lightning-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-lightning-cli.sh $@"
+# we append this text to the btcpay server /home/ubuntu/.bashrc so
+# logged in users have more common access to the variou
+
+alias bitcoin-cli="bitcoin-cli.sh $@"
+alias lightning-cli="bitcoin-lightning-cli.sh $@"
+
@@ -3,37 +3,56 @@
 set -eu
 cd "$(dirname "$0")"
 
-if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
-    # send an updated ~/.bashrc so we have quicker access to cli tools
-    scp ./bashrc.txt "ubuntu@$BTCPAY_SERVER_FQDN:$REMOTE_HOME/.bashrc"
-    ssh "$BTCPAY_SERVER_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
-    ssh "$BTCPAY_SERVER_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
+export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"
+# run the btcpay setup script if it hasn't been done before.
+if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
+    ./stub_btcpay_setup.sh
+    BACKUP_BTCPAY=false
 fi
 
-./stub_btcpay_setup.sh
+RUN_SERVICES=true
 
 # we will re-run the btcpayserver provisioning scripts if directed to do so.
 # if an update does occur, we grab another backup.
 if [ "$UPDATE_BTCPAY" = true ]; then
     # run the update.
-    ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
 
     # btcpay-update.sh brings services back up, but does not take them down.
-    ssh "$BTCPAY_SERVER_FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
+    ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
 
-    sleep 30
+    sleep 20
 
 elif [ "$RESTORE_BTCPAY" = true ]; then
     # run the update.
-    ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
     sleep 15
 
     ./restore.sh
 
+    RUN_SERVICES=true
+    BACKUP_BTCPAY=false
+
+elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
+    # the administrator may have indicated a reconfig;
+    # if so, we re-run setup script.
+    ./stub_btcpay_setup.sh
+
+    RUN_SERVICES=true
     BACKUP_BTCPAY=false
 fi
 
+# if the script gets this far, then we grab a regular backup.
+if [ "$BACKUP_BTCPAY" = true ]; then
+    # we just grab a regular backup
+    ./backup_btcpay.sh
+fi
+
+if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
 # The default is to resume services, though admin may want to keep services off (eg., for a migration)
 # we bring the services back up by default.
-ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
+fi
+
+echo "FINISHED btcpayserver/go.sh"
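
The new guard in btcpayserver/go.sh runs the setup stub only when a btcpay.complete marker is absent on the remote host. A simplified, equivalent sketch of that first-run check (the host name and marker path are placeholders, and `ssh ... test -f` is used here in place of the script's `[[ ! -f ]]; echo $?` construction):

    HOST="btcpay.example.com"               # hypothetical host
    MARKER="/home/ubuntu/btcpay.complete"   # marker written after a successful setup

    # test -f exits non-zero when the marker is missing, so setup runs only once.
    if ! ssh "$HOST" test -f "$MARKER"; then
        ./stub_btcpay_setup.sh
    fi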
0  btcpayserver/remote_scripts/btcpay-backup.sh  (Executable file → Normal file)
0  btcpayserver/remote_scripts/btcpay-restore.sh  (Executable file → Normal file)
@@ -3,17 +3,31 @@
 set -e
 cd "$(dirname "$0")"
 
+if [ "$RESTORE_BTCPAY" = false ]; then
+    exit 0
+fi
+
 if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
     # push the restoration archive to the remote server
     echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"
 
-    BTCPAY_REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/btcpayserver"
-    ssh "$FQDN" mkdir -p "$BTCPAY_REMOTE_BACKUP_PATH"
-    REMOTE_BTCPAY_ARCHIVE_PATH="$BTCPAY_REMOTE_BACKUP_PATH/btcpay.tar.gz"
+    REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
+    ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
+    REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
     scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
 
+    # we clean up any old containers first before restoring.
+    ssh "$FQDN" docker system prune -f
+
     # push the modified restore script to the remote directory, set permissions, and execute.
-    scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_DATA_PATH/btcpay-restore.sh"
-    ssh "$FQDN" "sudo mv $REMOTE_DATA_PATH/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
-    ssh "$FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
+    scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
+    ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
+    ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
+
+    # now, we're going to take things down because aparently we this needs to be re-exececuted.
+    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+
+else
+    echo "ERROR: File does not exist."
+    exit 1
 fi
@@ -3,8 +3,6 @@
 set -e
 cd "$(dirname "$0")"
 
-
-
 # default is for regtest
 CLIGHTNING_WEBSOCKET_PORT=9736
 if [ "$BITCOIN_CHAIN" = testnet ]; then
@@ -15,7 +13,6 @@ fi
 
 export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT"
 
-
 # export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
 # BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"
 
@@ -39,7 +36,7 @@ done
 if [ ! -d "btcpayserver-docker" ]; then
     echo "cloning btcpayserver-docker";
     git clone -b master ${BTCPAYSERVER_GITREPO} btcpayserver-docker;
-    git config --global --add safe.directory /home/ubuntu/ss-data/btcpayserver-docker
+    git config --global --add safe.directory /home/ubuntu/btcpayserver-docker
 else
     cd ./btcpayserver-docker
     git pull
@@ -50,7 +47,7 @@ fi
 cd btcpayserver-docker
 
 export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
-export BTCPAY_ANNOUNCEABLE_HOST="${BTCPAY_USER_FQDN}"
+export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
 export NBITCOIN_NETWORK="${BITCOIN_CHAIN}"
 export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
 export BTCPAYGEN_LIGHTNING="clightning"
@@ -58,20 +55,14 @@ export BTCPAYGEN_CRYPTO1="btc"
 export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
 export BTCPAYGEN_REVERSEPROXY="nginx"
 export BTCPAY_ENABLE_SSH=false
-export BTCPAY_BASE_DIRECTORY=${REMOTE_DATA_PATH}
+export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
 export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
 export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"
 
-# if [ "\$NBITCOIN_NETWORK" != regtest ]; then
-# cd ./contrib/FastSync
-# ./load-utxo-set.sh
-# cd -
-# fi
-
 # next we create fragments to customize various aspects of the system
 # this block customizes clightning to ensure the correct endpoints are being advertised
 # We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
-cat > ${REMOTE_DATA_PATH}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
+cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
 
 services:
   clightning_bitcoin:
@@ -79,8 +70,6 @@ services:
       LIGHTNINGD_OPT: |
        announce-addr-dns=true
        experimental-websocket-port=9736
-       experimental-peer-storage
-       experimental-offers
     ports:
       - "${CLIGHTNING_WEBSOCKET_PORT}:9736"
     expose:
@@ -88,21 +77,25 @@ services:
 
 EOF
 
 
 # run the setup script.
 . ./btcpay-setup.sh -i
 
-touch ${REMOTE_DATA_PATH}/btcpay.complete
-chown ubuntu:ubuntu ${REMOTE_DATA_PATH}/btcpay.complete
+touch ${REMOTE_HOME}/btcpay.complete
 EOL
 
+# send an updated ~/.bashrc so we have quicker access to cli tools
+scp ./bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
+ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
+ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
+
 # send the setup script to the remote machine.
-scp "$SITE_PATH/btcpay.sh" "ubuntu@$BTCPAY_SERVER_FQDN:$REMOTE_DATA_PATH/btcpay_setup.sh"
-ssh "$BTCPAY_SERVER_FQDN" "chmod 0744 $REMOTE_DATA_PATH/btcpay_setup.sh"
+scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
+ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
 
 # script is executed under sudo
-ssh "$BTCPAY_SERVER_FQDN" "sudo bash -c $REMOTE_DATA_PATH/btcpay_setup.sh"
+ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
 
 
 # lets give time for the containers to spin up
 sleep 10
94  create_lxc_base.sh  (Executable file)
@@ -0,0 +1,94 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
+
+# let's download our base image.
+if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # if the image doesn't exist, download it from Ubuntu's image server
+    # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
+    # we don't really need to cache this locally since it gets continually updated upstream.
+    if [ -d "$SS_JAMMY_PATH" ]; then
+        lxc image import "$SS_JAMMY_PATH/meta-bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.tar.xz" \
+            "$SS_JAMMY_PATH/bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.qcow2" \
+            --alias "$UBUNTU_BASE_IMAGE_NAME"
+    else
+        # copy the image down from canonical.
+        lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
+    fi
+fi
+
+# If the lxc VM does exist, then we will delete it (so we can start fresh)
+if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # if there's no snapshot, we dispense with the old image and try again.
+    if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+        lxc delete "$BASE_IMAGE_VM_NAME" --force
+        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
+    fi
+else
+    # the base image is ubuntu:22.04.
+    lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm
+
+    # TODO move this sovereign-stack-base construction VM to separate dedicated IP
+    lxc config set "$BASE_IMAGE_VM_NAME"
+
+    for CHAIN in mainnet testnet; do
+        for DATA in blocks chainstate; do
+            lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+        done
+    done
+
+    lxc start "$BASE_IMAGE_VM_NAME"
+
+    sleep 15
+    while lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+        sleep 1
+    done
+
+    # ensure the ssh service is listening at localhost
+    lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22
+
+    # If we have any chaninstate or blocks in our SSME, let's push them to the
+    # remote host as a zfs volume that way deployments can share a common history
+    # of chainstate/blocks.
+    for CHAIN in testnet mainnet; do
+        for DATA in blocks chainstate; do
+            # if the storage snapshot doesn't yet exist, create it.
+            if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+                DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+                if [ -d "$DATA_PATH" ]; then
+                    COMPLETE_FILE_PATH="$DATA_PATH/complete"
+                    if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
+                        lxc file push --recursive --uid=999 --guid=999 --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/"
+                        lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
+                    else
+                        echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
+                    fi
+                fi
+            fi
+        done
+    done
+
+    # stop the VM and get a snapshot.
+    lxc stop "$BASE_IMAGE_VM_NAME"
+    lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME"
+
+fi
+
+echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
+lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME"
+
+echo "INFO: Success creating the base image. Deleting artifacts from the build process."
+lxc delete -f "$BASE_IMAGE_VM_NAME"
+
+# now let's get a snapshot of each of the blocks/chainstate directories.
+for CHAIN in testnet mainnet; do
+    for DATA in blocks chainstate; do
+        if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+            echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
+            lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
+        fi
+    done
+done
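
create_lxc_base.sh only snapshots a bitcoin data volume when no snap0 exists yet, so repeated runs stay idempotent. A sketch of that check-then-snapshot idiom, restated from the script with the same ss-base pool name:

    for CHAIN in testnet mainnet; do
        for DATA in blocks chainstate; do
            # look for an existing snap0 before creating a new snapshot
            if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
                lxc storage volume snapshot ss-base "$CHAIN-$DATA"
            fi
        done
    done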
464  deploy.sh  (Executable file)
@@ -0,0 +1,464 @@
+#!/bin/bash
+
+set -e
+cd "$(dirname "$0")"
+
+LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/main)"
+export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
+
+# check to ensure dependencies are met.
+for cmd in wait-for-it dig rsync sshfs lxc; do
+    if ! command -v "$cmd" >/dev/null 2>&1; then
+        echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
+        exit 1
+    fi
+done
+
+# do a spot check; if we are on production warn.
+if lxc remote get-default | grep -q "production"; then
+    echo "WARNING: You are running command against a production system!"
+    echo ""
+
+    # check if there are any uncommited changes. It's dangerous to
+    # alter production systems when you have commits to make or changes to stash.
+    if git update-index --refresh | grep -q "needs update"; then
+        echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
+        exit 1
+    fi
+
+    RESPONSE=
+    read -r -p " Are you sure you want to continue (y) ": RESPONSE
+    if [ "$RESPONSE" != "y" ]; then
+        echo "STOPPING."
+        exit 1
+    fi
+
+fi
+
+# switch to the defult project. We will switch to something more specific later.
+if ! lxc info | grep "project:" | grep -q default; then
+    lxc project switch default
+fi
+
+DOMAIN_NAME=
+RUN_CERT_RENEWAL=true
+SKIP_WWW=false
+RESTORE_WWW=false
+RESTORE_CERTS=false
+BACKUP_CERTS=true
+BACKUP_APPS=true
+BACKUP_BTCPAY=true
+BACKUP_BTCPAY_ARCHIVE_PATH=
+RESTORE_BTCPAY=false
+SKIP_BTCPAY=false
+UPDATE_BTCPAY=false
+RECONFIGURE_BTCPAY_SERVER=false
+REMOTE_NAME="$(lxc remote get-default)"
+STOP_SERVICES=false
+USER_SAYS_YES=false
+RESTART_FRONT_END=true
+USER_TARGET_PROJECT=
+
+# grab any modifications from the command line.
+for i in "$@"; do
+    case $i in
+        --restore-certs)
+            RESTORE_CERTS=true
+            shift
+            ;;
+        --restore-www)
+            RESTORE_WWW=true
+            BACKUP_APPS=false
+            RUN_CERT_RENEWAL=false
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --restore-btcpay)
+            RESTORE_BTCPAY=true
+            BACKUP_BTCPAY=false
+            RUN_CERT_RENEWAL=false
+            shift
+            ;;
+        --no-backup-www)
+            BACKUP_CERTS=false
+            BACKUP_APPS=false
+            shift
+            ;;
+        --stop)
+            STOP_SERVICES=true
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --restart-front-end)
+            RESTART_FRONT_END=true
+            shift
+            ;;
+        --backup-archive-path=*)
+            BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
+            shift
+            ;;
+        --project=*)
+            USER_TARGET_PROJECT="${i#*=}"
+            shift
+            ;;
+        --update-btcpay)
+            UPDATE_BTCPAY=true
+            shift
+            ;;
+        --skip-www)
+            SKIP_WWW=true
+            shift
+            ;;
+        --skip-btcpay)
+            SKIP_BTCPAY=true
+            shift
+            ;;
+        --no-cert-renew)
+            RUN_CERT_RENEWAL=false
+            shift
+            ;;
+        --reconfigure-btcpay)
+            RECONFIGURE_BTCPAY_SERVER=true
+            shift
+            ;;
+        -y)
+            USER_SAYS_YES=true
+            shift
+            ;;
+        *)
+            echo "Unexpected option: $1"
+            exit 1
+            ;;
+    esac
+done
+
+if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
+    echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set event when the RESTORE_BTCPAY = true. "
+    exit 1
+fi
+
+# set up our default paths.
+source ../../defaults.sh
+
+export DOMAIN_NAME="$DOMAIN_NAME"
+export REGISTRY_DOCKER_IMAGE="registry:2"
+export RESTORE_WWW="$RESTORE_WWW"
+export STOP_SERVICES="$STOP_SERVICES"
+export BACKUP_CERTS="$BACKUP_CERTS"
+export BACKUP_APPS="$BACKUP_APPS"
+export RESTORE_BTCPAY="$RESTORE_BTCPAY"
+export BACKUP_BTCPAY="$BACKUP_BTCPAY"
+export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
+export REMOTE_NAME="$REMOTE_NAME"
+export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
+export USER_SAYS_YES="$USER_SAYS_YES"
+export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
+export RESTART_FRONT_END="$RESTART_FRONT_END"
+export RESTORE_CERTS="$RESTORE_CERTS"
+
+
+# todo convert this to Trezor-T
+SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
+export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
+if [ ! -f "$SSH_PUBKEY_PATH" ]; then
+    # generate a new SSH key for the base vm image.
+    ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
+fi
+
+# ensure our remote path is created.
+mkdir -p "$REMOTE_PATH"
+
+REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+    echo "ERROR: The remote definition could not be found. You may need to re-run 'ss-remote'."
+    exit 1
+fi
+
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"
+source "$REMOTE_DEFINITION"
+export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
+export DEPLOYMENT_STRING="$DEPLOYMENT_STRING"
+
+# this is our password generation mechanism. Relying on GPG for secure password generation
+function new_pass {
+    gpg --gen-random --armor 1 25
+}
+
+
+function stub_site_definition {
+    mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites"
+
+    # create a symlink from the PROJECT_PATH/sites/DOMAIN_NAME to the ss-sites/domain name
+    if [ ! -d "$PROJECT_PATH/sites/$DOMAIN_NAME" ]; then
+        ln -s "$SITE_PATH" "$PROJECT_PATH/sites/$DOMAIN_NAME"
+    fi
+
+    if [ ! -f "$SITE_PATH/site.conf" ]; then
+        # check to see if the enf file exists. exist if not.
+        SITE_DEFINITION_PATH="$SITE_PATH/site.conf"
+        if [ ! -f "$SITE_DEFINITION_PATH" ]; then
+
+            # stub out a site.conf with new passwords.
+            cat >"$SITE_DEFINITION_PATH" <<EOL
+# https://www.sovereign-stack.org/ss-deploy/#siteconf
+
+DOMAIN_NAME="${DOMAIN_NAME}"
+# BTCPAY_ALT_NAMES="tip,store,pay,send"
+SITE_LANGUAGE_CODES="en"
+DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
+DEPLOY_GHOST=true
+DEPLOY_CLAMS=true
+DEPLOY_NEXTCLOUD=false
+NOSTR_ACCOUNT_PUBKEY=
+DEPLOY_GITEA=false
+GHOST_MYSQL_PASSWORD="$(new_pass)"
+GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
+NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
+NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
+GITEA_MYSQL_PASSWORD="$(new_pass)"
+GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
+
+EOL
+
+            chmod 0744 "$SITE_DEFINITION_PATH"
+            echo "INFO: we stubbed a new site.conf for you at '$SITE_DEFINITION_PATH'. Go update it!"
+            exit 1
+
+        fi
+    fi
+
+}
+
+for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
+    NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
+    PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
+    BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
+    export PROJECT_PREFIX="$PROJECT_PREFIX"
+    export BITCOIN_CHAIN="$BITCOIN_CHAIN"
+
+    PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
+    PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
+
+    # if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
+    if [ -n "$USER_TARGET_PROJECT" ]; then
+
+        if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
+            echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
+            continue
+        fi
+    fi
+
+    export PROJECT_NAME="$PROJECT_NAME"
+    export PROJECT_PATH="$PROJECT_PATH"
+
+    mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
+
+    # create a symlink from ./remotepath/projects/project
+    if [ ! -d "$REMOTE_PATH/projects/$PROJECT_NAME" ]; then
+        ln -s "$PROJECT_PATH" "$REMOTE_PATH/projects/$PROJECT_NAME"
+    fi
+
+    # check to see if the enf file exists. exist if not.
+    PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
+    if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
+
+        # stub out a project.conf
+        cat >"$PROJECT_DEFINITION_PATH" <<EOL
+# see https://www.sovereign-stack.org/ss-deploy/#projectconf for more info.
+
+PRIMARY_DOMAIN="domain0.tld"
+# OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
+
+WWW_SERVER_MAC_ADDRESS=
+# WWW_SERVER_CPU_COUNT="6"
+# WWW_SERVER_MEMORY_MB="4096"
+
+BTCPAYSERVER_MAC_ADDRESS=
+# BTCPAY_SERVER_CPU_COUNT="4"
+# BTCPAY_SERVER_MEMORY_MB="4096"
+
+EOL
+
+        chmod 0744 "$PROJECT_DEFINITION_PATH"
+        echo "INFO: we stubbed a new project.conf for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
+        echo "INFO: Learn more at https://www.sovereign-stack.org/ss-deploy/"
+
+        exit 1
+    fi
+
+    # source project defition.
+    source "$PROJECT_DEFINITION_PATH"
+
+    if [ -z "$PRIMARY_DOMAIN" ]; then
+        echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project.conf."
+        exit 1
+    fi
+
+    if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
+        echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
+        exit 1
+    fi
+
+
+    if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
+        echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project.conf."
+        exit 1
+    fi
+
+    # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
+    DOMAIN_LIST="${PRIMARY_DOMAIN}"
+    if [ -n "$OTHER_SITES_LIST" ]; then
+        DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
+    fi
+
+    export DOMAIN_LIST="$DOMAIN_LIST"
+    export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
+
+    # let's provision our primary domain first.
+    export DOMAIN_NAME="$PRIMARY_DOMAIN"
+
+    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+    export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
+
+    stub_site_definition
+
+    # bring the VMs up under the primary domain name.
+
+    export UPDATE_BTCPAY="$UPDATE_BTCPAY"
+    export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"
+
+    # iterate over all our server endpoints and provision them if needed.
+    # www
+    VPS_HOSTNAME=
+
+    if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+        # create the lxd base image.
+        ./create_lxc_base.sh
+    fi
+
+    for VIRTUAL_MACHINE in www btcpayserver; do
+        export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
+        FQDN=
+
+        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+
+        source "$SITE_PATH/site.conf"
+        source ./domain_env.sh
+
+        # VALIDATE THE INPUT from the ENVFILE
+        if [ -z "$DOMAIN_NAME" ]; then
+            echo "ERROR: DOMAIN_NAME not specified in your site.conf."
+            exit 1
+        fi
+
+        # create the lxc project as specified by PROJECT_NAME
+        if ! lxc project list | grep -q "$PROJECT_NAME"; then
+            lxc project create "$PROJECT_NAME"
+            lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=false
+        fi
+
+        # Goal is to get the macvlan interface.
+        LXD_SS_CONFIG_LINE=
+        if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
+            LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project=default | grep lxdbr0 | grep ss-config)"
+        fi
+
+        if [ -z "$LXD_SS_CONFIG_LINE" ]; then
+            echo "ERROR: the MACVLAN interface has not been specified. You may need to run 'ss-remote' again."
+            exit 1
+        fi
+
+        CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
+        DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
+        export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
+
+
+        # Now let's switch to the new project to ensure new resources are created under the project scope.
+        if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
+            echo "INFO: switch to lxd project '$PROJECT_NAME'."
+            lxc project switch "$PROJECT_NAME"
+        fi
+
+        # check if the OVN network exists in this project.
+        if ! lxc network list | grep -q "ss-ovn"; then
+            lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
+        fi
+
+        export MAC_ADDRESS_TO_PROVISION=
+        export VPS_HOSTNAME="$VPS_HOSTNAME"
+        export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
+
+        if [ "$VIRTUAL_MACHINE" = www ]; then
+            if [ "$SKIP_WWW" = true ]; then
+                echo "INFO: Skipping WWW due to command line argument."
+                continue
+            fi
+
+            FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
+            VPS_HOSTNAME="$WWW_HOSTNAME"
+            MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
+            ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
+
+        elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
+            FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
+            VPS_HOSTNAME="$BTCPAY_HOSTNAME"
+            MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
+            if [ "$BITCOIN_CHAIN" = mainnet ]; then
+                ROOT_DISK_SIZE_GB=150
+            elif [ "$BITCOIN_CHAIN" = testnet ]; then
+                ROOT_DISK_SIZE_GB=70
+            fi
+
+        elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
+            export FQDN="$BASE_IMAGE_VM_NAME"
+            ROOT_DISK_SIZE_GB=8
+        else
+            echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
+            exit
+        fi
+
+        export FQDN="$FQDN"
+        export LXD_VM_NAME="${FQDN//./-}"
+        export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
+        export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
+        export PROJECT_PATH="$PROJECT_PATH"
+
+        ./deploy_vm.sh
+
+        if [ "$VIRTUAL_MACHINE" = www ]; then
+            # this tells our local docker client to target the remote endpoint via SSH
+            export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
+
+            # enable docker swarm mode so we can support docker stacks.
+            if docker info | grep -q "Swarm: inactive"; then
+                docker swarm init --advertise-addr enp6s0
+            fi
+        fi
+
+    done
+
+    # let's stub out the rest of our site definitions, if any.
+    for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
+        export DOMAIN_NAME="$DOMAIN_NAME"
+        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+
+        # stub out the site_defition if it's doesn't exist.
+        stub_site_definition
+    done
+
+
+    # now let's run the www and btcpay-specific provisioning scripts.
+    if [ "$SKIP_WWW" = false ]; then
+        ./www/go.sh
+        ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
+    fi
+
+    export DOMAIN_NAME="$PRIMARY_DOMAIN"
+    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
+    if [ "$SKIP_BTCPAY" = false ]; then
+        ./btcpayserver/go.sh
+
+        ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
+    fi
+
+done
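
deploy.sh reads its switches with a plain `for i in "$@"` loop and a `case` statement, so `--key=value` options are split with `${i#*=}`. A minimal sketch of the idiom with made-up option names:

    EXAMPLE_FLAG=false
    EXAMPLE_PATH=
    for i in "$@"; do
        case $i in
            --example-flag)
                EXAMPLE_FLAG=true        # boolean switches just flip a variable
                shift
                ;;
            --example-path=*)
                EXAMPLE_PATH="${i#*=}"   # strip everything up to and including '='
                shift
                ;;
            *)
                echo "Unexpected option: $i"
                exit 1
                ;;
        esac
    done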
64  deploy_vm.sh  (Executable file)
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+## This is a weird if clause since we need to LEFT-ALIGN the statement below.
+SSH_STRING="Host ${FQDN}"
+if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
+
+########## BEGIN
+cat >> "$SSH_HOME/config" <<-EOF
+
+${SSH_STRING}
+HostName ${FQDN}
+User ubuntu
+EOF
+###
+
+fi
+
+ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
+
+# if the machine doesn't exist, we create it.
+if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
+
+    # create a base image if needed and instantiate a VM.
+    if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
+        echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
+        echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
+        exit 1
+    fi
+
+    bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME"
+
+    # now let's create a new VM to work with.
+    #lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
+    lxc init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
+
+    # let's PIN the HW address for now so we don't exhaust IP
+    # and so we can set DNS internally.
+    lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+    lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
+
+    # of chainstate/blocks.
+    # for CHAIN in testnet mainnet; do
+    # for DATA in blocks chainstate; do
+    # MOUNT_PATH="/$CHAIN/$DATA"
+    # if lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+    # lxc storage volume attach ss-base "$CHAIN-$DATA" "$LXD_VM_NAME" "$MOUNT_PATH"
+    # fi
+    # done
+    # done
+
+    lxc start "$LXD_VM_NAME"
+    sleep 10
+
+    bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
+fi
+
+# scan the remote machine and install it's identity in our SSH known_hosts file.
+ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
+
+# create a directory to store backup archives. This is on all new vms.
+ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
@@ -3,18 +3,19 @@
 set -e
 
 export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
+export BTCPAY_FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
 export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
 export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
 export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
 export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
+export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
 export ADMIN_ACCOUNT_USERNAME="info"
 export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
+export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
+export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
+export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
 
-
-export REMOTE_GHOST_PATH="$REMOTE_DATA_PATH/ghost"
-export REMOTE_NEXTCLOUD_PATH="$REMOTE_DATA_PATH/nextcloud"
-export REMOTE_GITEA_PATH="$REMOTE_DATA_PATH/gitea"
 
 SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
 export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
 echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"
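
The DOMAIN_IDENTIFIER exported in the hunk above is simply the last six hex characters of the domain name's SHA-256 digest, restated here with a placeholder domain:

    DOMAIN_NAME="example.com"    # placeholder
    SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}')"
    DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"    # keep only the last 6 characters
    echo "$DOMAIN_IDENTIFIER"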
1  lnplay
@@ -1 +0,0 @@
-Subproject commit e9a18f9385414c1dc34381f39c2709cf115c907f
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-set -e
-
-export DEPLOY_GHOST=true
-export DEPLOY_NOSTR=false
-export DEPLOY_NEXTCLOUD=false
-export DEPLOY_GITEA=false
-export GHOST_DEPLOY_SMTP=false
-export MAILGUN_FROM_ADDRESS=
-export MAILGUN_SMTP_USERNAME=
-export MAILGUN_SMTP_PASSWORD=
-
-export SITE_LANGUAGE_CODES="en"
-export LANGUAGE_CODE="en"
-export NOSTR_ACCOUNT_PUBKEY=
-
-
-# this is where the html is sourced from.
-export SITE_HTML_PATH=
-
-export GHOST_MYSQL_PASSWORD=
-export GHOST_MYSQL_ROOT_PASSWORD=
-export NEXTCLOUD_MYSQL_PASSWORD=
-export GITEA_MYSQL_PASSWORD=
-export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
-export GITEA_MYSQL_ROOT_PASSWORD=
-export DUPLICITY_BACKUP_PASSPHRASE=
-
-
-
-DEFAULT_DB_IMAGE="mariadb:10.11.2-jammy"
-
-
-
-# run the docker stack.
-export GHOST_IMAGE="ghost:5.53.3"
-
-# TODO switch to mysql. May require intricate export work for existing sites.
-# THIS MUST BE COMPLETED BEFORE v1 RELEASE
-#https://forum.ghost.org/t/how-to-migrate-from-mariadb-10-to-mysql-8/29575
-export GHOST_DB_IMAGE="mysql:8.0.32"
-
-
-export NGINX_IMAGE="nginx:1.25.1"
-
-# version of backup is 24.0.3
-export NEXTCLOUD_IMAGE="nextcloud:25.0.4"
-export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
-
-# TODO PIN the gitea version number.
-export GITEA_IMAGE="gitea/gitea:latest"
-export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
-
-export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
-
-
-export OTHER_SITES_LIST=
-export BTCPAY_ALT_NAMES=
258  stub_lxc_profile.sh  (Executable file)
@@ -0,0 +1,258 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+VIRTUAL_MACHINE=base
+LXD_HOSTNAME=
+
+# grab any modifications from the command line.
+for i in "$@"; do
+    case $i in
+        --lxd-hostname=*)
+            LXD_HOSTNAME="${i#*=}"
+            shift
+            ;;
+        --vm=*)
+            VIRTUAL_MACHINE="${i#*=}"
+            shift
+            ;;
+        *)
+            echo "Unexpected option: $1"
+            exit 1
+            ;;
+    esac
+done
+
+# generate the custom cloud-init file. Cloud init installs and configures sshd
+SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
+eval "$(ssh-agent -s)"
+ssh-add "$SSH_HOME/id_rsa"
+export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
+
+export FILENAME="$LXD_HOSTNAME.yml"
+mkdir -p "$PROJECT_PATH/cloud-init"
+YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
+
+# If we are deploying the www, we attach the vm to the underlay via macvlan.
+cat > "$YAML_PATH" <<EOF
+config:
+EOF
+
+
+if [ "$VIRTUAL_MACHINE" = base ]; then
+    cat >> "$YAML_PATH" <<EOF
+  limits.cpu: 4
+  limits.memory: 4096MB
+
+EOF
+fi
+
+if [ "$VIRTUAL_MACHINE" = www ]; then
+    cat >> "$YAML_PATH" <<EOF
+  limits.cpu: "${WWW_SERVER_CPU_COUNT}"
+  limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
+
+EOF
+fi
+
+if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
+    cat >> "$YAML_PATH" <<EOF
+  limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
+  limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
+
+EOF
+
+fi
+
+# if VIRTUAL_MACHINE=base, then we doing the base image.
+if [ "$VIRTUAL_MACHINE" = base ]; then
+    # this is for the base image only...
+    cat >> "$YAML_PATH" <<EOF
+  user.vendor-data: |
+    #cloud-config
+    package_update: true
+    package_upgrade: false
+    package_reboot_if_required: false
+
+    preserve_hostname: false
+    fqdn: ${BASE_IMAGE_VM_NAME}
+
+    packages:
+      - curl
+      - ssh-askpass
+      - apt-transport-https
+      - ca-certificates
+      - gnupg-agent
+      - software-properties-common
+      - lsb-release
+      - net-tools
+      - htop
+      - rsync
+      - duplicity
+      - sshfs
+      - fswatch
+      - jq
+      - git
+      - nano
+      - wait-for-it
+      - dnsutils
+      - wget
+
+    groups:
+      - docker
+
+    users:
+      - name: ubuntu
+        groups: docker
+        shell: /bin/bash
+        lock_passwd: false
+        sudo: ALL=(ALL) NOPASSWD:ALL
+        ssh_authorized_keys:
+          - ${SSH_AUTHORIZED_KEY}
+
+EOF
+
+    if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
+        cat >> "$YAML_PATH" <<EOF
+    write_files:
+      - path: /etc/docker/daemon.json
+        permissions: 0644
+        owner: root
+        content: |
+          {
+            "registry-mirrors": [
+              "${REGISTRY_URL}"
+            ]
+          }
+
+
+EOF
+
+    fi
+
+
+    cat >> "$YAML_PATH" <<EOF
+    runcmd:
+      - sudo mkdir -m 0755 -p /etc/apt/keyrings
+      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+      - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
+      - sudo apt-get update
+      - sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+      - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
+      - sudo chown -R ubuntu:ubuntu /home/ubuntu/
+
+EOF
+
+
+    # write_files:
+    # - path: /etc/ssh/sshd_config
+    # content: |
+    # Port 22
+    # ListenAddress 0.0.0.0
+    # Protocol 2
+    # ChallengeResponseAuthentication no
+    # PasswordAuthentication no
+    # UsePAM no
+    # LogLevel INFO
+
+
+    # - path: /etc/docker/daemon.json
+    # content: |
+    # {
+    # "registry-mirrors": "${REGISTRY_URL}",
+    # "labels": "githead=${LATEST_GIT_COMMIT}"
+    # }
+    #"labels": [githead="${LATEST_GIT_COMMIT}"]
+
+    # apt:
+    # sources:
+    # docker.list:
+    # source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
+    # keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+
+
+
+
+    # - sudo apt-get update
+    #- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+
+else
+    # all other machines that are not the base image
+    cat >> "$YAML_PATH" <<EOF
+  user.vendor-data: |
+    #cloud-config
+    apt_mirror: http://us.archive.ubuntu.com/ubuntu/
+    package_update: false
+    package_upgrade: false
+    package_reboot_if_required: false
+
+    preserve_hostname: true
+    fqdn: ${FQDN}
+
+  user.network-config: |
+    version: 2
+    ethernets:
+      enp5s0:
+        dhcp4: true
+        dhcp4-overrides:
+          route-metric: 50
+        match:
+          macaddress: ${MAC_ADDRESS_TO_PROVISION}
+        set-name: enp5s0
+
+      enp6s0:
+        dhcp4: true
+
+
+EOF
+
+fi
+
+# All profiles get a root disk and cloud-init config.
+cat >> "$YAML_PATH" <<EOF
+description: Default LXD profile for ${FILENAME}
+devices:
+  root:
+    path: /
+    pool: ss-base
+    type: disk
+  config:
+    source: cloud-init:config
+    type: disk
+EOF
+
+# Stub out the network piece for the base image.
+if [ "$VIRTUAL_MACHINE" = base ]; then
+    cat >> "$YAML_PATH" <<EOF
+  enp6s0:
+    name: enp6s0
+    network: lxdbr0
+    type: nic
+name: ${FILENAME}
+EOF
+
+else
+    # If we are deploying a VM that attaches to the network underlay.
+    cat >> "$YAML_PATH" <<EOF
+  enp5s0:
+    nictype: macvlan
+    parent: ${DATA_PLANE_MACVLAN_INTERFACE}
+    type: nic
+  enp6s0:
+    name: enp6s0
+    network: ss-ovn
+    type: nic
+
+name: ${PRIMARY_DOMAIN}
+EOF
+
+fi
+
+# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
+if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
+    lxc profile create "$LXD_HOSTNAME"
+fi
+
+# configure the profile with our generated cloud-init.yml file.
+cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
49
wait_for_lxc_ip.sh
Executable file
@ -0,0 +1,49 @@
#!/bin/bash

set -e

LXC_INSTANCE_NAME=

# grab any modifications from the command line.
for i in "$@"; do
    case $i in
        --lxd-name=*)
            LXC_INSTANCE_NAME="${i#*=}"
            shift
            ;;
        *)
            echo "Unexpected option: $1"
            exit 1
            ;;
    esac
done

# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
    echo "ERROR: The lxc instance name was not specified. Use '--lxd-name=' when calling wait_for_lxc_ip.sh."
    exit 1
fi

if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
    echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
    exit 1
fi

IP_V4_ADDRESS=
while true; do
    IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
    export IP_V4_ADDRESS="$IP_V4_ADDRESS"
    if [ -n "$IP_V4_ADDRESS" ]; then
        # give the machine extra time to spin up.
        wait-for-it -t 300 "$IP_V4_ADDRESS:22"
        break
    else
        sleep 1
        printf '.'
    fi
done

# wait for cloud-init to complete before returning.
while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
    sleep 1
done
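
A possible simplification of the cloud-init wait above (an assumption, not something this script currently relies on): cloud-init ships a blocking status command that returns once first-boot configuration has finished.

    # equivalent wait using cloud-init's built-in blocking status check (sketch).
    lxc exec "$LXC_INSTANCE_NAME" -- cloud-init status --wait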
1
www/.gitignore
vendored
Normal file
@ -0,0 +1 @@
clams
@ -14,5 +14,16 @@ fi
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity "$REMOTE_SOURCE_BACKUP_PATH" "file://$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH"

# sync the remote backup path down
rsync -a "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH/" "$LOCAL_BACKUP_PATH/"

# now let's pull down the latest files from the backup directory.
# create a temp directory to serve as the mountpoint for the remote machine backups directory
SSHFS_PATH="/tmp/sshfs_temp"
mkdir -p "$SSHFS_PATH"
sshfs "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH"

# rsync the files from the remote server to our local backup path.
rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"

# step 4: unmount the SSHFS filesystem and cleanup.
umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH"
@ -1,47 +0,0 @@
#!/bin/bash

set -exu
cd "$(dirname "$0")"

APP=

# grab any modifications from the command line.
for i in "$@"; do
    case $i in
        --app=*)
            APP="${i#*=}"
            shift
            ;;

        *)
            echo "Unexpected option: $1"
            exit 1
            ;;
    esac
done

if [ -z "$APP" ]; then
    echo "ERROR: You must specify the --app= parameter."
    exit 1
fi

for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
    export DOMAIN_NAME="$DOMAIN_NAME"
    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

    # source the site path so we know what features it has.
    source ../../deployment_defaults.sh
    source ../project_defaults.sh
    source "$SITE_PATH/site.conf"
    source ../domain_env.sh

    # these variables are used by both backup/restore scripts.
    export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER"
    export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"

    # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
    mkdir -p "$LOCAL_BACKUP_PATH"

    ./backup_path.sh
done
@ -11,20 +11,22 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../project_defaults.sh
source ../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh

# with the incus side, we are trying to expose ALL OUR services from one IP address, which terminates
# with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates
# at a caching reverse proxy that runs nginx.

ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs"
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs"

# this is minimum required; www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
if [ "$DEPLOY_NOSTR" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi
if [ "$DEPLOY_CLAMS" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi

# if BTCPAY_ALT_NAMES has been set by the admin, iterate over the list
@ -36,7 +38,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
done
fi

GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"

# execute the certbot command that we dynamically generated.
eval "$GENERATE_CERT_STRING"
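
For illustration only, here is roughly what the generated certbot command expands to for a hypothetical site example.com (all values below are made up; the real string is assembled from the variables above):

    docker run -it --rm --name certbot -p 80:80 -p 443:443 \
        -v /home/ubuntu/letsencrypt/example.com:/etc/letsencrypt \
        -v /var/lib/letsencrypt:/var/lib/letsencrypt \
        -v /home/ubuntu/letsencrypt/example.com/_logs:/var/log/letsencrypt \
        certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa \
        --standalone --expand -d example.com -d www.example.com -d btcpay.example.com \
        --email admin@example.com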
96
www/go.sh
@ -3,7 +3,14 @@
set -eu
cd "$(dirname "$0")"

# Create the nginx config file which covers all domainys.
# redirect all docker commands to the remote host.
DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
export DOCKER_HOST="$DOCKER_HOST"

# prepare clams images and such
./prepare_clams.sh

# Create the nginx config file which covers all domains.
bash -c ./stub/nginx_config.sh

for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
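
A quick way to sanity-check the DOCKER_HOST redirection introduced above (hostname is illustrative): once the variable is exported, any plain docker invocation runs against the remote engine over SSH rather than the local one.

    export DOCKER_HOST="ssh://ubuntu@www.example.com"
    docker info --format '{{.Name}}'   # prints the remote engine's hostname
    docker stack ls                    # lists the stacks running on the remote swarm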
@ -11,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../project_defaults.sh
source ../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh

@ -51,15 +58,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
fi
fi

if [ "$DEPLOY_NOSTR" = true ]; then
    if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
        echo "ERROR: When deploying nostr, you MUST specify NOSTR_ACCOUNT_PUBKEY."
        exit 1
    fi
fi

if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site.conf."
exit 1

@ -72,21 +70,66 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
done

./stop_docker_stacks.sh

# TODO check if there are any other stacks that are left running (other than reverse proxy)
# if so, this may mean the user has disabled one or more domains and that existing sites/services
# are still running. We should prompt the user of this and quit. They have to go manually docker stack remove these.
STACKS_STILL_RUNNING=false
if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
if [[ $(docker stack list | wc -l) -gt 2 ]]; then
    echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to go remove the docker stacks running the remote machine."
    echo "WARNING! You still have stacks running. If you have modified the SITES list,"
    echo "exiting."
    echo " you may need to go remove the docker stacks running the remote machine."
    exit 1
    STACKS_STILL_RUNNING=true
fi

# ok, the backend stacks are stopped.
if [ "$RESTART_FRONT_END" = true ]; then
    # remove the nginx stack
    if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
        sleep 2

        docker stack rm reverse-proxy

        # wait for all docker containers to stop.
        # TODO see if there's a way to check for this.
        sleep 20
    fi

    # generate the certs and grab a backup
    if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ] && [ "$STACKS_STILL_RUNNING" = false ]; then
    if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
        ./generate_certs.sh
    fi

    # let's backup all our letsencrypt certs
    export APP="letsencrypt"
    for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
        export DOMAIN_NAME="$DOMAIN_NAME"
        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

        # source the site path so we know what features it has.
        source ../../../defaults.sh
        source "$SITE_PATH/site.conf"
        source ../domain_env.sh

        # these variables are used by both backup/restore scripts.
        export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
        export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

        # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
        export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
        mkdir -p "$LOCAL_BACKUP_PATH"

        # we grab a backup of the certs unless we're restoring.
        if [ "$RESTORE_CERTS" = true ]; then
            ./restore_path.sh
        else
            ./backup_path.sh
        fi
    done
fi

# nginx gets deployed first since it "owns" the docker networks of downstream services.
./stub/nginx_yml.sh

@ -95,3 +138,22 @@ fi
./stub/nextcloud_yml.sh
./stub/gitea_yml.sh
./stub/nostr_yml.sh

# # start a browser session; point it to port 80 to ensure HTTPS redirect.
# # WWW_FQDN is in our certificate, so we resolve to that.
# wait-for-it -t 320 "$WWW_FQDN:80"
# wait-for-it -t 320 "$WWW_FQDN:443"

# # open browser tabs.
# if [ "$DEPLOY_GHOST" = true ]; then
#     xdg-open "http://$WWW_FQDN" > /dev/null 2>&1
# fi

# if [ "$DEPLOY_NEXTCLOUD" = true ]; then
#     xdg-open "http://$NEXTCLOUD_FQDN" > /dev/null 2>&1
# fi

# if [ "$DEPLOY_GITEA" = true ]; then
#     xdg-open "http://$GITEA_FQDN" > /dev/null 2>&1
# fi
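
One way to act on the leftover-stacks TODO above, sketched under the assumption that the operator resolves it by hand (the stack name shown is illustrative, not a name this repo defines):

    # list whatever stacks are still running on the remote engine...
    docker stack ls --format '{{.Name}}'

    # ...and remove a leftover one by name.
    docker stack rm example-en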
35
www/prepare_clams.sh
Executable file
@ -0,0 +1,35 @@
#!/bin/bash

set -e
cd "$(dirname "$0")"

# deploy clams wallet.
LOCAL_CLAMS_REPO_PATH="$(pwd)/clams"
CLAMS_APP_DOCKER_REPO_URL="https://github.com/farscapian/clams-app-docker"
if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
    git clone "$CLAMS_APP_DOCKER_REPO_URL" "$LOCAL_CLAMS_REPO_PATH"
else
    cd "$LOCAL_CLAMS_REPO_PATH"
    git config --global pull.rebase false
    git pull
    cd -
fi

# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME"
BROWSER_APP_GIT_TAG="1.5.0"
BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app"
BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG"

# build the browser-app image.
if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then
    docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \
        --build-arg VERSION="$BROWSER_APP_GIT_TAG" \
        -t "$BROWSER_APP_IMAGE_NAME" \
        "$(pwd)/clams/frontend/browser-app/"
fi

# If the clams-root volume doesn't exist, we create and seed it.
if ! docker volume list | grep -q clams-root; then
    docker volume create clams-root
    docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
fi
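
To verify that the seeding step actually populated the volume, one option (an assumption; the alpine image is just a convenient shell, not something this repo prescribes) is:

    # list the static files the browser-app container wrote into the clams-root volume.
    docker run --rm -v clams-root:/output alpine ls -la /output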
@ -19,6 +19,7 @@ if [ "$USER_SAYS_YES" = false ]; then
|
|||||||
RESPONSE=
|
RESPONSE=
|
||||||
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
|
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
|
||||||
if [ "$RESPONSE" != "y" ]; then
|
if [ "$RESPONSE" != "y" ]; then
|
||||||
|
echo "STOPPING."
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@ -37,4 +38,4 @@ scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
|
|||||||
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
|
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
|
||||||
|
|
||||||
# reset folder owner to ubuntu
|
# reset folder owner to ubuntu
|
||||||
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_DATA_PATH/$APP"
|
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
|
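
Before forcing a restore like the one above, it can help to confirm what the duplicity archive actually contains; a sketch that reuses the same variables:

    # show the backup chains stored at the remote backup path before restoring.
    ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity collection-status "file://$REMOTE_BACKUP_PATH/$APP"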
@ -1,18 +1,15 @@
#!/bin/bash

set -exu
set -eu
cd "$(dirname "$0")"

# this script brings down the docker stacks on www

# bring down ghost instances.
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../deployment_defaults.sh
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh

@ -29,8 +26,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do

# these variables are used by both backup/restore scripts.
export APP="$APP"
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
@ -39,13 +36,14 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
mkdir -p "$LOCAL_BACKUP_PATH"
fi
done
done
done

# remove the nginx stack
if [ "$RESTORE_WWW" = true ]; then
if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
./restore_path.sh
docker stack rm reverse-proxy
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
sleep 10
# if we're not restoring, then we may or may not back up.
./backup_path.sh
fi
fi

done
done
done
@ -3,23 +3,15 @@
set -eu
cd "$(dirname "$0")"

docker pull "$GHOST_IMAGE"

DEPLOY_STACK=false
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../project_defaults.sh
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

if [ "$DEPLOY_GHOST" = true ]; then
DEPLOY_STACK=true
fi

# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
@ -27,8 +19,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"

# ensure directories on remote host exist so we can mount them into the containers.
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/db"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"

export GHOST_STACK_TAG="ghost-$STACK_NAME"
export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME"
@ -52,7 +44,7 @@ EOL
      - ghostnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
      - ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
    volumes:
      - ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
      - ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
    environment:
EOL
if [ "$LANGUAGE_CODE" = "en" ]; then
@ -73,21 +65,6 @@ EOL
      - database__connection__database=ghost
      - database__pool__min=0
      - privacy__useStructuredData=true
EOL

# INSERT EMAIL OPTIONS HERE
if [ "$GHOST_DEPLOY_SMTP" = true ]; then
cat >>"$DOCKER_YAML_PATH" <<EOL
      - mail__transport=SMTP
      - mail__from=${MAILGUN_FROM_ADDRESS}
      - mail__options__auth__user=${MAILGUN_SMTP_USERNAME}
      - mail__options__auth__pass=${MAILGUN_SMTP_PASSWORD}
      - mail__options__host=smtp.mailgun.org
      - mail__options__port=587
EOL
fi

cat >>"$DOCKER_YAML_PATH" <<EOL
    deploy:
      restart_policy:
        condition: on-failure
@ -97,7 +74,7 @@ EOL
    networks:
      - ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
    volumes:
      - ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
      - ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
    environment:
      - MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
      - MYSQL_DATABASE=ghost
@ -126,7 +103,7 @@ EOL
EOL
fi

if [ "$DEPLOY_STACK" = true ]; then
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
sleep 2
fi
@ -3,16 +3,12 @@
set -eu
cd "$(dirname "$0")"

docker pull "$GITEA_IMAGE"
docker pull "$GITEA_DB_IMAGE"

for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../project_defaults.sh
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

@ -84,10 +80,10 @@ EOL
    ${DBNET_NAME}:
EOL

if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
sleep 1
fi
fi
fi

done
done
|
|||||||
set -eu
|
set -eu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
docker pull "$NEXTCLOUD_IMAGE"
|
|
||||||
|
|
||||||
|
|
||||||
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
|
||||||
export DOMAIN_NAME="$DOMAIN_NAME"
|
export DOMAIN_NAME="$DOMAIN_NAME"
|
||||||
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
|
||||||
|
|
||||||
# source the site path so we know what features it has.
|
# source the site path so we know what features it has.
|
||||||
source ../../project_defaults.sh
|
source ../../../../defaults.sh
|
||||||
source "$SITE_PATH/site.conf"
|
source "$SITE_PATH/site.conf"
|
||||||
source ../../domain_env.sh
|
source ../../domain_env.sh
|
||||||
|
|
||||||
@ -39,7 +35,7 @@ services:
|
|||||||
- nextcloud-${DOMAIN_IDENTIFIER}-en
|
- nextcloud-${DOMAIN_IDENTIFIER}-en
|
||||||
- nextclouddb-${DOMAIN_IDENTIFIER}-en
|
- nextclouddb-${DOMAIN_IDENTIFIER}-en
|
||||||
volumes:
|
volumes:
|
||||||
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
|
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
|
||||||
environment:
|
environment:
|
||||||
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
- MYSQL_DATABASE=nextcloud
|
- MYSQL_DATABASE=nextcloud
|
||||||
@ -59,7 +55,7 @@ services:
|
|||||||
networks:
|
networks:
|
||||||
- nextclouddb-${DOMAIN_IDENTIFIER}-en
|
- nextclouddb-${DOMAIN_IDENTIFIER}-en
|
||||||
volumes:
|
volumes:
|
||||||
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
|
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
|
||||||
environment:
|
environment:
|
||||||
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
|
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
|
||||||
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
@ -78,7 +74,9 @@ networks:
|
|||||||
|
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
|
if [ "$STOP_SERVICES" = false ]; then
|
||||||
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
|
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
|
||||||
sleep 1
|
sleep 1
|
||||||
fi
|
fi
|
||||||
|
fi
|
||||||
done
|
done
|
@ -1,6 +1,6 @@
#!/bin/bash

set -eu
set -e
cd "$(dirname "$0")"

# here's the NGINX config. We support ghost and nextcloud.
@ -18,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}"

# source the site path so we know what features it has.
source ../../project_defaults.sh
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

@ -31,8 +31,6 @@ events {
http {
    client_max_body_size 100m;
    server_tokens off;
    sendfile on;
    include mime.types;

    # next two sets commands and connection_upgrade block come from https://docs.btcpayserver.org/FAQ/Deployment/#can-i-use-an-existing-nginx-server-as-a-reverse-proxy-with-ssl-termination
    # Needed to allow very long URLs to prevent issues while signing PSBTs
@ -172,7 +170,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL
    # https://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
    server {
        listen 443 ssl;
        listen 443 ssl http2;

        ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
        ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@ -182,7 +180,7 @@ EOL

EOL

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
    # We return a JSON object with name/pubkey mapping per NIP05.
    # https://www.reddit.com/r/nostr/comments/rrzk76/nip05_mapping_usernames_to_dns_domains_by_fiatjaf/sssss
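
For reference, the JSON that a NIP-05 endpoint like this is expected to serve simply maps a local name to a hex-encoded public key. A minimal sketch of the response body (the name and key below are placeholders, not values from this deployment):

    {
      "names": {
        "_": "<hex-encoded nostr public key>"
      }
    }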
@ -209,7 +207,7 @@ EOL

EOL

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
    # wss://$NOSTR_FQDN server block
    server {
@ -241,7 +239,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL
    # https server block for https://${BTCPAY_SERVER_NAMES}
    server {
        listen 443 ssl;
        listen 443 ssl http2;

        ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
        ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@ -251,7 +249,8 @@ EOL

        # Route everything to the real BTCPay server
        location / {
            proxy_pass http://10.10.10.66:80;
            # URL of BTCPay Server
            proxy_pass http://$LXD_VM_NAME.lxd:80;
            proxy_set_header Host \$http_host;
            proxy_set_header X-Forwarded-Proto \$scheme;
            proxy_set_header X-Real-IP \$remote_addr;
@ -264,8 +263,36 @@ EOL

EOL

# Clams server entry

# cat >>"$NGINX_CONF_PATH" <<EOL
# # https server block for https://${CLAMS_FQDN}
# server {
#     listen 443 ssl http2;
#
#     ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
#     ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
#     ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
#
#     server_name ${CLAMS_FQDN};
#     index index.js;
#
#     root /apps/clams;
#     index 200.htm;
#
#     location / {
#         try_files \$uri \$uri/ /200.htm;
#     }
#
#     location ~* \.(?:css|js|jpg|svg)$ {
#         expires 30d;
#         add_header Cache-Control "public";
#     }
#
# }
#
# EOL
if [ "$DEPLOY_GHOST" = true ]; then
|
|
||||||
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
|
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
|
||||||
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
|
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
|
||||||
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
|
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
|
||||||
@ -280,7 +307,7 @@ EOL
|
|||||||
|
|
||||||
# Main HTTPS listener for https://${WWW_FQDN}
|
# Main HTTPS listener for https://${WWW_FQDN}
|
||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl http2;
|
||||||
|
|
||||||
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
||||||
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
||||||
@ -296,6 +323,14 @@ EOL
|
|||||||
|
|
||||||
EOL
|
EOL
|
||||||
|
|
||||||
|
# # add the Onion-Location header if specifed.
|
||||||
|
# if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
# cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
|
||||||
|
|
||||||
|
# EOL
|
||||||
|
# fi
|
||||||
|
|
||||||
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
|
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
|
||||||
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
|
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
|
||||||
|
|
||||||
@ -397,13 +432,13 @@ EOL
|
|||||||
}
|
}
|
||||||
|
|
||||||
EOL
|
EOL
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
cat >>"$NGINX_CONF_PATH" <<EOL
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
# TLS listener for ${NEXTCLOUD_FQDN}
|
# TLS listener for ${NEXTCLOUD_FQDN}
|
||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl http2;
|
||||||
|
|
||||||
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
||||||
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
||||||
@ -436,11 +471,26 @@ EOL
|
|||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# TODO this MIGHT be part of the solution for Twitter Cards.
|
||||||
|
# location /contents {
|
||||||
|
# resolver 127.0.0.11 ipv6=off valid=5m;
|
||||||
|
# proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
# proxy_set_header Host \$http_host;
|
||||||
|
# proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
# proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
# proxy_intercept_errors on;
|
||||||
|
# proxy_pass http://ghost-${DOMAIN_IDENTIFIER}-${SITE_LANGUAGE_CODES}::2368\$og_prefix\$request_uri;
|
||||||
|
# }
|
||||||
|
# this piece is for GITEA.
|
||||||
|
|
||||||
if [ "$DEPLOY_GITEA" = true ]; then
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
cat >>"$NGINX_CONF_PATH" <<EOL
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
# TLS listener for ${GITEA_FQDN}
|
# TLS listener for ${GITEA_FQDN}
|
||||||
server {
|
server {
|
||||||
listen 443 ssl;
|
listen 443 ssl http2;
|
||||||
|
|
||||||
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
|
||||||
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
|
||||||
@ -464,6 +514,31 @@ EOL
|
|||||||
EOL
|
EOL
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# deploy Clams browser app under the primary domain.
|
||||||
|
if [ $iteration = 0 ]; then
|
||||||
|
|
||||||
|
cat >> "$NGINX_CONF_PATH" <<EOF
|
||||||
|
|
||||||
|
# server block for the clams browser-app; just a static website
|
||||||
|
server {
|
||||||
|
listen 443 ssl;
|
||||||
|
|
||||||
|
server_name ${CLAMS_FQDN};
|
||||||
|
|
||||||
|
autoindex off;
|
||||||
|
server_tokens off;
|
||||||
|
|
||||||
|
gzip_static on;
|
||||||
|
|
||||||
|
root /browser-app;
|
||||||
|
index 200.html;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
iteration=$((iteration+1))
|
iteration=$((iteration+1))
|
||||||
done
|
done
|
||||||
|
|
||||||
|
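
A side note on the "listen 443 ssl;" vs "listen 443 ssl http2;" lines that differ between the two branches above: on nginx 1.25.1 and newer the http2 parameter of the listen directive is deprecated in favor of a standalone directive, so (assuming a sufficiently recent NGINX_IMAGE, which this repo does not pin here) the equivalent modern form would be:

    server {
        listen 443 ssl;
        http2 on;
        # ... remaining server block unchanged
    }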
@ -3,8 +3,6 @@
set -e
cd "$(dirname "$0")"

docker pull "$NGINX_IMAGE"

#https://github.com/fiatjaf/expensive-relay
# NOSTR RELAY WHICH REQUIRES PAYMENTS.
DOCKER_YAML_PATH="$PROJECT_PATH/nginx.yml"
@ -25,19 +23,16 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../../deployment_defaults.sh
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# We create another ghost instance under /
if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
EOL
fi

if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = "true" ]; then
@ -52,7 +47,7 @@ EOL
EOL
fi

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - nostrnet-$DOMAIN_IDENTIFIER-en
EOL
@ -65,8 +60,13 @@ done

cat >> "$DOCKER_YAML_PATH" <<EOL
    volumes:
      - ${REMOTE_DATA_PATH_LETSENCRYPT}:/etc/letsencrypt:ro
      - ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
EOL

if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
      - clams-browser-app:/browser-app:ro
EOL
fi

cat >> "$DOCKER_YAML_PATH" <<EOL
configs:
@ -82,36 +82,39 @@ configs:

EOL

################ NETWORKS SECTION
cat >> "$DOCKER_YAML_PATH" <<EOL
networks:
EOL

for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../../deployment_defaults.sh
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE:
    attachable: true

EOL
fi

if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  giteanet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi

@ -119,23 +122,33 @@ EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
  nextcloudnet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
  nostrnet-$DOMAIN_IDENTIFIER-en:
    attachable: true

EOL
fi
fi
done
done

# for some reason we need to wait here. See if there's a fix; poll for service readiness?
sleep 5

if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
  clams-browser-app:
    external: true
    name: clams-root
EOL
fi

docker stack deploy -c "$DOCKER_YAML_PATH" reverse-proxy

if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
# iterate over all our domains and create the nginx config file.
sleep 3
sleep 1
fi
@ -1,6 +1,6 @@
#!/bin/bash

set -eu
set -e
cd "$(dirname "$0")"

docker pull "$NOSTR_RELAY_IMAGE"
@ -10,12 +10,12 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

# source the site path so we know what features it has.
source ../../project_defaults.sh
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh

if [ "$DEPLOY_NOSTR" = true ]; then
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_DATA_PATH/nostr"
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"
NOSTR_PATH="$REMOTE_NOSTR_PATH/$DOMAIN_NAME"
NOSTR_CONFIG_PATH="$SITE_PATH/webstack/nostr.config"

@ -25,6 +25,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOCKER_YAML_PATH="$SITE_PATH/webstack/nostr.yml"

NET_NAME="nostrnet-$DOMAIN_IDENTIFIER"
DBNET_NAME="nostrdbnet-$DOMAIN_IDENTIFIER"

# here's the NGINX config. We support ghost and nextcloud.
echo "" > "$DOCKER_YAML_PATH"
@ -85,8 +86,11 @@ pubkey_whitelist = [ "${NOSTR_ACCOUNT_PUBKEY}" ]
domain_whitelist = [ "${DOMAIN_NAME}" ]
EOL

if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
sleep 1
fi

fi
done