Compare commits

...

92 Commits

SHA1 Message Date
123fef5103
Enable debugging on www 2023-12-14 19:53:26 -05:00
76b480a053
Update lnplay 2023-12-14 12:45:31 -05:00
0f9f564b82
Update lnplay 2023-11-30 16:34:19 -05:00
52239974d4
Update lnplay. 2023-11-29 21:34:07 -05:00
4a16abdf10
Update LNPlay 2023-11-29 09:24:54 -05:00
f4cc0f3105
Fix some wiring issues. 2023-11-29 09:24:44 -05:00
37eb43aa9b
Updates 2023-11-21 16:49:19 -05:00
8aa285b096
Update lnplay to incus branch. 2023-09-22 17:55:10 -06:00
a8a6f7d289
Update lnplay. 2023-09-15 19:28:28 -04:00
13df5bf172
Update lnplay. 2023-09-15 10:42:35 -04:00
934bb5e559
Update lnplay 2023-09-15 10:28:27 -04:00
13366cbbd8
Update lnplay. 2023-09-15 10:25:05 -04:00
5ca1988954
Update lnplay. 2023-09-15 09:38:12 -04:00
ee058c1a18
Update LNPLAY 2023-09-15 09:26:13 -04:00
1fcf670920
Update lnplay head. 2023-09-14 22:35:45 -04:00
ba9e9e2d7b
Update lnplay 2023-09-14 22:27:21 -04:00
ce9cce66ee
Update lnplay head. 2023-09-14 22:18:24 -04:00
09b17764bd
Update lnplay head. 2023-09-14 16:56:56 -04:00
b793c32c1d
Update lnplay head. 2023-09-14 16:46:40 -04:00
709d8f0fe7
Update lnplay head. 2023-09-14 12:56:33 -04:00
a5c9d4b817
Update lnplay. 2023-09-14 12:04:14 -04:00
cd55292001
Update lnplay head. 2023-09-14 09:47:09 -04:00
e699ed6fdd
Add lnplay git submodule. 2023-09-13 10:36:29 -04:00
c8615fb70a
Remove the lnplay gitsubmodule. 2023-09-13 10:32:45 -04:00
6ef965d59d
Add lnplay back into repo. 2023-09-13 09:21:11 -04:00
4bc2774e8f
Delete clamsserver path 2023-09-13 09:19:34 -04:00
96908cc765
Removed submodule lnplay 2023-09-13 09:18:47 -04:00
7dbfb2e4a7
Remove Clams from project deployment. 2023-09-05 21:48:44 -04:00
9eceb40dba
Update to latest clams-server. 2023-08-20 20:47:06 -04:00
6256ecd1b7
Refactor Backup 2023-08-20 20:46:21 -04:00
56e0e057a6
Update clams root. 2023-08-13 12:17:33 -04:00
afe947d4db
Add clams-server as submodule. 2023-08-12 12:50:15 -04:00
ba25d9e430
Fix --announce-addr in btcpayserver. 2023-08-12 12:03:23 -04:00
88d6299df0
nitpicks and forgotten STOP_SERVICES 2023-08-11 10:00:21 -04:00
7745e0a4e1
Make consistent docker image pulls. 2023-08-11 09:57:22 -04:00
a4ebfb19b6
Move backup_path.sh up a level, refactors. 2023-08-11 09:55:57 -04:00
3e5dcb561c
Remove STOP_SERVICES flag. 2023-08-11 09:53:49 -04:00
5659f06da8
Add Ghost/mailgun support for roygbiv.guide. 2023-08-11 09:49:25 -04:00
7a1aa38b02
BTCPayserver networking updates. 2023-08-11 09:48:20 -04:00
96f5931ffa
Remove Clams functionality in lieu of clams-server 2023-08-11 09:41:53 -04:00
ca069c7dec
Updates to clams deployment. 2023-05-16 22:01:47 -04:00
42a1604146
Fix Clams deployment. 2023-04-14 12:49:13 -04:00
7da3f29f42
Fix bug. 2023-04-12 09:56:59 -04:00
3a6b073a01
Remove License. 2023-04-12 09:54:57 -04:00
2e68e93303
Moved deployment stuff to project repo. 2023-04-07 13:59:55 -04:00
63c0eed271
Move wait_for_lxc_ip from project repo. 2023-04-07 13:59:36 -04:00
7731a1fd6e
Move btcpayserver .bashrc init. 2023-04-07 13:59:14 -04:00
9576b56ae8
Refactor 2023-04-07 10:40:38 -04:00
d536b85d51
Refactoring 2023-04-07 10:20:15 -04:00
e175418148
add a settings.json for vscode 2023-04-06 15:08:59 -04:00
ec8afae4ab
Fix bug in nginx.conf. 2023-04-06 15:08:34 -04:00
dd33c34710
Create relative symlinks. 2023-04-06 11:13:51 -04:00
f05daa9bfb
Minor updates. 2023-04-05 20:05:40 -04:00
d3eba31bf4
Refactor restore backup 2023-04-05 15:44:26 -04:00
9a15ada7e3
Remove unnecessary variable assignments. 2023-04-05 12:08:25 -04:00
55de36dadc
Improve deployment reliability. 2023-04-04 16:23:42 -04:00
305c1afcbd
Update CLI parsing. 2023-04-04 11:44:23 -04:00
73ec75a9f2
The rest of it. 2023-04-04 11:10:38 -04:00
e8303a54a1
BTCPay updates. 2023-04-04 11:10:14 -04:00
b3c3be4df1
Update clams. 2023-04-04 11:09:35 -04:00
4c214cf7ad
Improve control flow. 2023-04-04 11:08:22 -04:00
29038cfb0a
Nostr updates. 2023-04-04 10:56:34 -04:00
078ba9ac35
ss-deploy CLI updates. 2023-04-04 10:54:31 -04:00
68b786aaa2
Wire up storage volumes. 2023-04-04 10:52:09 -04:00
ea78a2b734
Update Clams deployment. 2023-04-02 09:30:34 -04:00
6d59329a21
Ensure lxc profiles target correct project context 2023-04-02 09:29:41 -04:00
c3c187311e
Update variable names. 2023-04-02 09:28:42 -04:00
ec04b8e274
Remove default project switch in deploy.sh 2023-04-02 09:25:33 -04:00
0c28ca4102
BTCPay updates. 2023-04-02 09:24:59 -04:00
00b9c9bb55
Check for base VM in default project; rm if exists 2023-04-02 09:23:18 -04:00
3aaf137707
Add lxc storage volume mounting work. 2023-04-02 09:22:38 -04:00
b5c1c22db0
Unindent due to project iteration removal. 2023-04-02 09:20:34 -04:00
589a062e99
Scope ss-base creation to default project. 2023-04-02 09:18:45 -04:00
8251d552a2
Comment out block/chainstate volume mounting. 2023-04-02 09:17:51 -04:00
7a710d0916
Only renew certs when ! --stop 2023-03-27 11:05:20 -04:00
79924633f5
Improve stdout output. 2023-03-27 11:05:02 -04:00
130bcedf94
Make Clams browser-app non-default 2023-03-27 11:04:29 -04:00
9c17dd84d5
Fix FQDN bug in DOCKER_HOST env. 2023-03-22 16:34:06 -04:00
1690092f65
Bring FastSync back until storage volumes work. 2023-03-22 16:33:33 -04:00
8268e0bbb5
Disable image compression. 2023-03-22 16:32:47 -04:00
75c7b9ffaf
Disable volume mounting. 2023-03-21 13:18:44 -04:00
0ede72d556
Mount storage volumes only on btcpayserver. 2023-03-20 20:12:25 -04:00
ba153765a7
Load cached lxd images if any. 2023-03-20 12:58:33 -04:00
5260567a18
Simplify file sync issues. 2023-03-20 10:32:32 -04:00
a8fc059d81
Comment out clams for now. 2023-03-20 10:24:33 -04:00
4747f88d13
Simplify file sync with rsync. 2023-03-20 10:23:52 -04:00
4fb926a8cf
Fix backup bug. 2023-03-19 16:40:11 -04:00
c2f3612bee
Switch to docker long form commands. 2023-03-19 13:44:37 -04:00
5b612efb92
Fix backup/restore bug. 2023-03-19 13:44:09 -04:00
c67c872470
Minor updates regarding volume attachments. 2023-03-19 13:43:48 -04:00
70608d41f2
Update error checking during btcpay restore. 2023-03-19 13:43:10 -04:00
bc4ac89c19
Fix restore cert bug. 2023-03-18 18:30:48 -04:00
32 changed files with 426 additions and 1420 deletions

3 .gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "lnplay"]
path = lnplay
url = https://github.com/farscapian/lnplay
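With the submodule in place, a fresh checkout needs it initialized before the deployment scripts can use it; a minimal sketch using standard git commands (the clone URL stands in for wherever this repo is hosted):

git clone --recurse-submodules <this-repo-url>
# or, in an existing working copy:
git submodule update --init lnplay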

37 .vscode/settings.json vendored Normal file

@@ -0,0 +1,37 @@
{
"editor.renderWhitespace": "boundary",
"editor.tabSize": 4,
"editor.detectIndentation": false,
"editor.insertSpaces": true,
"editor.cursorBlinking": "phase",
"editor.cursorWidth": 3,
"editor.formatOnSave": true,
"shellcheck.enable": true,
"shellcheck.enableQuickFix": true,
"shellcheck.run": "onType",
"shellcheck.executablePath": "shellcheck",
"shellcheck.customArgs": [
"-x"
],
"shellcheck.ignorePatterns": {},
// "shellcheck.exclude": [
// "SC1090",
// "SC1091",
// "SC2029"
// ],
"terminal.integrated.fontFamily": "monospace",
"workbench.colorCustomizations": {
"activityBar.background": "#1900a565",
"activityBar.foreground": "#e7e7e7",
"activityBar.inactiveForeground": "#e7e7e799",
"activityBarBadge.background": "#7143fc",
"activityBarBadge.foreground": "#e7e7e7",
"titleBar.activeBackground": "#029727",
"titleBar.inactiveBackground": "#02972799",
"titleBar.activeForeground": "#e7e7e7",
"titleBar.inactiveForeground": "#e7e7e799",
"statusBar.background": "#f3ad43",
"statusBarItem.hoverBackground": "#ffbc59",
"statusBar.foreground": "#000000"
}
}
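The "-x" flag passed via shellcheck.customArgs makes ShellCheck follow files pulled in with 'source', which these deployment scripts do extensively; the equivalent manual run (deploy.sh chosen purely as an illustrative target) would be:

shellcheck -x deploy.sh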


@@ -1,9 +0,0 @@
MIT License
Copyright (c) <year> <copyright holders>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -7,25 +7,25 @@ cd "$(dirname "$0")"
# the script executed here from the BTCPAY repo will automatically take services down
# and bring them back up.
echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."
echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_SERVER_FQDN'."
sleep 5
ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
ssh "$BTCPAY_SERVER_FQDN" "mkdir -p $REMOTE_BACKUP_PATH; cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# TODO: not sure if this is necessary, but we want to give the VM additional time to take down all services
# that way processes can run shutdown procedures and leave files in the correct state.
sleep 10
# TODO enable encrypted archives
# TODO switch to btcpay-backup.sh when on LXD fully.
scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_FQDN:$REMOTE_HOME/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "sudo cp $REMOTE_HOME/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
# TODO switch to btcpay-backup.sh
scp ./remote_scripts/btcpay-backup.sh "$BTCPAY_SERVER_FQDN:$REMOTE_DATA_PATH/btcpay-backup.sh"
ssh "$BTCPAY_SERVER_FQDN" "sudo cp $REMOTE_DATA_PATH/btcpay-backup.sh $BTCPAY_SERVER_APPPATH/btcpay-backup.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
ssh "$BTCPAY_SERVER_FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c $BTCPAY_SERVER_APPPATH/btcpay-backup.sh"
# next we pull the resulting backup archive down to our management machine.
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$BTCPAY_SERVER_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_BACKUP_PATH/btcpay.tar.gz"
ssh "$BTCPAY_SERVER_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_BACKUP_PATH/btcpay.tar.gz"
# if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
@@ -34,6 +34,6 @@ if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
fi
mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
scp "$BTCPAY_SERVER_FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"
echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_SERVER_FQDN'."


@@ -1,6 +1,3 @@
# we append this text to the btcpay server /home/ubuntu/.bashrc so
# logged in users have more convenient access to the various CLI tools
alias bitcoin-cli="bitcoin-cli.sh $@"
alias lightning-cli="bitcoin-lightning-cli.sh $@"
# these aliases are simply calling the btcpay server scripts.
alias bitcoin-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-cli.sh $@"
alias lightning-cli="/home/ubuntu/ss-data/btcpayserver-docker/bitcoin-lightning-cli.sh $@"
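With the updated .bashrc in place, a logged-in user reaches the dockerized daemons through these aliases, e.g. with standard bitcoind and Core Lightning RPC calls:

bitcoin-cli getblockchaininfo
lightning-cli getinfo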


@@ -3,56 +3,37 @@
set -eu
cd "$(dirname "$0")"
export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"
# run the btcpay setup script if it hasn't been done before.
if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
./stub_btcpay_setup.sh
BACKUP_BTCPAY=false
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# send an updated ~/.bashrc so we have quicker access to cli tools
scp ./bashrc.txt "ubuntu@$BTCPAY_SERVER_FQDN:$REMOTE_HOME/.bashrc"
ssh "$BTCPAY_SERVER_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
ssh "$BTCPAY_SERVER_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
fi
RUN_SERVICES=true
./stub_btcpay_setup.sh
# we will re-run the btcpayserver provisioning scripts if directed to do so.
# if an update does occur, we grab another backup.
if [ "$UPDATE_BTCPAY" = true ]; then
# run the update.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
# btcpay-update.sh brings services back up, but does not take them down.
ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
ssh "$BTCPAY_SERVER_FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
sleep 20
sleep 30
elif [ "$RESTORE_BTCPAY" = true ]; then
# run the update.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
sleep 15
./restore.sh
RUN_SERVICES=true
BACKUP_BTCPAY=false
elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
# the administrator may have indicated a reconfig;
# if so, we re-run setup script.
./stub_btcpay_setup.sh
RUN_SERVICES=true
BACKUP_BTCPAY=false
fi
# if the script gets this far, then we grab a regular backup.
if [ "$BACKUP_BTCPAY" = true ]; then
# we just grab a regular backup
./backup_btcpay.sh
fi
# The default is to resume services, though the admin may want to keep services off (e.g., for a migration)
# we bring the services back up by default.
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
# The default is to resume services, though the admin may want to keep services off (e.g., for a migration)
# we bring the services back up by default.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
fi
echo "FINISHED btcpayserver/go.sh"

0 btcpayserver/remote_scripts/btcpay-backup.sh Normal file → Executable file

0 btcpayserver/remote_scripts/btcpay-restore.sh Normal file → Executable file


@@ -3,31 +3,17 @@
set -e
cd "$(dirname "$0")"
if [ "$RESTORE_BTCPAY" = false ]; then
exit 0
fi
if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
# push the restoration archive to the remote server
echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
BTCPAY_REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/btcpayserver"
ssh "$FQDN" mkdir -p "$BTCPAY_REMOTE_BACKUP_PATH"
REMOTE_BTCPAY_ARCHIVE_PATH="$BTCPAY_REMOTE_BACKUP_PATH/btcpay.tar.gz"
scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
# we clean up any old containers first before restoring.
ssh "$FQDN" docker system prune -f
# push the modified restore script to the remote directory, set permissions, and execute.
scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_HOME/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_HOME/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BTCPAY_DOCKER_COMPOSE=$REMOTE_HOME/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
# now, we're going to take things down because apparently this needs to be re-executed.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
echo "ERROR: File does not exist."
exit 1
scp ./remote_scripts/btcpay-restore.sh "$FQDN:$REMOTE_DATA_PATH/btcpay-restore.sh"
ssh "$FQDN" "sudo mv $REMOTE_DATA_PATH/btcpay-restore.sh $BTCPAY_SERVER_APPPATH/btcpay-restore.sh && sudo chmod 0755 $BTCPAY_SERVER_APPPATH/btcpay-restore.sh"
ssh "$FQDN" "cd $REMOTE_DATA_PATH/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_DATA_PATH BTCPAY_DOCKER_COMPOSE=$REMOTE_DATA_PATH/btcpayserver-docker/Generated/docker-compose.generated.yml bash -c '$BTCPAY_SERVER_APPPATH/btcpay-restore.sh $REMOTE_BTCPAY_ARCHIVE_PATH'"
fi


@@ -3,6 +3,8 @@
set -e
cd "$(dirname "$0")"
# default is for regtest
CLIGHTNING_WEBSOCKET_PORT=9736
if [ "$BITCOIN_CHAIN" = testnet ]; then
@@ -13,6 +15,7 @@ fi
export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT"
# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"
@@ -36,7 +39,7 @@ done
if [ ! -d "btcpayserver-docker" ]; then
echo "cloning btcpayserver-docker";
git clone -b master ${BTCPAYSERVER_GITREPO} btcpayserver-docker;
git config --global --add safe.directory /home/ubuntu/btcpayserver-docker
git config --global --add safe.directory /home/ubuntu/ss-data/btcpayserver-docker
else
cd ./btcpayserver-docker
git pull
@@ -47,7 +50,7 @@ fi
cd btcpayserver-docker
export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
export BTCPAY_ANNOUNCEABLE_HOST="${BTCPAY_USER_FQDN}"
export NBITCOIN_NETWORK="${BITCOIN_CHAIN}"
export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
export BTCPAYGEN_LIGHTNING="clightning"
@@ -55,14 +58,20 @@ export BTCPAYGEN_CRYPTO1="btc"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage-s;bitcoin-clightning.custom;"
export BTCPAYGEN_REVERSEPROXY="nginx"
export BTCPAY_ENABLE_SSH=false
export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
export BTCPAY_BASE_DIRECTORY=${REMOTE_DATA_PATH}
export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https;"
export REVERSEPROXY_DEFAULT_HOST="$BTCPAY_USER_FQDN"
# if [ "\$NBITCOIN_NETWORK" != regtest ]; then
# cd ./contrib/FastSync
# ./load-utxo-set.sh
# cd -
# fi
# next we create fragments to customize various aspects of the system
# this block customizes clightning to ensure the correct endpoints are being advertised
# We want to advertise the correct ipv4 endpoint for remote hosts to get in touch.
cat > ${REMOTE_HOME}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
cat > ${REMOTE_DATA_PATH}/btcpayserver-docker/docker-compose-generator/docker-fragments/bitcoin-clightning.custom.yml <<EOF
services:
clightning_bitcoin:
@@ -70,6 +79,8 @@ services:
LIGHTNINGD_OPT: |
announce-addr-dns=true
experimental-websocket-port=9736
experimental-peer-storage
experimental-offers
ports:
- "${CLIGHTNING_WEBSOCKET_PORT}:9736"
expose:
@@ -77,25 +88,21 @@
EOF
# run the setup script.
. ./btcpay-setup.sh -i
touch ${REMOTE_HOME}/btcpay.complete
touch ${REMOTE_DATA_PATH}/btcpay.complete
chown ubuntu:ubuntu ${REMOTE_DATA_PATH}/btcpay.complete
EOL
# send an updated ~/.bashrc so we have quicker access to cli tools
scp ./bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
# send the setup script to the remote machine.
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
scp "$SITE_PATH/btcpay.sh" "ubuntu@$BTCPAY_SERVER_FQDN:$REMOTE_DATA_PATH/btcpay_setup.sh"
ssh "$BTCPAY_SERVER_FQDN" "chmod 0744 $REMOTE_DATA_PATH/btcpay_setup.sh"
# script is executed under sudo
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_SERVER_FQDN" "sudo bash -c $REMOTE_DATA_PATH/btcpay_setup.sh"
# let's give time for the containers to spin up
sleep 10


@@ -1,94 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
if [ -d "$SS_JAMMY_PATH" ]; then
lxc image import "$SS_JAMMY_PATH/meta-bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.tar.xz" \
"$SS_JAMMY_PATH/bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.qcow2" \
--alias "$UBUNTU_BASE_IMAGE_NAME"
else
# copy the image down from canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
fi
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME"
for CHAIN in mainnet testnet; do
for DATA in blocks chainstate; do
lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
done
done
lxc start "$BASE_IMAGE_VM_NAME"
sleep 15
while lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22
# If we have any chainstate or blocks in our SSME, let's push them to the
# remote host as a zfs volume that way deployments can share a common history
# of chainstate/blocks.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
# if the storage snapshot doesn't yet exist, create it.
if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
if [ -d "$DATA_PATH" ]; then
COMPLETE_FILE_PATH="$DATA_PATH/complete"
if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
lxc file push --recursive --uid=999 --gid=999 --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/"
lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
else
echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
fi
fi
fi
done
done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME"
lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME"
fi
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME"
echo "INFO: Success creating the base image. Deleting artifacts from the build process."
lxc delete -f "$BASE_IMAGE_VM_NAME"
# now let's get a snapshot of each of the blocks/chainstate directories.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
fi
done
done

464 deploy.sh

@@ -1,464 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/main)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs lxc; do
if ! command -v "$cmd" >/dev/null 2>&1; then
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
exit 1
fi
done
# do a spot check; if we are on production, warn.
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running command against a production system!"
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
# switch to the default project. We will switch to something more specific later.
if ! lxc info | grep "project:" | grep -q default; then
lxc project switch default
fi
DOMAIN_NAME=
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=true
BACKUP_APPS=true
BACKUP_BTCPAY=true
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
SKIP_BTCPAY=false
UPDATE_BTCPAY=false
RECONFIGURE_BTCPAY_SERVER=false
REMOTE_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false
RESTART_FRONT_END=true
USER_TARGET_PROJECT=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-certs)
RESTORE_CERTS=true
shift
;;
--restore-www)
RESTORE_WWW=true
BACKUP_APPS=false
RUN_CERT_RENEWAL=false
RESTART_FRONT_END=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
BACKUP_BTCPAY=false
RUN_CERT_RENEWAL=false
shift
;;
--no-backup-www)
BACKUP_CERTS=false
BACKUP_APPS=false
shift
;;
--stop)
STOP_SERVICES=true
RESTART_FRONT_END=true
shift
;;
--restart-front-end)
RESTART_FRONT_END=true
shift
;;
--backup-archive-path=*)
BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
shift
;;
--project=*)
USER_TARGET_PROJECT="${i#*=}"
shift
;;
--update-btcpay)
UPDATE_BTCPAY=true
shift
;;
--skip-www)
SKIP_WWW=true
shift
;;
--skip-btcpay)
SKIP_BTCPAY=true
shift
;;
--no-cert-renew)
RUN_CERT_RENEWAL=false
shift
;;
--reconfigure-btcpay)
RECONFIGURE_BTCPAY_SERVER=true
shift
;;
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set event when the RESTORE_BTCPAY = true. "
exit 1
fi
# set up our default paths.
source ../../defaults.sh
export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS"
export BACKUP_APPS="$BACKUP_APPS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export REMOTE_NAME="$REMOTE_NAME"
export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
export RESTORE_CERTS="$RESTORE_CERTS"
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
if [ ! -f "$SSH_PUBKEY_PATH" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
# ensure our remote path is created.
mkdir -p "$REMOTE_PATH"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
if [ ! -f "$REMOTE_DEFINITION" ]; then
echo "ERROR: The remote definition could not be found. You may need to re-run 'ss-remote'."
exit 1
fi
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
source "$REMOTE_DEFINITION"
export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
export DEPLOYMENT_STRING="$DEPLOYMENT_STRING"
# this is our password generation mechanism, relying on GPG for secure password generation.
function new_pass {
gpg --gen-random --armor 1 25
}
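# Sketch of the output (assuming GnuPG 2.x behavior): 25 random bytes drawn at
# quality level 1 and printed base64-armored, i.e. one ~36-character line per call.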
function stub_site_definition {
mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites"
# create a symlink from the PROJECT_PATH/sites/DOMAIN_NAME to the ss-sites/domain name
if [ ! -d "$PROJECT_PATH/sites/$DOMAIN_NAME" ]; then
ln -s "$SITE_PATH" "$PROJECT_PATH/sites/$DOMAIN_NAME"
fi
if [ ! -f "$SITE_PATH/site.conf" ]; then
# check to see if the env file exists; exit if not.
SITE_DEFINITION_PATH="$SITE_PATH/site.conf"
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
# stub out a site.conf with new passwords.
cat >"$SITE_DEFINITION_PATH" <<EOL
# https://www.sovereign-stack.org/ss-deploy/#siteconf
DOMAIN_NAME="${DOMAIN_NAME}"
# BTCPAY_ALT_NAMES="tip,store,pay,send"
SITE_LANGUAGE_CODES="en"
DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
DEPLOY_GHOST=true
DEPLOY_CLAMS=true
DEPLOY_NEXTCLOUD=false
NOSTR_ACCOUNT_PUBKEY=
DEPLOY_GITEA=false
GHOST_MYSQL_PASSWORD="$(new_pass)"
GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
GITEA_MYSQL_PASSWORD="$(new_pass)"
GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site.conf for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1
fi
fi
}
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
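# Worked example with a hypothetical value: DEPLOYMENT_STRING="(ss|mainnet),(dev|regtest)"
# splits on commas into items like "(ss|mainnet)"; NO_PARENS strips the surrounding
# parentheses to "ss|mainnet", so cut yields PROJECT_PREFIX="ss" and
# BITCOIN_CHAIN="mainnet", and PROJECT_NAME below becomes "ss-mainnet".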
export PROJECT_PREFIX="$PROJECT_PREFIX"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
if [ -n "$USER_TARGET_PROJECT" ]; then
if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
continue
fi
fi
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
# create a symlink from ./remotepath/projects/project
if [ ! -d "$REMOTE_PATH/projects/$PROJECT_NAME" ]; then
ln -s "$PROJECT_PATH" "$REMOTE_PATH/projects/$PROJECT_NAME"
fi
# check to see if the env file exists; exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# stub out a project.conf
cat >"$PROJECT_DEFINITION_PATH" <<EOL
# see https://www.sovereign-stack.org/ss-deploy/#projectconf for more info.
PRIMARY_DOMAIN="domain0.tld"
# OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
WWW_SERVER_MAC_ADDRESS=
# WWW_SERVER_CPU_COUNT="6"
# WWW_SERVER_MEMORY_MB="4096"
BTCPAYSERVER_MAC_ADDRESS=
# BTCPAY_SERVER_CPU_COUNT="4"
# BTCPAY_SERVER_MEMORY_MB="4096"
EOL
chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project.conf for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/ss-deploy/"
exit 1
fi
# source the project definition.
source "$PROJECT_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project.conf."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
fi
export DOMAIN_LIST="$DOMAIN_LIST"
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
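# e.g. DOMAIN_LIST="a.tld,b.tld,c.tld" contains two commas, so DOMAIN_COUNT=3.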
# let's provision our primary domain first.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
stub_site_definition
# bring the VMs up under the primary domain name.
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"
# iterate over all our server endpoints and provision them if needed.
# www
VPS_HOSTNAME=
if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
# create the lxd base image.
./create_lxc_base.sh
fi
for VIRTUAL_MACHINE in www btcpayserver; do
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
FQDN=
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site.conf"
source ./domain_env.sh
# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: DOMAIN_NAME not specified in your site.conf."
exit 1
fi
# create the lxc project as specified by PROJECT_NAME
if ! lxc project list | grep -q "$PROJECT_NAME"; then
lxc project create "$PROJECT_NAME"
lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=false
fi
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project=default | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
echo "ERROR: the MACVLAN interface has not been specified. You may need to run 'ss-remote' again."
exit 1
fi
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
# Now let's switch to the new project to ensure new resources are created under the project scope.
if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
echo "INFO: switch to lxd project '$PROJECT_NAME'."
lxc project switch "$PROJECT_NAME"
fi
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
fi
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$SKIP_WWW" = true ]; then
echo "INFO: Skipping WWW due to command line argument."
continue
fi
FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
if [ "$BITCOIN_CHAIN" = mainnet ]; then
ROOT_DISK_SIZE_GB=150
elif [ "$BITCOIN_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=70
fi
elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
export FQDN="$BASE_IMAGE_VM_NAME"
ROOT_DISK_SIZE_GB=8
else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
exit
fi
export FQDN="$FQDN"
export LXD_VM_NAME="${FQDN//./-}"
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
export PROJECT_PATH="$PROJECT_PATH"
./deploy_vm.sh
if [ "$VIRTUAL_MACHINE" = www ]; then
# this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0
fi
fi
done
# let's stub out the rest of our site definitions, if any.
for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# stub out the site definition if it doesn't exist.
stub_site_definition
done
# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ]; then
./www/go.sh
ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
fi
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ]; then
./btcpayserver/go.sh
ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
fi
done


@@ -1,64 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
## This is a weird if clause since we need to LEFT-ALIGN the statement below.
SSH_STRING="Host ${FQDN}"
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
########## BEGIN
cat >> "$SSH_HOME/config" <<-EOF
${SSH_STRING}
HostName ${FQDN}
User ubuntu
EOF
###
fi
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
# if the machine doesn't exist, we create it.
if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# create a base image if needed and instantiate a VM.
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
exit 1
fi
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME"
# now let's create a new VM to work with.
#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
lxc init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
# let's PIN the HW address for now so we don't exhaust IP addresses
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
# of chainstate/blocks.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# MOUNT_PATH="/$CHAIN/$DATA"
# if lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# lxc storage volume attach ss-base "$CHAIN-$DATA" "$LXD_VM_NAME" "$MOUNT_PATH"
# fi
# done
# done
lxc start "$LXD_VM_NAME"
sleep 10
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
fi
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
# create a directory to store backup archives. This is done on all new VMs.
ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"


@@ -3,19 +3,18 @@
set -e
export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
export BTCPAY_FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
export REMOTE_GHOST_PATH="$REMOTE_DATA_PATH/ghost"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_DATA_PATH/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_DATA_PATH/gitea"
SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"

1 lnplay Submodule

@@ -0,0 +1 @@
Subproject commit e9a18f9385414c1dc34381f39c2709cf115c907f
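A pin like this is updated by checking out the desired commit inside the submodule and committing the new pointer; a sketch using the hash shown above:

cd lnplay
git fetch && git checkout e9a18f9385414c1dc34381f39c2709cf115c907f
cd .. && git add lnplay && git commit -m "Update lnplay"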

59 project_defaults.sh Executable file

@@ -0,0 +1,59 @@
#!/bin/bash
set -e
export DEPLOY_GHOST=true
export DEPLOY_NOSTR=false
export DEPLOY_NEXTCLOUD=false
export DEPLOY_GITEA=false
export GHOST_DEPLOY_SMTP=false
export MAILGUN_FROM_ADDRESS=
export MAILGUN_SMTP_USERNAME=
export MAILGUN_SMTP_PASSWORD=
export SITE_LANGUAGE_CODES="en"
export LANGUAGE_CODE="en"
export NOSTR_ACCOUNT_PUBKEY=
# this is where the html is sourced from.
export SITE_HTML_PATH=
export GHOST_MYSQL_PASSWORD=
export GHOST_MYSQL_ROOT_PASSWORD=
export NEXTCLOUD_MYSQL_PASSWORD=
export GITEA_MYSQL_PASSWORD=
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
export GITEA_MYSQL_ROOT_PASSWORD=
export DUPLICITY_BACKUP_PASSPHRASE=
DEFAULT_DB_IMAGE="mariadb:10.11.2-jammy"
# run the docker stack.
export GHOST_IMAGE="ghost:5.53.3"
# TODO switch to mysql. May require intricate export work for existing sites.
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
#https://forum.ghost.org/t/how-to-migrate-from-mariadb-10-to-mysql-8/29575
export GHOST_DB_IMAGE="mysql:8.0.32"
export NGINX_IMAGE="nginx:1.25.1"
# version of backup is 24.0.3
export NEXTCLOUD_IMAGE="nextcloud:25.0.4"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
# TODO PIN the gitea version number.
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export OTHER_SITES_LIST=
export BTCPAY_ALT_NAMES=


@@ -1,258 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
VIRTUAL_MACHINE=base
LXD_HOSTNAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxd-hostname=*)
LXD_HOSTNAME="${i#*=}"
shift
;;
--vm=*)
VIRTUAL_MACHINE="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# generate the custom cloud-init file. Cloud init installs and configures sshd
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
mkdir -p "$PROJECT_PATH/cloud-init"
YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat > "$YAML_PATH" <<EOF
config:
EOF
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: 4
limits.memory: 4096MB
EOF
fi
if [ "$VIRTUAL_MACHINE" = www ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${WWW_SERVER_CPU_COUNT}"
limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
EOF
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
EOF
fi
# if VIRTUAL_MACHINE=base, then we are building the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
# this is for the base image only...
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
package_update: true
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: false
fqdn: ${BASE_IMAGE_VM_NAME}
packages:
- curl
- ssh-askpass
- apt-transport-https
- ca-certificates
- gnupg-agent
- software-properties-common
- lsb-release
- net-tools
- htop
- rsync
- duplicity
- sshfs
- fswatch
- jq
- git
- nano
- wait-for-it
- dnsutils
- wget
groups:
- docker
users:
- name: ubuntu
groups: docker
shell: /bin/bash
lock_passwd: false
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
EOF
if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
cat >> "$YAML_PATH" <<EOF
write_files:
- path: /etc/docker/daemon.json
permissions: 0644
owner: root
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
]
}
EOF
fi
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
- sudo chown -R ubuntu:ubuntu /home/ubuntu/
EOF
# write_files:
# - path: /etc/ssh/sshd_config
# content: |
# Port 22
# ListenAddress 0.0.0.0
# Protocol 2
# ChallengeResponseAuthentication no
# PasswordAuthentication no
# UsePAM no
# LogLevel INFO
# - path: /etc/docker/daemon.json
# content: |
# {
# "registry-mirrors": "${REGISTRY_URL}",
# "labels": "githead=${LATEST_GIT_COMMIT}"
# }
#"labels": [githead="${LATEST_GIT_COMMIT}"]
# apt:
# sources:
# docker.list:
# source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
# keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
# - sudo apt-get update
#- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
# all other machines that are not the base image
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
package_update: false
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: true
fqdn: ${FQDN}
user.network-config: |
version: 2
ethernets:
enp5s0:
dhcp4: true
dhcp4-overrides:
route-metric: 50
match:
macaddress: ${MAC_ADDRESS_TO_PROVISION}
set-name: enp5s0
enp6s0:
dhcp4: true
EOF
fi
# All profiles get a root disk and cloud-init config.
cat >> "$YAML_PATH" <<EOF
description: Default LXD profile for ${FILENAME}
devices:
root:
path: /
pool: ss-base
type: disk
config:
source: cloud-init:config
type: disk
EOF
# Stub out the network piece for the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic
enp6s0:
name: enp6s0
network: ss-ovn
type: nic
name: ${PRIMARY_DOMAIN}
EOF
fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"


@@ -1,49 +0,0 @@
#!/bin/bash
set -e
LXC_INSTANCE_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxd-name=*)
LXC_INSTANCE_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
exit 1
fi
if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
exit 1
fi
IP_V4_ADDRESS=
while true; do
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
if [ -n "$IP_V4_ADDRESS" ]; then
# give the machine extra time to spin up.
wait-for-it -t 300 "$IP_V4_ADDRESS:22"
break
else
sleep 1
printf '.'
fi
done
# wait for cloud-init to complete before returning.
while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
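A representative invocation, mirroring the call in deploy_vm.sh (the instance name is hypothetical):

bash -c "./wait_for_lxc_ip.sh --lxd-name=ss-www-example-com"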

1 www/.gitignore vendored

@@ -1 +0,0 @@
clams


@@ -14,16 +14,5 @@ fi
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity "$REMOTE_SOURCE_BACKUP_PATH" "file://$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
SSHFS_PATH="/tmp/sshfs_temp"
mkdir -p "$SSHFS_PATH"
# now let's pull down the latest files from the backup directory.
# create a temp directory to serve as the mountpoint for the remote machine backups directory
sshfs "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH"
# rsync the files from the remote server to our local backup path.
rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"
# step 4: unmount the SSHFS filesystem and cleanup.
umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH"
# sync the remote backup path down
rsync -a "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH/" "$LOCAL_BACKUP_PATH/"

47 www/backup_www.sh Executable file

@@ -0,0 +1,47 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
APP=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--app=*)
APP="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
if [ -z "$APP" ]; then
echo "ERROR: You must specify the --app= paramater."
exit 1
fi
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../deployment_defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
# these variables are used by both backup/restore scripts.
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
mkdir -p "$LOCAL_BACKUP_PATH"
./backup_path.sh
done
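A representative run, assuming the ghost app (one of the apps this changeset backs up under the www role):

./backup_www.sh --app=ghost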


@@ -11,22 +11,20 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
# with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates
# with the incus side, we are trying to expose ALL OUR services from one IP address, which terminates
# at a caching reverse proxy that runs nginx.
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs"
ssh "$PRIMARY_WWW_FQDN" sudo mkdir -p "$REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs"
# this is the minimum required: www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
if [ "$DEPLOY_CLAMS" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi
if [ "$DEPLOY_NOSTR" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi
# if BTCPAY_ALT_NAMES has been set by the admin, iterate over the list
@@ -38,7 +36,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
done
fi
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_HOME/letsencrypt/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
GENERATE_CERT_STRING="docker run -it --rm --name certbot -p 80:80 -p 443:443 -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME:/etc/letsencrypt -v /var/lib/letsencrypt:/var/lib/letsencrypt -v $REMOTE_DATA_PATH_LETSENCRYPT/$DOMAIN_NAME/_logs:/var/log/letsencrypt certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand ${DOMAIN_STRING} --email $CERTIFICATE_EMAIL_ADDRESS"
# execute the certbot command that we dynamically generated.
eval "$GENERATE_CERT_STRING"

102 www/go.sh

@@ -3,14 +3,7 @@
set -eu
cd "$(dirname "$0")"
# redirect all docker commands to the remote host.
DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
export DOCKER_HOST="$DOCKER_HOST"
# prepare clams images and such
./prepare_clams.sh
# Create the nginx config file which covers all domains.
bash -c ./stub/nginx_config.sh
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
@@ -18,10 +11,10 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
### Let's check to ensure all the required settings are set.
if [ "$DEPLOY_GHOST" = true ]; then
if [ -z "$GHOST_MYSQL_PASSWORD" ]; then
@@ -58,6 +51,15 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
fi
fi
if [ "$DEPLOY_NOSTR" = true ]; then
if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
echo "ERROR: When deploying nostr, you MUST specify NOSTR_ACCOUNT_PUBKEY."
exit 1
fi
fi
if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site.conf."
exit 1
@@ -70,64 +72,19 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
done
./stop_docker_stacks.sh
# TODO check if there are any other stacks that are left running (other than reverse proxy)
# if so, this may mean the user has disabled one or more domains and that existing sites/services
# are still running. We should warn the user and quit; they have to manually remove those docker stacks.
if [[ $(docker stack ls | wc -l) -gt 2 ]]; then
echo "WARNING! You still have stacks running. If you have modified the SITES list, you may need to go remove the docker stacks runnong the remote machine."
echo "exiting."
exit 1
STACKS_STILL_RUNNING=false
if [[ $(docker stack list | wc -l) -gt 2 ]]; then
echo "WARNING! You still have stacks running. If you have modified the SITES list,"
echo " you may need to go remove the docker stacks running the remote machine."
STACKS_STILL_RUNNING=true
fi
# ok, the backend stacks are stopped.
if [ "$RESTART_FRONT_END" = true ]; then
# remove the nginx stack
if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
sleep 2
docker stack rm reverse-proxy
# wait for all docker containers to stop.
# TODO see if there's a way to check for this.
sleep 20
fi
# generate the certs and grab a backup
if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
./generate_certs.sh
fi
# let's backup all our letsencrypt certs
export APP="letsencrypt"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
# these variables are used by both backup/restore scripts.
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
mkdir -p "$LOCAL_BACKUP_PATH"
# we grab a backup of the certs unless we're restoring.
if [ "$RESTORE_CERTS" = true ]; then
./restore_path.sh
else
./backup_path.sh
fi
done
# generate the certs and grab a backup
if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ] && [ "$STACKS_STILL_RUNNING" = false ]; then
./generate_certs.sh
fi
# nginx gets deployed first since it "owns" the docker networks of downstream services.
@@ -138,22 +95,3 @@ fi
./stub/nextcloud_yml.sh
./stub/gitea_yml.sh
./stub/nostr_yml.sh
# # start a browser session; point it to port 80 to ensure HTTPS redirect.
# # WWW_FQDN is in our certificate, so we resolve to that.
# wait-for-it -t 320 "$WWW_FQDN:80"
# wait-for-it -t 320 "$WWW_FQDN:443"
# # open browser tabs.
# if [ "$DEPLOY_GHOST" = true ]; then
# xdg-open "http://$WWW_FQDN" > /dev/null 2>&1
# fi
# if [ "$DEPLOY_NEXTCLOUD" = true ]; then
# xdg-open "http://$NEXTCLOUD_FQDN" > /dev/null 2>&1
# fi
# if [ "$DEPLOY_GITEA" = true ]; then
# xdg-open "http://$GITEA_FQDN" > /dev/null 2>&1
# fi


@@ -1,35 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
# deploy clams wallet.
LOCAL_CLAMS_REPO_PATH="$(pwd)/clams"
CLAMS_APP_DOCKER_REPO_URL="https://github.com/farscapian/clams-app-docker"
if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
git clone "$CLAMS_APP_DOCKER_REPO_URL" "$LOCAL_CLAMS_REPO_PATH"
else
cd "$LOCAL_CLAMS_REPO_PATH"
git config --global pull.rebase false
git pull
cd -
fi
# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME"
BROWSER_APP_GIT_TAG="1.5.0"
BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app"
BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG"
# build the browser-app image.
if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then
docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \
--build-arg VERSION="$BROWSER_APP_GIT_TAG" \
-t "$BROWSER_APP_IMAGE_NAME" \
"$(pwd)/clams/frontend/browser-app/"
fi
# If the clams-root volume doesn't exist, we create and seed it.
if ! docker volume list | grep -q clams-root; then
docker volume create clams-root
docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
fi


@@ -19,7 +19,6 @@ if [ "$USER_SAYS_YES" = false ]; then
RESPONSE=
read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
@@ -38,4 +37,4 @@ scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"
# reset folder owner to ubuntu
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_DATA_PATH/$APP"

View File

@@ -1,15 +1,18 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
# this script brings down the docker stacks on www
# bring down ghost instances.
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../../deployment_defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
@@ -26,8 +29,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
# these variables are used by both the backup and restore scripts.
export APP="$APP"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH/www/$APP/$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_DATA_PATH/$APP/$DOMAIN_NAME"
# ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
@@ -36,14 +39,13 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
mkdir -p "$LOCAL_BACKUP_PATH"
fi
if [ "$RESTORE_WWW" = true ]; then
./restore_path.sh
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, we back up only when BACKUP_APPS is true.
./backup_path.sh
fi
done
done
done
# remove the nginx stack
if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
docker stack rm reverse-proxy
sleep 10
fi
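The sleep after docker stack rm papers over the fact that removal is asynchronous: the command returns while services and networks are still being torn down. A sketch that polls for completion instead (same stack name as above):
STACK="reverse-proxy"
if docker stack list --format "{{.Name}}" | grep -q "^${STACK}$"; then
    docker stack rm "$STACK"
    # the stack stays listed until its services are gone, so poll rather
    # than trusting a fixed sleep; networks may linger a moment longer.
    while docker stack list --format "{{.Name}}" | grep -q "^${STACK}$"; do
        sleep 1
    done
fi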

View File

@@ -3,15 +3,23 @@
set -eu
cd "$(dirname "$0")"
docker pull "$GHOST_IMAGE"
DEPLOY_STACK=false
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
if [ "$DEPLOY_GHOST" = true ]; then
DEPLOY_STACK=true
fi
# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
@@ -19,8 +27,8 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
# ensure directories on remote host exist so we can mount them into the containers.
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_HOME/ghost/$DOMAIN_NAME/$LANGUAGE_CODE/db"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/ghost" "$REMOTE_GHOST_PATH/$DOMAIN_NAME/$LANGUAGE_CODE/db"
export GHOST_STACK_TAG="ghost-$STACK_NAME"
export GHOST_DB_STACK_TAG="ghostdb-$STACK_NAME"
@@ -44,7 +52,7 @@ EOL
- ghostnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
- ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/ghost:/var/lib/ghost/content
environment:
EOL
if [ "$LANGUAGE_CODE" = "en" ]; then
@@ -65,6 +73,21 @@ EOL
- database__connection__database=ghost
- database__pool__min=0
- privacy__useStructuredData=true
EOL
# INSERT EMAIL OPTIONS HERE
if [ "$GHOST_DEPLOY_SMTP" = true ]; then
cat >>"$DOCKER_YAML_PATH" <<EOL
- mail__transport=SMTP
- mail__from=${MAILGUN_FROM_ADDRESS}
- mail__options__auth__user=${MAILGUN_SMTP_USERNAME}
- mail__options__auth__pass=${MAILGUN_SMTP_PASSWORD}
- mail__options__host=smtp.mailgun.org
- mail__options__port=587
EOL
fi
cat >>"$DOCKER_YAML_PATH" <<EOL
deploy:
restart_policy:
condition: on-failure
@@ -74,7 +97,7 @@ EOL
networks:
- ghostdbnet-${DOMAIN_IDENTIFIER}-${LANGUAGE_CODE}
volumes:
- ${REMOTE_HOME}/ghost/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
- ${REMOTE_GHOST_PATH}/${DOMAIN_NAME}/${LANGUAGE_CODE}/db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
- MYSQL_DATABASE=ghost
@@ -103,7 +126,7 @@ EOL
EOL
fi
if [ "$STOP_SERVICES" = false ]; then
if [ "$DEPLOY_STACK" = true ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"
sleep 2
fi
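The YAML generation above mixes two kinds of variable references on purpose: ${MAILGUN_SMTP_USERNAME} expands while this script writes the file, whereas \${GHOST_MYSQL_ROOT_PASSWORD} stays literal so docker stack deploy can substitute it later. A minimal sketch of the distinction:
NOW="expanded at generation time"
cat > /tmp/example.yml <<EOL
# ${NOW} is substituted as this heredoc is written.
value_a: ${NOW}
# the backslash keeps \${LATER} literal in the output file, leaving the
# substitution to whatever consumes the YAML (here, docker stack deploy).
value_b: \${LATER}
EOL
cat /tmp/example.yml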

View File

@@ -3,12 +3,16 @@
set -eu
cd "$(dirname "$0")"
docker pull "$GITEA_IMAGE"
docker pull "$GITEA_DB_IMAGE"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
@@ -80,10 +84,10 @@ EOL
${DBNET_NAME}:
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
sleep 1
fi
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
sleep 1
fi
done
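docker pull runs unconditionally at the top of these stub scripts, which costs a registry round-trip on every deploy. A sketch of a pull-only-if-absent variant (the image name is illustrative); the trade-off is that mutable tags would no longer pick up upstream updates automatically:
GITEA_IMAGE="gitea/gitea:1.20"
# docker image inspect exits non-zero when the image is missing locally,
# so the pull happens only on a cache miss.
if ! docker image inspect "$GITEA_IMAGE" >/dev/null 2>&1; then
    docker pull "$GITEA_IMAGE"
fi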

View File

@@ -3,12 +3,16 @@
set -eu
cd "$(dirname "$0")"
docker pull "$NEXTCLOUD_IMAGE"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
@@ -35,7 +39,7 @@ services:
- nextcloud-${DOMAIN_IDENTIFIER}-en
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/html:/var/www/html
environment:
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
- MYSQL_DATABASE=nextcloud
@@ -55,7 +59,7 @@ services:
networks:
- nextclouddb-${DOMAIN_IDENTIFIER}-en
volumes:
- ${REMOTE_HOME}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
- ${REMOTE_DATA_PATH}/nextcloud/${DOMAIN_NAME}/en/db:/var/lib/mysql
environment:
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
@@ -74,9 +78,7 @@ networks:
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
sleep 1
fi
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"
sleep 1
fi
done

View File

@@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eu
cd "$(dirname "$0")"
# here's the NGINX config. We support ghost and nextcloud.
@@ -18,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
@@ -31,6 +31,8 @@
http {
client_max_body_size 100m;
server_tokens off;
sendfile on;
include mime.types;
# the next two sets of commands and the connection_upgrade block come from https://docs.btcpayserver.org/FAQ/Deployment/#can-i-use-an-existing-nginx-server-as-a-reverse-proxy-with-ssl-termination
# Needed to allow very long URLs to prevent issues while signing PSBTs
@@ -49,8 +51,8 @@ http {
# return 403 for all non-explicit hostnames
server {
listen 80 default_server;
return 301 https://${WWW_FQDN}\$request_uri;
listen 80 default_server;
return 301 https://${WWW_FQDN}\$request_uri;
}
EOL
@@ -170,7 +172,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL
# https://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
server {
listen 443 ssl http2;
listen 443 ssl;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@@ -180,7 +182,7 @@ EOL
EOL
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
if [ "$DEPLOY_NOSTR" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# We return a JSON object with name/pubkey mapping per NIP05.
# https://www.reddit.com/r/nostr/comments/rrzk76/nip05_mapping_usernames_to_dns_domains_by_fiatjaf/sssss
@@ -207,7 +209,7 @@ EOL
EOL
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
if [ "$DEPLOY_NOSTR" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# wss://$NOSTR_FQDN server block
server {
@@ -239,7 +241,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL
# https server block for https://${BTCPAY_SERVER_NAMES}
server {
listen 443 ssl http2;
listen 443 ssl;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@@ -249,8 +251,7 @@ EOL
# Route everything to the real BTCPay server
location / {
# URL of BTCPay Server
proxy_pass http://$LXD_VM_NAME.lxd:80;
proxy_pass http://10.10.10.66:80;
proxy_set_header Host \$http_host;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-Real-IP \$remote_addr;
@@ -263,51 +264,23 @@ EOL
EOL
# Clams server entry
# cat >>"$NGINX_CONF_PATH" <<EOL
# # https server block for https://${CLAMS_FQDN}
# server {
# listen 443 ssl http2;
# ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
# ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# server_name ${CLAMS_FQDN};
# index index.js;
# root /apps/clams;
# index 200.htm;
# location / {
# try_files \$uri \$uri/ /200.htm;
# }
# location ~* \.(?:css|js|jpg|svg)$ {
# expires 30d;
# add_header Cache-Control "public";
# }
# }
# EOL
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
cat >>"$NGINX_CONF_PATH" <<EOL
if [ "$DEPLOY_GHOST" = true ]; then
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_cache_path /tmp/${STACK_NAME} levels=1:2 keys_zone=${STACK_NAME}:600m max_size=100m inactive=24h;
EOL
done
done
# the open server block for the HTTPS listener for ghost
cat >>"$NGINX_CONF_PATH" <<EOL
# the open server block for the HTTPS listener for ghost
cat >>"$NGINX_CONF_PATH" <<EOL
# Main HTTPS listener for https://${WWW_FQDN}
server {
listen 443 ssl http2;
listen 443 ssl;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@@ -323,28 +296,20 @@ EOL
EOL
# # add the Onion-Location header if specified.
# if [ "$DEPLOY_ONION_SITE" = true ]; then
# cat >>"$NGINX_CONF_PATH" <<EOL
# add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
# EOL
# fi
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
location ~ ^/(ghost/|p/|private/) {
EOL
else
else
cat >>"$NGINX_CONF_PATH" <<EOL
location ~ ^/${LANGUAGE_CODE}/(ghost/|p/|private/) {
EOL
fi
fi
cat >>"$NGINX_CONF_PATH" <<EOL
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
@@ -355,22 +320,22 @@ EOL
EOL
done
done
ROOT_SITE_LANGUAGE_CODES="$SITE_LANGUAGE_CODES"
for LANGUAGE_CODE in ${ROOT_SITE_LANGUAGE_CODES//,/ }; do
ROOT_SITE_LANGUAGE_CODES="$SITE_LANGUAGE_CODES"
for LANGUAGE_CODE in ${ROOT_SITE_LANGUAGE_CODES//,/ }; do
cat >>"$NGINX_CONF_PATH" <<EOL
cat >>"$NGINX_CONF_PATH" <<EOL
# Location block serving https://${WWW_FQDN}/${LANGUAGE_CODE}, or https://${WWW_FQDN}/ for English.
EOL
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
if [ "$LANGUAGE_CODE" = en ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
location / {
EOL
if (( "$LANGUAGE_CODE_COUNT" > 1 )); then
# we only need this clause if we know there is more than one language being rendered.
cat >>"$NGINX_CONF_PATH" <<EOL
if (( "$LANGUAGE_CODE_COUNT" > 1 )); then
# we only need this clause if we know there is more than one language being rendered.
cat >>"$NGINX_CONF_PATH" <<EOL
# Redirect the user to the correct language using the map above.
if ( \$http_accept_language !~* '^en(.*)\$' ) {
#rewrite (.*) \$1/\$lang;
@@ -378,15 +343,15 @@ EOL
}
EOL
fi
fi
else
cat >>"$NGINX_CONF_PATH" <<EOL
else
cat >>"$NGINX_CONF_PATH" <<EOL
location /${LANGUAGE_CODE} {
EOL
fi
fi
cat >>"$NGINX_CONF_PATH" <<EOL
cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
@@ -424,7 +389,7 @@ EOL
EOL
done
done
# this is the closing server block for the ghost HTTPS segment
cat >>"$NGINX_CONF_PATH" <<EOL
@@ -432,13 +397,13 @@ EOL
}
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# TLS listener for ${NEXTCLOUD_FQDN}
server {
listen 443 ssl http2;
listen 443 ssl;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@@ -471,26 +436,11 @@ EOL
fi
# TODO this MIGHT be part of the solution for Twitter Cards.
# location /contents {
# resolver 127.0.0.11 ipv6=off valid=5m;
# proxy_set_header X-Real-IP \$remote_addr;
# proxy_set_header Host \$http_host;
# proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
# proxy_set_header X-Forwarded-Proto \$scheme;
# proxy_intercept_errors on;
# proxy_pass http://ghost-${DOMAIN_IDENTIFIER}-${SITE_LANGUAGE_CODES}::2368\$og_prefix\$request_uri;
# }
# this piece is for GITEA.
if [ "$DEPLOY_GITEA" = true ]; then
cat >>"$NGINX_CONF_PATH" <<EOL
# TLS listener for ${GITEA_FQDN}
server {
listen 443 ssl http2;
listen 443 ssl;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@@ -514,31 +464,6 @@ EOL
EOL
fi
# deploy Clams browser app under the primary domain.
if [ $iteration = 0 ]; then
cat >> "$NGINX_CONF_PATH" <<EOF
# server block for the clams browser-app; just a static website
server {
listen 443 ssl;
server_name ${CLAMS_FQDN};
autoindex off;
server_tokens off;
gzip_static on;
root /browser-app;
index 200.html;
}
EOF
fi
iteration=$((iteration+1))
done
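The recurring edit from listen 443 ssl http2; to listen 443 ssl; matches nginx deprecating the http2 parameter on the listen directive (nginx 1.25.1 moved HTTP/2 to a standalone directive). If HTTP/2 should remain enabled on a new enough nginx, the generated server blocks could carry that directive instead; a sketch in the script's own heredoc style:
cat >>"$NGINX_CONF_PATH" <<EOL
    server {
        listen 443 ssl;
        # since nginx 1.25.1, HTTP/2 is enabled per server block with its
        # own directive rather than a parameter on the listen line.
        http2 on;
        server_name ${WWW_FQDN};
    }
EOL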

View File

@@ -3,6 +3,8 @@
set -e
cd "$(dirname "$0")"
docker pull "$NGINX_IMAGE"
#https://github.com/fiatjaf/expensive-relay
# NOSTR RELAY WHICH REQUIRES PAYMENTS.
DOCKER_YAML_PATH="$PROJECT_PATH/nginx.yml"
@@ -18,57 +20,55 @@ services:
networks:
EOL
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
# source the site path so we know what features it has.
source ../../../deployment_defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# We create another ghost instance under /
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# We create another ghost instance under /
if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
EOL
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
fi
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- giteanet-$DOMAIN_IDENTIFIER-en
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nextcloudnet-$DOMAIN_IDENTIFIER-en
EOL
fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nostrnet-$DOMAIN_IDENTIFIER-en
EOL
fi
fi
done
if [ "$DEPLOY_NEXTCLOUD" = "true" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nextcloudnet-$DOMAIN_IDENTIFIER-en
EOL
fi
if [ "$DEPLOY_NOSTR" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- nostrnet-$DOMAIN_IDENTIFIER-en
EOL
fi
fi
done
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
- ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
EOL
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
- clams-browser-app:/browser-app:ro
EOL
fi
done
cat >> "$DOCKER_YAML_PATH" <<EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
- ${REMOTE_DATA_PATH_LETSENCRYPT}:/etc/letsencrypt:ro
EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
configs:
- source: nginx-config
target: /etc/nginx/nginx.conf
@@ -82,73 +82,60 @@ configs:
EOL
################ NETWORKS SECTION
cat >> "$DOCKER_YAML_PATH" <<EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
networks:
EOL
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../deployment_defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
# source the site path so we know what features it has.
source ../../../../defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
# for each language specified in the site.conf, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
if [ "$DEPLOY_GHOST" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE:
attachable: true
EOL
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
giteanet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nextcloudnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nostrnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
fi
done
done
if [ "$DEPLOY_CLAMS" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
clams-browser-app:
external: true
name: clams-root
EOL
fi
if [ "$LANGUAGE_CODE" = en ]; then
if [ "$DEPLOY_GITEA" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
giteanet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nextcloudnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
# iterate over all our domains and create the nginx config file.
sleep 1
fi
if [ "$DEPLOY_NOSTR" = true ]; then
cat >> "$DOCKER_YAML_PATH" <<EOL
nostrnet-$DOMAIN_IDENTIFIER-en:
attachable: true
EOL
fi
fi
done
done
# for some reason we need to wait here; a fix would poll for service readiness (see the sketch below).
sleep 5
docker stack deploy -c "$DOCKER_YAML_PATH" reverse-proxy
# iterate over all our domains and create the nginx config file.
sleep 3
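The comment above asks whether the trailing sleeps can be replaced by polling for service readiness. One way, sketched here, is to watch the Replicas column of docker stack services until every service reports current == desired (the timeout value is illustrative):
STACK="reverse-proxy"
DEADLINE=$((SECONDS + 120))
# each Replicas cell reads "current/desired"; the stack has converged once
# every line shows matching numbers.
until docker stack services --format "{{.Replicas}}" "$STACK" \
      | awk -F/ '$1 != $2 { bad = 1 } END { exit bad }'; do
    if (( SECONDS > DEADLINE )); then
        echo "timed out waiting for stack ${STACK} to converge." >&2
        exit 1
    fi
    sleep 2
done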

View File

@@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eu
cd "$(dirname "$0")"
docker pull "$NOSTR_RELAY_IMAGE"
@@ -10,12 +10,12 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"
if [ "$DEPLOY_NOSTR" = true ]; then
REMOTE_NOSTR_PATH="$REMOTE_DATA_PATH/nostr"
NOSTR_PATH="$REMOTE_NOSTR_PATH/$DOMAIN_NAME"
NOSTR_CONFIG_PATH="$SITE_PATH/webstack/nostr.config"
@@ -25,7 +25,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOCKER_YAML_PATH="$SITE_PATH/webstack/nostr.yml"
NET_NAME="nostrnet-$DOMAIN_IDENTIFIER"
DBNET_NAME="nostrdbnet-$DOMAIN_IDENTIFIER"
# here's the NGINX config. We support ghost and nextcloud.
echo "" > "$DOCKER_YAML_PATH"
@@ -86,11 +85,8 @@ pubkey_whitelist = [ "${NOSTR_ACCOUNT_PUBKEY}" ]
domain_whitelist = [ "${DOMAIN_NAME}" ]
EOL
if [ "$STOP_SERVICES" = false ]; then
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
sleep 1
fi
fi
docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
sleep 1
fi
done
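Since the relay only admits the whitelisted pubkey, it can be useful to confirm the matching NIP-05 document that the nginx config publishes. A sketch from the management machine (the domain and name are placeholders):
DOMAIN="example.com"
NAME="_"
# per NIP-05, the name-to-pubkey mapping is served from
# /.well-known/nostr.json?name=<name>; the whitelisted pubkey should
# appear in the "names" object of the response.
curl -s "https://${DOMAIN}/.well-known/nostr.json?name=${NAME}"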