forked from ss/sovereign-stack
Moved files.
Signed-off-by: Derek Smith <derek@farscapian.com>
This commit is contained in:
parent
6e9803357b
commit
a30c736444
12
Dockerfile
12
Dockerfile
@ -1,19 +1,17 @@
|
|||||||
FROM ubuntu:21.04
|
FROM ubuntu:22.04
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync duplicity sshfs snapd lxd-client
|
RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync duplicity sshfs snapd lxd-client
|
||||||
|
|
||||||
RUN mkdir /sovereign-stack
|
RUN mkdir /sovereign-stack
|
||||||
COPY ./ /sovereign-stack
|
COPY ./deployment /sovereign-stack
|
||||||
WORKDIR /sovereign-stack
|
WORKDIR /sovereign-stack
|
||||||
|
|
||||||
RUN mkdir /site
|
RUN mkdir /domain
|
||||||
VOLUME /site
|
VOLUME /domain
|
||||||
ENV SITE_PATH=/site
|
ENV SITE_PATH=/domain
|
||||||
|
|
||||||
COPY ./entrypoint.sh /entrypoint.sh
|
COPY ./entrypoint.sh /entrypoint.sh
|
||||||
RUN chmod 0744 /entrypoint.sh
|
RUN chmod 0744 /entrypoint.sh
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
CMD /entrypoint.sh
|
CMD /entrypoint.sh
|
17
README.md
17
README.md
@ -1,3 +1,16 @@
|
|||||||
# Documentation
|
# Directions
|
||||||
|
|
||||||
Please visit the [https://www.sovereign-stack.org](Sovereign Stack) website for documentation related to this repository.
|
ALL files at this level SHOULD be executed on the management machine! If you have just downloaded Soveregin Stack, run `install.sh` to install all the dependencies required by Sovereign Stack scripts.
|
||||||
|
|
||||||
|
# Dockerfile
|
||||||
|
|
||||||
|
If you want to run the Sovereign Stack management machine activities inside a docker container, you can do so by 1) building the image and 2) running the resulting sovereign stack docker container:
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
docker build -t sovereign-stack .
|
||||||
|
|
||||||
|
## Running
|
||||||
|
|
||||||
|
docker run -it sovereign-stack \
|
||||||
|
-v "$HOME/.sites/domain.tld"
|
14
deploy.sh
14
deploy.sh
@ -3,7 +3,6 @@
|
|||||||
set -exu
|
set -exu
|
||||||
cd "$(dirname "$0")"
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
check_dependencies () {
|
check_dependencies () {
|
||||||
for cmd in "$@"; do
|
for cmd in "$@"; do
|
||||||
if ! command -v "$cmd" >/dev/null 2>&1; then
|
if ! command -v "$cmd" >/dev/null 2>&1; then
|
||||||
@ -24,7 +23,7 @@ VPS_HOSTING_TARGET=
|
|||||||
RUN_CERT_RENEWAL=true
|
RUN_CERT_RENEWAL=true
|
||||||
USER_NO_BACKUP=false
|
USER_NO_BACKUP=false
|
||||||
USER_RUN_RESTORE=false
|
USER_RUN_RESTORE=false
|
||||||
BTC_CHAIN=testnet
|
BTC_CHAIN=regtest
|
||||||
UPDATE_BTCPAY=false
|
UPDATE_BTCPAY=false
|
||||||
RECONFIGURE_BTCPAY_SERVER=false
|
RECONFIGURE_BTCPAY_SERVER=false
|
||||||
BTCPAY_ADDITIONAL_HOSTNAMES=
|
BTCPAY_ADDITIONAL_HOSTNAMES=
|
||||||
@ -72,6 +71,10 @@ for i in "$@"; do
|
|||||||
BTC_CHAIN=mainnet
|
BTC_CHAIN=mainnet
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
--testnet)
|
||||||
|
BTC_CHAIN=testnet
|
||||||
|
shift
|
||||||
|
;;
|
||||||
--reconfigure-btcpay)
|
--reconfigure-btcpay)
|
||||||
RECONFIGURE_BTCPAY_SERVER=true
|
RECONFIGURE_BTCPAY_SERVER=true
|
||||||
shift
|
shift
|
||||||
@ -96,6 +99,11 @@ if [ -z "$VPS_HOSTING_TARGET" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ -z "$DOMAIN_NAME" ]; then
|
||||||
|
echo "ERROR: You MUST specify --domain=domain.tld"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
export DOMAIN_NAME="$DOMAIN_NAME"
|
export DOMAIN_NAME="$DOMAIN_NAME"
|
||||||
export VPS_HOSTING_TARGET="$VPS_HOSTING_TARGET"
|
export VPS_HOSTING_TARGET="$VPS_HOSTING_TARGET"
|
||||||
export LXD_DISK_TO_USE="$LXD_DISK_TO_USE"
|
export LXD_DISK_TO_USE="$LXD_DISK_TO_USE"
|
||||||
@ -176,8 +184,6 @@ for APP_TO_DEPLOY in btcpay www umbrel; do
|
|||||||
if [ "$MACHINE_EXISTS" = true ]; then
|
if [ "$MACHINE_EXISTS" = true ]; then
|
||||||
# we delete the machine if the user has directed us to
|
# we delete the machine if the user has directed us to
|
||||||
if [ "$MIGRATE_VPS" = true ]; then
|
if [ "$MIGRATE_VPS" = true ]; then
|
||||||
|
|
||||||
|
|
||||||
# run the domain_init based on user input.
|
# run the domain_init based on user input.
|
||||||
if [ "$USER_NO_BACKUP" = true ]; then
|
if [ "$USER_NO_BACKUP" = true ]; then
|
||||||
echo "Machine exists. We don't need to back it up because the user has directed --no-backup."
|
echo "Machine exists. We don't need to back it up because the user has directed --no-backup."
|
||||||
|
3
deployment/README.md
Normal file
3
deployment/README.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
# Documentation
|
||||||
|
|
||||||
|
Please visit the [https://www.sovereign-stack.org](Sovereign Stack) website for documentation related to this repository.
|
9
deployment/backup_btcpay.sh
Executable file
9
deployment/backup_btcpay.sh
Executable file
@ -0,0 +1,9 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -ex
|
||||||
|
|
||||||
|
# take the services down, create a backup archive, then pull it down.
|
||||||
|
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./backup.sh"
|
||||||
|
ssh "$FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
|
||||||
|
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
|
||||||
|
scp "$FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$LOCAL_BACKUP_PATH/btcpay-$1.tar.gz"
|
24
deployment/backup_www.sh
Executable file
24
deployment/backup_www.sh
Executable file
@ -0,0 +1,24 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files.
|
||||||
|
# we could eliminate that and simply save duplicity backups to the management machine running the script
|
||||||
|
# this could be done by using a local path and mounting it on the remote VPS.
|
||||||
|
# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs
|
||||||
|
|
||||||
|
# step 1: run duplicity on the remote system to backup all files to the remote system.
|
||||||
|
ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --exclude "$REMOTE_HOME/backups" "$REMOTE_HOME" "file://$REMOTE_BACKUP_PATH"
|
||||||
|
ssh "$FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
|
||||||
|
|
||||||
|
# now let's pull down the latest files from the backup directory.
|
||||||
|
# create a temp directory to serve as the mountpoint for the remote machine backups directory
|
||||||
|
sshfs "$FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH"
|
||||||
|
|
||||||
|
# rsync the files from the remote server to our local backup path.
|
||||||
|
rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"
|
||||||
|
|
||||||
|
# step 4: unmount the SSHFS filesystem and cleanup.
|
||||||
|
umount "$SSHFS_PATH"
|
||||||
|
rm -rf "$SSHFS_PATH"
|
3
deployment/command.sh
Normal file
3
deployment/command.sh
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
bash -c ./deploy.sh --domain=bitizen.store --hosting-provider=lxd --macvlan-interface=eno3 --storage-backend=/dev/sda
|
141
deployment/defaults.sh
Normal file
141
deployment/defaults.sh
Normal file
@ -0,0 +1,141 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
export DEPLOY_WWW_SERVER=false
|
||||||
|
export DEPLOY_BTCPPAY_SERVER=false
|
||||||
|
export DEPLOY_UMBREL_VPS=false
|
||||||
|
|
||||||
|
export DEPLOY_GHOST=true
|
||||||
|
export DEPLOY_NOSTR=false
|
||||||
|
export DEPLOY_ONION_SITE=false
|
||||||
|
export DEPLOY_NEXTCLOUD=false
|
||||||
|
export DEPLOY_GITEA=false
|
||||||
|
|
||||||
|
export WWW_HOSTNAME="www"
|
||||||
|
export BTCPAY_HOSTNAME="btcpay"
|
||||||
|
export UMBREL_HOSTNAME="umbrel"
|
||||||
|
export NEXTCLOUD_HOSTNAME="nextcloud"
|
||||||
|
export GITEA_HOSTNAME="git"
|
||||||
|
export NOSTR_HOSTNAME="messages"
|
||||||
|
export NOSTR_ACCOUNT_PUBKEY=
|
||||||
|
|
||||||
|
export DDNS_PASSWORD=
|
||||||
|
|
||||||
|
# this is where the html is sourced from.
|
||||||
|
export SITE_HTML_PATH=
|
||||||
|
|
||||||
|
# enter your AWS Access Key and Secret Access Key here.
|
||||||
|
export AWS_ACCESS_KEY=
|
||||||
|
export AWS_SECRET_ACCESS_KEY=
|
||||||
|
|
||||||
|
# if overridden, the app will be deployed to proxy $BTCPAY_HOSTNAME.$DOMAIN_NAME requests to the URL specified.
|
||||||
|
# this is useful when you want to oursource your BTCPAY fullnode/lightning node.
|
||||||
|
#export BTCPAY_HANDLER_URL=
|
||||||
|
|
||||||
|
|
||||||
|
export SMTP_SERVER="smtp.mailgun.org"
|
||||||
|
export SMTP_PORT="587"
|
||||||
|
|
||||||
|
# default AWS region and AMI (free-tier AMI ubuntu 20.10)
|
||||||
|
export AWS_REGION="us-east-1"
|
||||||
|
|
||||||
|
# AMI NAME:
|
||||||
|
# ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20220420
|
||||||
|
export AWS_AMI_ID="ami-09d56f8956ab235b3"
|
||||||
|
WWW_INSTANCE_TYPE="t2.micro"
|
||||||
|
BTCPAY_INSTANCE_TYPE="t2.medium"
|
||||||
|
|
||||||
|
# goal will be to keep any particular instance to run AT OR BELOW t2.medium.
|
||||||
|
# other options are t2.small, micro, nano; micro is the free-tier eligible.
|
||||||
|
# [1=vCPUs, 1=Mem(GiB)]
|
||||||
|
# nano [1,0.5], micro [1,1] (free-tier eligible), small [1,2], medium [2,4], large [2,8], xlarge [4,16], 2xlarge [8,32]
|
||||||
|
|
||||||
|
export WWW_INSTANCE_TYPE="$WWW_INSTANCE_TYPE"
|
||||||
|
export BTCPAY_INSTANCE_TYPE="$BTCPAY_INSTANCE_TYPE"
|
||||||
|
|
||||||
|
export SMTP_PASSWORD=
|
||||||
|
export GHOST_MYSQL_PASSWORD=
|
||||||
|
export GHOST_MYSQL_ROOT_PASSWORD=
|
||||||
|
export NEXTCLOUD_MYSQL_PASSWORD=
|
||||||
|
export GITEA_MYSQL_PASSWORD=
|
||||||
|
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
|
||||||
|
export GITEA_MYSQL_ROOT_PASSWORD=
|
||||||
|
export DUPLICITY_BACKUP_PASSPHRASE=
|
||||||
|
#opt-add-fireflyiii;opt-add-zammad
|
||||||
|
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-configurator;"
|
||||||
|
export SSH_HOME="$HOME/.ssh"
|
||||||
|
export VLAN_INTERFACE=
|
||||||
|
export CACHE_DIR="$HOME/cache"
|
||||||
|
export VM_NAME=
|
||||||
|
export DEV_MEMORY_MB="4096"
|
||||||
|
export DEV_CPU_COUNT="4"
|
||||||
|
export SSHFS_PATH="/tmp/sshfs_temp"
|
||||||
|
|
||||||
|
export NEXTCLOUD_SPACE_GB=10
|
||||||
|
|
||||||
|
DEV_LXD_REMOTE="$(lxc remote get-default)"
|
||||||
|
export DEV_LXD_REMOTE="$DEV_LXD_REMOTE"
|
||||||
|
|
||||||
|
export SITE_TITLE=
|
||||||
|
|
||||||
|
# we use this later when we create a VM, we annotate what git commit (from a tag) we used.
|
||||||
|
LATEST_GIT_TAG="$(git describe --abbrev=0)"
|
||||||
|
export LATEST_GIT_TAG="$LATEST_GIT_TAG"
|
||||||
|
|
||||||
|
LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/master)"
|
||||||
|
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
|
||||||
|
|
||||||
|
|
||||||
|
# let's ensure all the tools are installed
|
||||||
|
if [ ! -f "$(which rsync)" ]; then
|
||||||
|
echo "ERROR: rsync is not installed. You may want to install your dependencies."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# shellcheck disable=1091
|
||||||
|
export SITE_PATH="$HOME/.sites"
|
||||||
|
export LXD_DISK_TO_USE=
|
||||||
|
|
||||||
|
|
||||||
|
ENABLE_NGINX_CACHING=false
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# TODO
|
||||||
|
# 1 add check for ~/.aws/credentials and stub one out
|
||||||
|
# 2 ensure install.sh has been run by checking for tor, docker-machine, lxd, wait-for-it, etc.
|
||||||
|
# 3 pretty much just run the install script if anything is awry
|
||||||
|
# 4 maybe check to ensure all the CNAME and A+ records are there first so we can quit before machine creation.
|
||||||
|
|
||||||
|
export SITE_PATH="$SITE_PATH/$DOMAIN_NAME"
|
||||||
|
if [ ! -d "$SITE_PATH" ]; then
|
||||||
|
echo "ERROR: '$SITE_PATH' does not exist."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
export SITE_PATH="$SITE_PATH"
|
||||||
|
export BTC_CHAIN="$BTC_CHAIN"
|
||||||
|
|
||||||
|
# if we're running aws/public, we enable nginx caching since it's a public site.
|
||||||
|
if [ "$VPS_HOSTING_TARGET" = aws ]; then
|
||||||
|
# TODO the correct behavior is to be =true, but cookies aren't working right now.
|
||||||
|
ENABLE_NGINX_CACHING=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
DEFAULT_DB_IMAGE="mariadb:10.6.5"
|
||||||
|
export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"
|
||||||
|
|
||||||
|
# run the docker stack.
|
||||||
|
export GHOST_IMAGE="ghost:4.44.0"
|
||||||
|
export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
||||||
|
export NGINX_IMAGE="nginx:1.21.6"
|
||||||
|
export NEXTCLOUD_IMAGE="nextcloud:23.0.2"
|
||||||
|
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
||||||
|
|
||||||
|
export GITEA_IMAGE="gitea/gitea:latest"
|
||||||
|
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
|
||||||
|
|
||||||
|
export WWW_MAC_ADDRESS=
|
||||||
|
export BTCPAY_MAC_ADDRESS=
|
||||||
|
export UMBREL_MAC_ADDRESS=
|
99
deployment/domain_init.sh
Executable file
99
deployment/domain_init.sh
Executable file
@ -0,0 +1,99 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eux
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
# let's make sure we have an ssh keypair. We just use ~/.ssh/id_rsa
|
||||||
|
if [ ! -f "$SSH_HOME/id_rsa" ]; then
|
||||||
|
# generate a new SSH key for the base vm image.
|
||||||
|
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if an authorized_keys file does not exist, we'll stub one out with the current user.
|
||||||
|
# add additional id_rsa.pub entries manually for more administrative logins.
|
||||||
|
if [ ! -f "$SITE_PATH/authorized_keys" ]; then
|
||||||
|
cat "$SSH_HOME/id_rsa.pub" >> "$SITE_PATH/authorized_keys"
|
||||||
|
fi
|
||||||
|
|
||||||
|
## This is a weird if clause since we need to LEFT-ALIGN the statement below.
|
||||||
|
SSH_STRING="Host ${FQDN}"
|
||||||
|
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
|
||||||
|
|
||||||
|
########## BEGIN
|
||||||
|
cat >> "$SSH_HOME/config" <<-EOF
|
||||||
|
|
||||||
|
${SSH_STRING}
|
||||||
|
HostName ${FQDN}
|
||||||
|
User ubuntu
|
||||||
|
EOF
|
||||||
|
###
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
# when set to true, this flag indicates that a new VPS was created during THIS script run.
|
||||||
|
if [ "$VPS_HOSTING_TARGET" = aws ]; then
|
||||||
|
# let's create the remote VPS if needed.
|
||||||
|
if ! docker-machine ls -q --filter name="$FQDN" | grep -q "$FQDN"; then
|
||||||
|
RUN_BACKUP=false
|
||||||
|
|
||||||
|
./provision_vps.sh
|
||||||
|
|
||||||
|
./prepare_vps_host.sh
|
||||||
|
fi
|
||||||
|
elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
|
||||||
|
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
|
||||||
|
|
||||||
|
#check to ensure the MACVLAN interface has been set by the user
|
||||||
|
if [ -z "$MACVLAN_INTERFACE" ]; then
|
||||||
|
echo "ERROR: MACVLAN_INTERFACE has not been defined. Use '--macvlan-interface=eno1' for example."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# let's first check to ensure there's a cert.tar.gz. We need a valid cert for testing.
|
||||||
|
if [ ! -f "$SITE_PATH/certs.tar.gz" ]; then
|
||||||
|
echo "ERROR: We need a valid cert for testing."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if the machine doesn't exist, we create it.
|
||||||
|
if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
|
||||||
|
export RUN_BACKUP=false
|
||||||
|
|
||||||
|
# create a base image if needed and instantiate a VM.
|
||||||
|
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
|
||||||
|
echo "ERROR: You MUST define a MAC Address for all your machines."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
./provision_lxc.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
# prepare the VPS to support our applications and backups and stuff.
|
||||||
|
./prepare_vps_host.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# this tells our local docker client to target the remote endpoint via SSH
|
||||||
|
export DOCKER_HOST="ssh://ubuntu@$FQDN"
|
||||||
|
|
||||||
|
# the following scripts take responsibility for the rest of the provisioning depending on the app you're deploying.
|
||||||
|
if [ "$APP_TO_DEPLOY" = www ]; then
|
||||||
|
./go_www.sh
|
||||||
|
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
|
||||||
|
./go_btcpay.sh
|
||||||
|
elif [ "$APP_TO_DEPLOY" = umbrel ]; then
|
||||||
|
./go_umbrel.sh
|
||||||
|
elif [ "$APP_TO_DEPLOY" = certonly ]; then
|
||||||
|
# renew the certs; certbot takes care of seeing if we need to actually renew.
|
||||||
|
if [ "$RUN_CERT_RENEWAL" = true ]; then
|
||||||
|
./generate_certs.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "INFO: Please run 'docker-machine rm -f $FQDN' to remove the remote VPS."
|
||||||
|
exit
|
||||||
|
else
|
||||||
|
echo "ERROR: APP_TO_DEPLOY not set correctly. Please refer to the documentation for allowable values."
|
||||||
|
exit
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Successfull deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET; Latest git tag is $LATEST_GIT_TAG" >> "$SITE_PATH/debug.log"
|
2
deployment/down_btcpay_compose.sh
Normal file
2
deployment/down_btcpay_compose.sh
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
0
deployment/expensive-relay/Dockerfile
Normal file
0
deployment/expensive-relay/Dockerfile
Normal file
0
deployment/expensive-relay/conf.conf
Normal file
0
deployment/expensive-relay/conf.conf
Normal file
27
deployment/generate_certs.sh
Executable file
27
deployment/generate_certs.sh
Executable file
@ -0,0 +1,27 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$VPS_HOSTING_TARGET" = aws ]; then
|
||||||
|
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
|
||||||
|
docker pull certbot/certbot
|
||||||
|
|
||||||
|
docker run -it --rm \
|
||||||
|
--name certbot \
|
||||||
|
-p 80:80 \
|
||||||
|
-p 443:443 \
|
||||||
|
-v /etc/letsencrypt:/etc/letsencrypt \
|
||||||
|
-v /var/lib/letsencrypt:/var/lib/letsencrypt certbot/certbot \
|
||||||
|
certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"
|
||||||
|
|
||||||
|
# backup the certs to our SITE_PATH/certs.tar.gz so we have them handy (for local development)
|
||||||
|
ssh "$FQDN" sudo tar -zcvf "$REMOTE_HOME/certs.tar.gz" -C /etc ./letsencrypt
|
||||||
|
ssh "$FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/certs.tar.gz"
|
||||||
|
|
||||||
|
# now pull the tarballs down the local machine.
|
||||||
|
scp "$FQDN:$REMOTE_HOME/certs.tar.gz" "$SITE_PATH/certs.tar.gz"
|
||||||
|
else
|
||||||
|
echo "INFO: Skipping certificate renewal since we're on hosting provider=lxd."
|
||||||
|
fi
|
58
deployment/go_btcpay.sh
Executable file
58
deployment/go_btcpay.sh
Executable file
@ -0,0 +1,58 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
if [ "$RUN_BACKUP" = true ]; then
|
||||||
|
# shellcheck disable=SC2029
|
||||||
|
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# we will re-run the btcpay provisioning scripts if directed to do so.
|
||||||
|
# if an update does occur, we grab another backup.
|
||||||
|
if [ "$UPDATE_BTCPAY" = true ]; then
|
||||||
|
|
||||||
|
if [ "$RUN_BACKUP" = true ]; then
|
||||||
|
# grab a backup PRIOR to update
|
||||||
|
./backup_btcpay.sh "before-update-$UNIX_BACKUP_TIMESTAMP"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# run the update.
|
||||||
|
# shellcheck disable=SC2029
|
||||||
|
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-update.sh"
|
||||||
|
|
||||||
|
else
|
||||||
|
if [ "$RUN_BACKUP" = true ]; then
|
||||||
|
# we just grab a regular backup
|
||||||
|
./backup_btcpay.sh "regular-backup-$UNIX_BACKUP_TIMESTAMP"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# run a restoration if specified.
|
||||||
|
if [ "$RUN_RESTORE" = true ]; then
|
||||||
|
# shellcheck disable=SC2029
|
||||||
|
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
|
||||||
|
./restore_btcpay.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
# the administrator may have indicated a reconfig; if so, re-run the setup (useful for adding alternative names to TLS)
|
||||||
|
if [ "$RECONFIGURE_BTCPAY_SERVER" = true ]; then
|
||||||
|
# re-run the setup script.
|
||||||
|
./run_btcpay_setup.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$MIGRATE_VPS" = false ]; then
|
||||||
|
# The default is to resume services, though admin may want to keep services off (eg., for a migration)
|
||||||
|
# we bring the services back up by default.
|
||||||
|
# shellcheck disable=SC2029
|
||||||
|
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-up.sh"
|
||||||
|
|
||||||
|
# we wait for lightning to comone line too.
|
||||||
|
wait-for-it -t -60 "$FQDN:80"
|
||||||
|
wait-for-it -t -60 "$FQDN:443"
|
||||||
|
|
||||||
|
xdg-open "http://$FQDN"
|
||||||
|
else
|
||||||
|
echo "WARNING: The '--migrate' flag was specified. BTCPay Server services HAVE NOT BEEN TURNED ON!"
|
||||||
|
echo "NOTE: You can restore your latest backup to a new host that has BTCPay Server installed."
|
||||||
|
fi
|
51
deployment/go_umbrel.sh
Executable file
51
deployment/go_umbrel.sh
Executable file
@ -0,0 +1,51 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
|
||||||
|
ssh "$FQDN" "
|
||||||
|
set -x
|
||||||
|
|
||||||
|
cd /home/ubuntu
|
||||||
|
|
||||||
|
# first, lets make sure we have the latest code. We use git over HTTPS and store it in ~/umbrel
|
||||||
|
# ~/umbrel is the only folder we need to backup
|
||||||
|
if [ ! -d ./umbrel ]; then
|
||||||
|
git clone https://github.com/getumbrel/umbrel.git ./umbrel
|
||||||
|
else
|
||||||
|
|
||||||
|
if [ -f ./umbrel/scripts/stop ]; then
|
||||||
|
sudo ./umbrel/scripts/stop
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
"
|
||||||
|
|
||||||
|
# # DO SOME BACKUP OPERATION
|
||||||
|
|
||||||
|
# ssh "$FQDN" "
|
||||||
|
# set -x
|
||||||
|
|
||||||
|
# mkdir -p /home/ubuntu/backup
|
||||||
|
|
||||||
|
# sudo PASSPHRASE=${DUPLICITY_BACKUP_PASSPHRASE} duplicity --exclude ${REMOTE_HOME}/umbrel/bitcoin/blocks ${REMOTE_HOME}/umbrel file://${REMOTE_BACKUP_PATH}
|
||||||
|
# sudo chown -R ubuntu:ubuntu ${REMOTE_BACKUP_PATH}
|
||||||
|
# "
|
||||||
|
|
||||||
|
# Start services back up.
|
||||||
|
ssh "$FQDN" "
|
||||||
|
set -ex
|
||||||
|
cd /home/ubuntu/umbrel
|
||||||
|
|
||||||
|
git config pull.rebase true
|
||||||
|
git fetch --all --tags
|
||||||
|
git checkout master
|
||||||
|
git pull
|
||||||
|
git checkout tags/v0.4.17
|
||||||
|
|
||||||
|
# To use Umbrel on mainnet, run:
|
||||||
|
sudo NETWORK=$BTC_CHAIN /home/ubuntu/umbrel/scripts/start
|
||||||
|
"
|
||||||
|
|
||||||
|
# we wait for lightning to comone line too.
|
||||||
|
wait-for-it -t -60 "$FQDN:80"
|
||||||
|
|
||||||
|
xdg-open "http://$FQDN"
|
101
deployment/go_www.sh
Executable file
101
deployment/go_www.sh
Executable file
@ -0,0 +1,101 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
|
||||||
|
TOR_CONFIG_PATH=
|
||||||
|
|
||||||
|
ssh "$FQDN" mkdir -p "$REMOTE_HOME/ghost_site" "$REMOTE_HOME/ghost_db"
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/db/data"
|
||||||
|
ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/db/logs"
|
||||||
|
ssh "$FQDN" "mkdir -p $REMOTE_NEXTCLOUD_PATH/html"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
ssh "$FQDN" "mkdir -p $REMOTE_GITEA_PATH/data $REMOTE_GITEA_PATH/db"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# enable docker swarm mode so we can support docker stacks.
|
||||||
|
if ! docker info | grep -q "Swarm: active"; then
|
||||||
|
docker swarm init
|
||||||
|
fi
|
||||||
|
|
||||||
|
# stop services.
|
||||||
|
if docker stack list --format "{{.Name}}" | grep -q webstack; then
|
||||||
|
docker stack rm webstack
|
||||||
|
sleep 20
|
||||||
|
fi
|
||||||
|
|
||||||
|
# this will generate letsencrypt certs and pull them down locally.
|
||||||
|
if [ "$VPS_HOSTING_TARGET" != lxd ]; then
|
||||||
|
# really we should change this if clause to some thing like
|
||||||
|
# "if the perimeter firewall allows port 80/443, then go ahead."
|
||||||
|
if [ "$VPS_HOSTING_TARGET" = aws ] && [ "$RUN_CERT_RENEWAL" = true ]; then
|
||||||
|
./generate_certs.sh
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# restore the certs. If they don't exist in a backup we restore from SITE_PATH
|
||||||
|
if [ -f "$SITE_PATH/certs.tar.gz" ]; then
|
||||||
|
scp "$SITE_PATH/certs.tar.gz" "ubuntu@$FQDN:$REMOTE_HOME/certs.tar.gz"
|
||||||
|
ssh "$FQDN" "sudo tar -xvf $REMOTE_HOME/certs.tar.gz -C /etc"
|
||||||
|
else
|
||||||
|
echo "ERROR: Certificates do not exist locally."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$RUN_BACKUP" = true ]; then
|
||||||
|
./backup_www.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$RUN_RESTORE" = true ]; then
|
||||||
|
./restore_www.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
# ensure the tor image is built
|
||||||
|
docker build -t tor:latest ./tor
|
||||||
|
|
||||||
|
# if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
|
||||||
|
# this is how we generate a new torv3 endpoint.
|
||||||
|
if ! ssh "$FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
|
||||||
|
ssh "$FQDN" "mkdir -p $REMOTE_HOME/tor"
|
||||||
|
TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
|
||||||
|
export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
|
||||||
|
docker stack deploy -c ./tor.yml torstack
|
||||||
|
sleep 20
|
||||||
|
docker stack rm torstack
|
||||||
|
sleep 20
|
||||||
|
fi
|
||||||
|
|
||||||
|
ONION_ADDRESS="$(ssh "$FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
|
||||||
|
export ONION_ADDRESS="$ONION_ADDRESS"
|
||||||
|
|
||||||
|
# # Since we run a separate ghost process, we create a new directory and symlink it to the original
|
||||||
|
# if ! ssh "$FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
|
||||||
|
# ssh "$FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
|
||||||
|
# fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$RUN_SERVICES" = true ]; then
|
||||||
|
docker stack deploy -c "$DOCKER_YAML_PATH" webstack
|
||||||
|
|
||||||
|
# start a browser session; point it to port 80 to ensure HTTPS redirect.
|
||||||
|
wait-for-it -t 320 "$FQDN:80"
|
||||||
|
wait-for-it -t 320 "$FQDN:443"
|
||||||
|
|
||||||
|
# open bowser tabs.
|
||||||
|
if [ "$DEPLOY_GHOST" = true ]; then
|
||||||
|
xdg-open "http://$FQDN"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
xdg-open "http://$NEXTCLOUD_FQDN"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
xdg-open "http://$GITEA_FQDN"
|
||||||
|
fi
|
||||||
|
fi
|
148
deployment/lxc_profile.yml
Normal file
148
deployment/lxc_profile.yml
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
config:
|
||||||
|
limits.cpu: "${DEV_CPU_COUNT}"
|
||||||
|
limits.memory: "${DEV_MEMORY_MB}MB"
|
||||||
|
user.vendor-data: |
|
||||||
|
#cloud-config
|
||||||
|
|
||||||
|
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
|
||||||
|
package_update: true
|
||||||
|
package_upgrade: false
|
||||||
|
package_reboot_if_required: false
|
||||||
|
|
||||||
|
preserve_hostname: false
|
||||||
|
fqdn: ${FQDN}
|
||||||
|
|
||||||
|
packages:
|
||||||
|
- curl
|
||||||
|
- ssh-askpass
|
||||||
|
- apt-transport-https
|
||||||
|
- ca-certificates
|
||||||
|
- gnupg-agent
|
||||||
|
- software-properties-common
|
||||||
|
- lsb-release
|
||||||
|
- net-tools
|
||||||
|
- htop
|
||||||
|
- rsync
|
||||||
|
- duplicity
|
||||||
|
- sshfs
|
||||||
|
- fswatch
|
||||||
|
- jq
|
||||||
|
- git
|
||||||
|
- nano
|
||||||
|
|
||||||
|
groups:
|
||||||
|
- docker
|
||||||
|
|
||||||
|
users:
|
||||||
|
- name: ubuntu
|
||||||
|
shell: /bin/bash
|
||||||
|
lock_passwd: false
|
||||||
|
groups:
|
||||||
|
- docker
|
||||||
|
sudo:
|
||||||
|
- ALL=(ALL) NOPASSWD:ALL
|
||||||
|
ssh_authorized_keys:
|
||||||
|
- ${SSH_AUTHORIZED_KEY}
|
||||||
|
|
||||||
|
write_files:
|
||||||
|
- path: ${REMOTE_HOME}/docker.asc
|
||||||
|
content: |
|
||||||
|
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||||
|
|
||||||
|
mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
|
||||||
|
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
|
||||||
|
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
|
||||||
|
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
|
||||||
|
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
|
||||||
|
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
|
||||||
|
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
|
||||||
|
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
|
||||||
|
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
|
||||||
|
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
|
||||||
|
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
|
||||||
|
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
|
||||||
|
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
|
||||||
|
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
|
||||||
|
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
|
||||||
|
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
|
||||||
|
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
|
||||||
|
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
|
||||||
|
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
|
||||||
|
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
|
||||||
|
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
|
||||||
|
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
|
||||||
|
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
|
||||||
|
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
|
||||||
|
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
|
||||||
|
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
|
||||||
|
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
|
||||||
|
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
|
||||||
|
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
|
||||||
|
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
|
||||||
|
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
|
||||||
|
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
|
||||||
|
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
|
||||||
|
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
|
||||||
|
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
|
||||||
|
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
|
||||||
|
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
|
||||||
|
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
|
||||||
|
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
|
||||||
|
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
|
||||||
|
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
|
||||||
|
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
|
||||||
|
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
|
||||||
|
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
|
||||||
|
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
|
||||||
|
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
|
||||||
|
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
|
||||||
|
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
|
||||||
|
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
|
||||||
|
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
|
||||||
|
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
|
||||||
|
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
|
||||||
|
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
|
||||||
|
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
|
||||||
|
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
|
||||||
|
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
|
||||||
|
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
|
||||||
|
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
|
||||||
|
=0YYh
|
||||||
|
-----END PGP PUBLIC KEY BLOCK-----
|
||||||
|
|
||||||
|
- path: /etc/ssh/ssh_config
|
||||||
|
content: |
|
||||||
|
Port 22
|
||||||
|
ListenAddress 0.0.0.0
|
||||||
|
Protocol 2
|
||||||
|
ChallengeResponseAuthentication no
|
||||||
|
PasswordAuthentication no
|
||||||
|
UsePAM no
|
||||||
|
LogLevel INFO
|
||||||
|
|
||||||
|
runcmd:
|
||||||
|
- cat ${REMOTE_HOME}/docker.asc | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||||
|
- sudo rm ${REMOTE_HOME}/docker.asc
|
||||||
|
- echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
|
||||||
|
- sudo apt-get remove docker docker.io containerd runc
|
||||||
|
- sudo apt-get update
|
||||||
|
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||||
|
- echo "alias ll='ls -lah'" >> ${REMOTE_HOME}/.bash_profile
|
||||||
|
- sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||||
|
- sudo chmod +x /usr/local/bin/docker-compose
|
||||||
|
- sudo apt-get install -y openssh-server
|
||||||
|
|
||||||
|
description: Default LXD profile for ${DOMAIN_NAME}
|
||||||
|
devices:
|
||||||
|
root:
|
||||||
|
path: /
|
||||||
|
pool: default
|
||||||
|
type: disk
|
||||||
|
config:
|
||||||
|
source: cloud-init:config
|
||||||
|
type: disk
|
||||||
|
enp5s0:
|
||||||
|
nictype: macvlan
|
||||||
|
parent: ${MACVLAN_INTERFACE}
|
||||||
|
type: nic
|
||||||
|
name: ${LXD_VM_NAME}
|
16
deployment/prepare_vps_host.sh
Executable file
16
deployment/prepare_vps_host.sh
Executable file
@ -0,0 +1,16 @@
|
|||||||
|
#!/bin/bash

# Prepare a freshly provisioned VPS host: trust its SSH identity, create the
# backup directory, and (for btcpay deployments) hand off to the BTCPay setup.
# Required environment: FQDN, SSH_HOME, REMOTE_HOME, APP_TO_DEPLOY.

set -exu

# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"

# create a directory to store backup archives. This is done on all new vms.
ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"

# BTCPay hosts get their server-side setup script pushed and executed right
# away; nothing further happens here for other app types.
if [ "$APP_TO_DEPLOY" = btcpay ]; then
    echo "INFO: new machine detected. Provisioning BTCPay server scripts."

    ./run_btcpay_setup.sh
    exit
fi
|
151
deployment/provision_lxc.sh
Executable file
151
deployment/provision_lxc.sh
Executable file
@ -0,0 +1,151 @@
|
|||||||
|
#!/bin/bash

# Provision an LXD virtual machine for the requested app. This preamble
# validates input, prepares a per-VM LXD profile, and renders the cloud-init
# configuration from lxc_profile.yml.
# Required environment: MACVLAN_INTERFACE, LXD_VM_NAME, SSH_HOME, SITE_PATH,
# APP_TO_DEPLOY.

set -eux

# check to ensure the admin has specified a MACVLAN interface
if [ -z "$MACVLAN_INTERFACE" ]; then
    echo "ERROR: MACVLAN_INTERFACE not defined in project."
    exit 1
fi

# The base VM image.
BASE_LXC_IMAGE="ubuntu/22.04/cloud"

# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
# NOTE(review): grep -q matches substrings, so a profile whose name merely
# contains $LXD_VM_NAME would also match -- confirm names cannot collide.
if ! lxc profile list --format csv | grep -q "$LXD_VM_NAME"; then
    lxc profile create "$LXD_VM_NAME"
fi

# generate the custom cloud-init file. Cloud init installs and configures sshd.
# SSH_AUTHORIZED_KEY is exported so envsubst can splice it into lxc_profile.yml.
SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
envsubst < ./lxc_profile.yml > "$SITE_PATH/cloud-init-$APP_TO_DEPLOY.yml"

# configure the profile with our generated cloud-init.yml file.
lxc profile edit "$LXD_VM_NAME" < "$SITE_PATH/cloud-init-$APP_TO_DEPLOY.yml"
|
||||||
|
|
||||||
|
# Block until the named LXC instance reports an IPv4 address on enp5s0, then
# block further until its SSH port answers. The discovered address is
# exported as IP_V4_ADDRESS for use by the rest of the script.
function wait_for_lxc_ip {

    LXC_INSTANCE_NAME="$1"
    IP_V4_ADDRESS=
    while :; do
        # column 4 of `lxc list` holds the IPv4 addresses; `|| true` keeps a
        # not-yet-assigned address from aborting us under set -e.
        IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
        export IP_V4_ADDRESS="$IP_V4_ADDRESS"

        # no address yet: print a progress dot and poll again.
        if [ -z "$IP_V4_ADDRESS" ]; then
            sleep 1
            printf '.'
            continue
        fi

        # give the machine extra time to spin up.
        wait-for-it -t 300 "$IP_V4_ADDRESS:22"
        break
    done

}
|
||||||
|
|
||||||
|
# Update the DDNS record for $FQDN to point at $IP_V4_ADDRESS, then block
# until public DNS agrees. Namecheap is the only supported provider for now.
# Reads: VPS_HOSTNAME, FQDN, DOMAIN_NAME, DDNS_PASSWORD, IP_V4_ADDRESS.
function run_ddns {
    # now that the VM has an IP, we can update the DNS record. TODO add additional DNS providers here; namecheap only atm.
    DDNS_STRING="$VPS_HOSTNAME"
    if [ "$VPS_HOSTNAME" = www ]; then
        # for the www host we register the bare domain ("@") rather than a subdomain.
        DDNS_STRING="@"
    fi

    # if the DNS record is incorrect, we run DDNS to get it corrected.
    # BUGFIX: this comparison previously lacked [[ ]] brackets, which made the
    # shell try to EXECUTE the resolved IP address as a command.
    if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" != "$IP_V4_ADDRESS" ]]; then
        curl "https://dynamicdns.park-your-domain.com/update?host=$DDNS_STRING&domain=$DOMAIN_NAME&password=$DDNS_PASSWORD&ip=$IP_V4_ADDRESS"

        DDNS_SLEEP_SECONDS=60
        while true; do
            # we test the www CNAME here so we can be assured the underlying record has corrected.
            if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$IP_V4_ADDRESS" ]]; then
                echo ""
                echo "SUCCESS: The DNS appears to be configured correctly."

                echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow stale DNS records to expire."
                sleep "$DDNS_SLEEP_SECONDS"
                break
            fi

            printf "." && sleep 2
        done
    fi
}
|
||||||
|
|
||||||
|
# create the default storage pool if necessary.
# LXD_DISK_TO_USE (optional) names a block device to back the zfs pool;
# otherwise LXD creates a pool of ROOT_DISK_SIZE_GB without a source device.
if ! lxc storage list --format csv | grep -q default; then
    if [ -n "$LXD_DISK_TO_USE" ]; then
        lxc storage create default zfs source="$LXD_DISK_TO_USE" size="${ROOT_DISK_SIZE_GB}GB"
    else
        lxc storage create default zfs size="${ROOT_DISK_SIZE_GB}GB"
    fi
fi

# If our template image doesn't exist, we create one: boot a throwaway VM from
# the stock Ubuntu cloud image, let cloud-init run, then publish a snapshot of
# it under the $VM_NAME alias.
if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then

    # If the lxc VM does exist, then we will delete it (so we can start fresh)
    if lxc list -q --format csv | grep -q "$VM_NAME"; then
        lxc delete "$VM_NAME" --force

        # remove the ssh known endpoint else we get warnings.
        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VM_NAME"
    fi

    # let's download our base image.
    if ! lxc image list --format csv --columns l | grep -q "ubuntu-base"; then
        # if the image doesn't exist, download it from Ubuntu's image server
        # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
        # we don't really need to cache this locally since it gets continually updated upstream.
        lxc image copy "images:$BASE_LXC_IMAGE" "$DEV_LXD_REMOTE": --alias "ubuntu-base" --public --vm
    fi

    lxc init \
        --profile="$LXD_VM_NAME" \
        "ubuntu-base" \
        "$VM_NAME" --vm

    # let's PIN the HW address for now so we don't exhaust IP
    # and so we can set DNS internally.

    lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"

    lxc start "$VM_NAME"

    # let's wait a minimum of 15 seconds before we start checking for an IP address.
    sleep 15

    # let's wait for the LXC vm remote machine to get an IP address.
    # (wait_for_lxc_ip also exports IP_V4_ADDRESS, used below.)
    wait_for_lxc_ip "$VM_NAME"

    # Let's remove any entry in our known_hosts, then add it back.
    # we are using IP address here so we don't have to rely on external DNS
    # configuration for the base image preparation.
    ssh-keygen -R "$IP_V4_ADDRESS"
    ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
    ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu "$REMOTE_HOME"

    # stop the VM and get a snapshot.
    lxc stop "$VM_NAME"
    lxc publish "$DEV_LXD_REMOTE:$VM_NAME" --alias "$VM_NAME" --public
    lxc delete "$VM_NAME"
fi

# now let's create a new VM to work with, initialized from the image published above.
lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm

# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"

lxc start "$LXD_VM_NAME"

wait_for_lxc_ip "$LXD_VM_NAME"

# point DNS at the new VM and wait for propagation.
run_ddns

# remove any existing SSH identities for the host, then add it back.
ssh-keygen -R "$IP_V4_ADDRESS"
|
89
deployment/provision_vps.sh
Executable file
89
deployment/provision_vps.sh
Executable file
@ -0,0 +1,89 @@
|
|||||||
|
#!/bin/bash

# Provision a public VPS in AWS with docker-machine and prepare it for the
# Sovereign Stack deployment scripts (authorized_keys, docker group, DDNS).
# Required environment: APP_TO_DEPLOY, FQDN, AWS_REGION, AWS_AMI_ID,
# ROOT_DISK_SIZE_GB, WWW_INSTANCE_TYPE / BTCPAY_INSTANCE_TYPE, SITE_PATH,
# REMOTE_HOME, SSH_HOME, LATEST_GIT_TAG, LATEST_GIT_COMMIT.

set -eux
cd "$(dirname "$0")"

if [ ! -f "$HOME/.aws/credentials" ]; then

    # TODO write a credential file baseline
    echo "ERROR: Please update your '$HOME/.aws/credentials' file before continuing."
    mkdir -p "$HOME/.aws"
    touch "$HOME/.aws/credentials"

    # stub out a credentials file so the admin only has to fill in the values.
    cat >"$HOME/.aws/credentials" <<EOL
#!/bin/bash

# enter your AWS Access Key and Secret Access Key here.
export AWS_ACCESS_KEY=
export AWS_SECRET_ACCESS_KEY=

EOL

    exit 1
fi

source "$HOME/.aws/credentials"

if [ -z "$AWS_ACCESS_KEY" ]; then
    echo "ERROR: AWS_ACCESS_KEY is not set."
    exit 1
fi

if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    echo "ERROR: AWS_SECRET_ACCESS_KEY is not set."
    exit 1
fi

# create_aws_machine EXTRA_PORT INSTANCE_TYPE
# Creates the EC2-backed docker-machine named "$FQDN". Ports 80/443 are always
# open; EXTRA_PORT is the app-specific one (8448 for www, 9735 for btcpay).
# (Refactor: the two APP_TO_DEPLOY branches previously duplicated this whole
# invocation, differing only in those two arguments.)
create_aws_machine() {
    local extra_port="$1"
    local instance_type="$2"

    docker-machine create --driver amazonec2 \
        --amazonec2-open-port 80 \
        --amazonec2-open-port 443 \
        --amazonec2-open-port "$extra_port" \
        --amazonec2-access-key "$AWS_ACCESS_KEY" \
        --amazonec2-secret-key "$AWS_SECRET_ACCESS_KEY" \
        --amazonec2-region "$AWS_REGION" \
        --amazonec2-ami "$AWS_AMI_ID" \
        --amazonec2-root-size "$ROOT_DISK_SIZE_GB" \
        --amazonec2-instance-type "$instance_type" \
        --engine-label tag="$LATEST_GIT_TAG" \
        --engine-label commit="$LATEST_GIT_COMMIT" \
        "$FQDN"
}

# Note, we assume the script has already made sure the machine doesn't exist.
if [ "$APP_TO_DEPLOY" = www ] || [ "$APP_TO_DEPLOY" = certonly ]; then
    # creates a public VM in AWS and provisions the website.
    create_aws_machine 8448 "$WWW_INSTANCE_TYPE"
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
    # creates a public VM in AWS for BTCPay Server.
    create_aws_machine 9735 "$BTCPAY_INSTANCE_TYPE"
fi

# install our authorized_keys so the admin can ssh in directly.
docker-machine scp "$SITE_PATH/authorized_keys" "$FQDN:$REMOTE_HOME/authorized_keys"
docker-machine ssh "$FQDN" "cat $REMOTE_HOME/authorized_keys >> $REMOTE_HOME/.ssh/authorized_keys"

# we have to ensure ubuntu is able to do sudo-less docker commands.
docker-machine ssh "$FQDN" sudo usermod -aG docker ubuntu

# we restart so dockerd starts with fresh group membership.
docker-machine ssh "$FQDN" sudo systemctl restart docker

# TODO INSTALL DOCKER COMPOSE

# let's wire up the DNS so subsequent ssh commands resolve to the VPS.
./run_ddns.sh

# remove the SSH hostname from known_hosts as we'll re-scan it later.
# TODO(review): confirm why this is needed again at this point.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
|
21
deployment/restore_btcpay.sh
Executable file
21
deployment/restore_btcpay.sh
Executable file
@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash

# Restore a BTCPay Server instance from a backup tarball chosen by the admin.
# A safety backup of the CURRENT state is taken before anything is overwritten.
# Required environment: FQDN, REMOTE_BACKUP_PATH, UNIX_BACKUP_TIMESTAMP.
# This script ASSUMES services have already been taken down.

set -exu

# first let's ask the user for the absolute path to the backup file that we want to restore.
FILE_PATH=
# BUGFIX: a stray ':' argument between the prompt and the variable name made
# `read` abort with "`:' is not a valid identifier" (fatal under set -e).
read -r -p "Please enter the absolute path of the backup file you want to restore: " FILE_PATH

if [ -f "$FILE_PATH" ]; then
    # then we grab a backup of the existing stuff BEFORE the restoration attempt
    ./backup_btcpay.sh "before-restore-$UNIX_BACKUP_TIMESTAMP"

    echo "INFO: Restoring BTCPAY Server: $FILE_PATH"
    ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
    scp "$FILE_PATH" "$FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz"
    # unpack at / so the absolute paths inside the archive land where they came from.
    ssh "$FQDN" "cd /; sudo tar -xzvf $REMOTE_BACKUP_PATH/btcpay.tar.gz"
else
    echo "ERROR: File does not exist."
    exit 1
fi
|
19
deployment/restore_www.sh
Executable file
19
deployment/restore_www.sh
Executable file
@ -0,0 +1,19 @@
|
|||||||
|
#!/bin/bash

# Restore the www stack from a local duplicity backup archive.
# WARNING: destructive -- wipes $REMOTE_HOME on the remote host first.
# Required environment: FQDN, REMOTE_HOME, REMOTE_BACKUP_PATH,
# LOCAL_BACKUP_PATH, APP_TO_DEPLOY, DUPLICITY_BACKUP_PASSPHRASE.

set -exu

# first, this is a restore operation. We need to ask the administrator
# if they want to continue because it results in data loss.
# indeed, our first step is the delete the home directory on the remote server.
# NOTE(review): no confirmation prompt is actually issued before the wipe -- confirm intent.

# delete the home directory so we know we are restoring all files from the duplicity archive.
# (the glob is quoted locally, so it is the REMOTE shell that expands it.)
ssh "$FQDN" sudo rm -rf "$REMOTE_HOME/*"

# scp our local backup directory to the remote machine
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"

# TODO instead of scp the files up there, lets' mount the local backup folder to a remote folder then just run a duplicity restore.
scp -r "$LOCAL_BACKUP_PATH/" "$FQDN:$REMOTE_HOME/backups/$APP_TO_DEPLOY"

# now we run duplicity to restore the archive.
ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/" "$REMOTE_HOME/"
|
60
deployment/run_btcpay_setup.sh
Executable file
60
deployment/run_btcpay_setup.sh
Executable file
@ -0,0 +1,60 @@
|
|||||||
|
#!/bin/bash

# Generate the BTCPay Server bootstrap script for this site, push it to the
# remote host, and execute it there as root.
# Reads: SITE_PATH, FQDN, BTC_CHAIN, DOMAIN_NAME, CERTIFICATE_EMAIL_ADDRESS,
# BTCPAYGEN_ADDITIONAL_FRAGMENTS, BTCPAY_ADDITIONAL_HOSTNAMES, REMOTE_HOME.

set -ex


# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"

# This is the config for a basic proxy to the listening port 127.0.0.1:2368
# It also supports modern TLS, so SSL certs must be available.
# NOTE(review): the heredoc delimiter below is unquoted, so every $VAR and
# $(...) in the body expands on the MANAGEMENT machine at generation time.
# In particular $EXISTING_BRANCH / $EXISTING_REMOTE are never assigned in this
# script, and "$(date +%s)" is evaluated now rather than on the remote host --
# confirm whether these were meant to be escaped (\$) for remote evaluation.
cat > "$SITE_PATH/btcpay.sh" <<EOL
#!/bin/bash

set -ex

# wait for cloud-init to complete yo
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done

# get pre-reqs
apt-get update && apt-get install -y git wget

if [ -d "btcpayserver-docker" ] && [ "$EXISTING_BRANCH" != "master" ] && [ "$EXISTING_REMOTE" != "master" ]; then echo "existing btcpayserver-docker folder found that did not match our specified fork. Moving. (Current branch: $EXISTING_BRANCH, Current remote: $EXISTING_REMOTE)"; mv "btcpayserver-docker" "btcpayserver-docker_$(date +%s)"; fi
if [ -d "btcpayserver-docker" ] && [ "$EXISTING_BRANCH" == "master" ] && [ "$EXISTING_REMOTE" == "master" ]; then echo "existing btcpayserver-docker folder found, pulling instead of cloning."; git pull; fi
if [ ! -d "btcpayserver-docker" ]; then echo "cloning btcpayserver-docker"; git clone -b master https://github.com/btcpayserver/btcpayserver-docker btcpayserver-docker; fi

export BTCPAY_HOST="${FQDN}"
export NBITCOIN_NETWORK="${BTC_CHAIN}"
export LIGHTNING_ALIAS="${DOMAIN_NAME}"
export LETSENCRYPT_EMAIL="${CERTIFICATE_EMAIL_ADDRESS}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"

# opt-save-storage keeps 1 year of blocks (prunes to 100 GB)
# opt-add-btctransmuter adds transmuter software
#
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="${BTCPAYGEN_ADDITIONAL_FRAGMENTS}"
export BTCPAY_ADDITIONAL_HOSTS="${BTCPAY_ADDITIONAL_HOSTNAMES}"
export BTCPAY_ENABLE_SSH=true

cd btcpayserver-docker

# run fast_sync if it's not been done before.
if [ ! -f /home/ubuntu/fast_sync_completed ]; then
cd ./contrib/FastSync
./load-utxo-set.sh
touch /home/ubuntu/fast_sync_completed
cd -
fi

# provision the btcpay server
. ./btcpay-setup.sh -i

EOL

# send the setup script to the remote machine.
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
ssh "$FQDN" "sudo bash -c ./btcpay_setup.sh"
|
38
deployment/run_ddns.sh
Executable file
38
deployment/run_ddns.sh
Executable file
@ -0,0 +1,38 @@
|
|||||||
|
#!/bin/bash

# Point the DDNS record for $FQDN at the freshly created docker-machine and
# block until public DNS resolves to that address.
# Reads: APP_TO_DEPLOY, DDNS_HOST, FQDN, DOMAIN_NAME, DDNS_PASSWORD,
# VPS_HOSTING_TARGET.

set -eux

DDNS_STRING=

# for the www stack, we register only the domain name so our URLs look like https://$DOMAIN_NAME
case "$APP_TO_DEPLOY" in
    www|certonly) DDNS_STRING="@" ;;
    *)            DDNS_STRING="$DDNS_HOST" ;;
esac

# wait for DNS to get setup. Pass in the IP address of the actual VPS.
MACHINE_IP="$(docker-machine ip "$FQDN")"

if [ "$VPS_HOSTING_TARGET" = aws ]; then
    # wire DNS entries using namecheap DDNS API (via HTTPS rather than ddclient)
    curl "https://dynamicdns.park-your-domain.com/update?host=$DDNS_STRING&domain=$DOMAIN_NAME&password=$DDNS_PASSWORD&ip=$MACHINE_IP"

    #install dependencies.
    docker-machine ssh "$FQDN" sudo apt-get -qq install -y wait-for-it git rsync duplicity sshfs
fi

DDNS_SLEEP_SECONDS=60

# poll until the address for $FQDN matches the machine's IP, printing a
# progress dot every couple of seconds.
until [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$MACHINE_IP" ]]; do
    printf "." && sleep 2
done

echo ""
echo "SUCCESS: The DNS appears to be configured correctly."

echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow stale DNS records to expire."
sleep "$DDNS_SLEEP_SECONDS"
|
237
deployment/shared.sh
Executable file
237
deployment/shared.sh
Executable file
@ -0,0 +1,237 @@
|
|||||||
|
#!/bin/bash

# shared.sh -- sourced by the deployment scripts to load and validate the
# per-site configuration (site_definition) and derive common variables.
# Expects SITE_PATH (and several other variables) to be set by the caller.

set -eu

# check to see if the site directory exists; exit if not.
if [ ! -d "$SITE_PATH" ]; then
    echo "ERROR: '$SITE_PATH' does not exist."
    exit 1
fi

# Print one randomly generated password via apg (minimum 26 chars, characters
# G-Z excluded via -E); only the first of the generated candidates is used.
function new_pass {
    apg -a 1 -M nc -n 3 -m 26 -E GHIJKLMNOPQRSTUVWXYZ | head -n1 | awk '{print $1;}'
}

# check to see if the site_definition file exists; stub one out and exit if not.
SITE_DEFINITION_PATH="$SITE_PATH/site_definition"
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
    echo "WARNING: '$SITE_DEFINITION_PATH' does not exist! We have stubbed one out for you, but you need to UPDATE IT!"

    # stub out a site_definition with new passwords.
    # (the heredoc delimiter is unquoted, so each $(new_pass) runs NOW and the
    # stub is written with concrete generated passwords.)
    cat >"$SITE_DEFINITION_PATH" <<EOL
#!/bin/bash

export SITE_TITLE="Short Title of Project"
export DOMAIN_NAME="domain.tld"
export DDNS_PASSWORD=
export SMTP_PASSWORD=

# TODO VERIFY SECURE RNG
export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
export GITEA_MYSQL_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
#export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
#export DEPLOY_WWW_SERVER=false
#export DEPLOY_BTCPAY_SERVER=false
#export DEPLOY_UMBREL_VPS=false
export DEPLOY_GHOST=true
export DEPLOY_NOSTR=false
export DEPLOY_NEXTCLOUD=true
export DEPLOY_ONION_SITE=false
export NOSTR_ACCOUNT_PUBKEY="CHANGE_ME"

# valid options: 'regtest' and 'mainnet'
#export BTC_CHAIN=regtest
#export WWW_INSTANCE_TYPE="t2.medium"
#export BTCPAY_ADDITIONAL_HOSTNAMES="pay.domain.tld"

EOL

    chmod 0744 "$SITE_DEFINITION_PATH"
    exit 1

fi
|
||||||
|
|
||||||
|
# Path where stub_docker_yml.sh writes the generated docker stack file.
DOCKER_YAML_PATH="$SITE_PATH/appstack.yml"
export DOCKER_YAML_PATH="$DOCKER_YAML_PATH"

# TODO add file existence check
if [ ! -f "$SITE_PATH/site_definition" ]; then
    echo "ERROR: site_definition does not exist."
    exit 1
fi
# shellcheck disable=SC1090
source "$SITE_PATH/site_definition"

# remote hosts use the stock Ubuntu 'ubuntu' user account.
export REMOTE_HOME="/home/ubuntu"

# month-granular timestamp groups backups into monthly folders; the unix
# timestamp is used to uniquely name individual archives.
BACKUP_TIMESTAMP="$(date +"%Y-%m")"
UNIX_BACKUP_TIMESTAMP="$(date +%s)"
export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP"
export UNIX_BACKUP_TIMESTAMP="$UNIX_BACKUP_TIMESTAMP"
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
LOCAL_BACKUP_PATH="$SITE_PATH/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
export LOCAL_BACKUP_PATH="$LOCAL_BACKUP_PATH"

# record whether this run created the local backup directory so downstream
# scripts can distinguish a brand-new backup set from an existing one.
BACKUP_PATH_CREATED=false
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
    mkdir -p "$LOCAL_BACKUP_PATH"
    BACKUP_PATH_CREATED=true
fi

export BACKUP_PATH_CREATED="$BACKUP_PATH_CREATED"
mkdir -p "$SSHFS_PATH"

# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
    echo "ERROR: DOMAIN_NAME not specified. Use the --domain-name= option."
    exit 1
fi

# TODO, ensure VPS_HOSTING_TARGET is in range.
export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"

export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export MAIL_FROM="$SITE_TITLE <$CERTIFICATE_EMAIL_ADDRESS>"
export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"
# NOTE(review): FQDN is not assigned until further down this script; under
# `set -u` this line aborts unless the caller already exported FQDN -- confirm.
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"

touch "$SITE_PATH/debug.log"
export SMTP_LOGIN="www@mail.$DOMAIN_NAME"
export VM_NAME="sovereign-stack-base"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"

# this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
export ROOT_DISK_SIZE_GB=20

# choose the DDNS hostname and root disk size per app type.
DDNS_HOST=
if [ "$APP_TO_DEPLOY" = www ]; then
    DDNS_HOST="$WWW_HOSTNAME"
    ROOT_DISK_SIZE_GB=$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
    DDNS_HOST="$BTCPAY_HOSTNAME"
    # mainnet requires far more disk than testnet.
    if [ "$BTC_CHAIN" = mainnet ]; then
        ROOT_DISK_SIZE_GB=150
    elif [ "$BTC_CHAIN" = testnet ]; then
        ROOT_DISK_SIZE_GB=40
    fi
elif [ "$APP_TO_DEPLOY" = umbrel ]; then
    DDNS_HOST="$UMBREL_HOSTNAME"
    if [ "$BTC_CHAIN" = mainnet ]; then
        ROOT_DISK_SIZE_GB=1000
    elif [ "$BTC_CHAIN" = testnet ]; then
        ROOT_DISK_SIZE_GB=70
    fi
elif [ "$APP_TO_DEPLOY" = certonly ]; then
    DDNS_HOST="$WWW_HOSTNAME"
    ROOT_DISK_SIZE_GB=8
else
    echo "ERROR: APP_TO_DEPLOY not within allowable bounds."
    # NOTE(review): bare `exit` propagates the previous command's status,
    # which is 0 here (the echo) -- should this be `exit 1`?
    exit
fi

# we use this in other subshells.
export APP_TO_DEPLOY="$APP_TO_DEPLOY"
export DDNS_HOST="$DDNS_HOST"
export FQDN="$DDNS_HOST.$DOMAIN_NAME"
export LXD_VM_NAME="${FQDN//./-}"
export BTC_CHAIN="$BTC_CHAIN"
export ROOT_DISK_SIZE_GB=$ROOT_DISK_SIZE_GB
export WWW_INSTANCE_TYPE="$WWW_INSTANCE_TYPE"
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
|
||||||
|
|
||||||
|
# Per-app credential validation: each enabled service must have its secrets
# defined in site_definition before we proceed.

if [ "$DEPLOY_GHOST" = true ]; then
    if [ -z "$GHOST_MYSQL_PASSWORD" ]; then
        echo "ERROR: Ensure GHOST_MYSQL_PASSWORD is configured in your site_definition."
        exit 1
    fi

    if [ -z "$GHOST_MYSQL_ROOT_PASSWORD" ]; then
        echo "ERROR: Ensure GHOST_MYSQL_ROOT_PASSWORD is configured in your site_definition."
        exit 1
    fi
fi

if [ "$DEPLOY_GITEA" = true ]; then
    if [ -z "$GITEA_MYSQL_PASSWORD" ]; then
        echo "ERROR: Ensure GITEA_MYSQL_PASSWORD is configured in your site_definition."
        exit 1
    fi
    # NOTE(review): the stubbed site_definition only generates this variable in
    # commented-out form -- a default config will fail here; confirm intended.
    if [ -z "$GITEA_MYSQL_ROOT_PASSWORD" ]; then
        echo "ERROR: Ensure GITEA_MYSQL_ROOT_PASSWORD is configured in your site_definition."
        exit 1
    fi
fi

if [ "$DEPLOY_NEXTCLOUD" = true ]; then
    if [ -z "$NEXTCLOUD_MYSQL_ROOT_PASSWORD" ]; then
        echo "ERROR: Ensure NEXTCLOUD_MYSQL_ROOT_PASSWORD is configured in your site_definition."
        exit 1
    fi

    if [ -z "$NEXTCLOUD_MYSQL_PASSWORD" ]; then
        echo "ERROR: Ensure NEXTCLOUD_MYSQL_PASSWORD is configured in your site_definition."
        exit 1
    fi
fi
|
||||||
|
|
||||||
|
# Nostr integration requires a public key for the account; fail fast if the
# admin enabled DEPLOY_NOSTR without configuring one.
# (Fix: this identical check previously appeared twice back-to-back; the
# copy-paste duplicate has been removed.)
if [ "$DEPLOY_NOSTR" = true ]; then
    if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
        echo "ERROR: Ensure NOSTR_ACCOUNT_PUBKEY is configured in your site_definition."
        exit 1
    fi
fi
|
||||||
|
|
||||||
|
# Global secrets and settings every deployment needs, regardless of app type.

if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
    echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site_definition."
    exit 1
fi

if [ -z "$SMTP_PASSWORD" ]; then
    echo "ERROR: Ensure SMTP_PASSWORD is configured in your site_definition."
    exit 1
fi

if [ -z "$DDNS_PASSWORD" ]; then
    echo "ERROR: Ensure DDNS_PASSWORD is configured in your site_definition."
    exit 1
fi

# NOTE(review): DOMAIN_NAME was already validated earlier in this script;
# this second check is redundant but harmless.
if [ -z "$DOMAIN_NAME" ]; then
    echo "ERROR: Ensure DOMAIN_NAME is configured in your site_definition."
    exit 1
fi

if [ -z "$SITE_TITLE" ]; then
    echo "ERROR: Ensure SITE_TITLE is configured in your site_definition."
    exit 1
fi

# NOTE(review): "BTCPPAY" looks like a typo for "BTCPAY" -- the stubbed
# site_definition only mentions DEPLOY_BTCPAY_SERVER. Confirm which variable
# name callers actually set before renaming, since under `set -eu` an unset
# variable here aborts the script.
if [ -z "$DEPLOY_BTCPPAY_SERVER" ]; then
    echo "ERROR: Ensure DEPLOY_BTCPPAY_SERVER is configured in your site_definition."
    exit 1
fi


if [ -z "$DEPLOY_UMBREL_VPS" ]; then
    echo "ERROR: Ensure DEPLOY_UMBREL_VPS is configured in your site_definition."
    exit 1
fi

if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
    echo "ERROR: You MUST specify a Nostr public key. This is how you get all your social features."
    echo "INFO: Go to your site_definition file and set the NOSTR_ACCOUNT_PUBKEY variable."
    exit 1
fi
|
||||||
|
|
BIN
deployment/signal-desktop-keyring.gpg
Normal file
BIN
deployment/signal-desktop-keyring.gpg
Normal file
Binary file not shown.
334
deployment/stub_docker_yml.sh
Executable file
334
deployment/stub_docker_yml.sh
Executable file
@ -0,0 +1,334 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
if [ -z "$ONION_ADDRESS" ]; then
|
||||||
|
echo "ERROR: ONION_ADDRESS is not defined."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# here's the NGINX config. We support ghost and nextcloud.
|
||||||
|
echo "" > "$DOCKER_YAML_PATH"
|
||||||
|
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
version: "3.8"
|
||||||
|
services:
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
# This is the ghost for HTTPS (not over Tor)
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
ghost:
|
||||||
|
image: ${GHOST_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghost-net
|
||||||
|
- ghostdb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/ghost_site:/var/lib/ghost/content
|
||||||
|
environment:
|
||||||
|
- url=https://${FQDN}
|
||||||
|
- mail__from="${MAIL_FROM}"
|
||||||
|
- mail__options__service=SMTP
|
||||||
|
- mail__transport=SMTP
|
||||||
|
- mail__options__host=${SMTP_SERVER}
|
||||||
|
- mail__options__port=${SMTP_PORT}
|
||||||
|
- mail__options__auth__user=${SMTP_LOGIN}
|
||||||
|
- mail__options__auth__pass=\${SMTP_PASSWORD}
|
||||||
|
- database__client=mysql
|
||||||
|
- database__connection__host=ghostdb
|
||||||
|
- database__connection__user=ghost
|
||||||
|
- database__connection__password=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
- database__connection__database=ghost
|
||||||
|
- database__pool__min=0
|
||||||
|
- privacy__useStructuredData=true
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
ghostdb:
|
||||||
|
image: ${GHOST_DB_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghostdb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/ghost_db:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=ghost
|
||||||
|
- MYSQL_USER=ghost
|
||||||
|
- MYSQL_PASSWORD=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nextcloud-db:
|
||||||
|
image: ${NEXTCLOUD_DB_IMAGE}
|
||||||
|
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb_read_only_compressed=OFF
|
||||||
|
networks:
|
||||||
|
- nextclouddb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/nextcloud/db/data:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=nextcloud
|
||||||
|
- MYSQL_USER=nextcloud
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
nextcloud:
|
||||||
|
image: ${NEXTCLOUD_IMAGE}
|
||||||
|
networks:
|
||||||
|
- nextclouddb-net
|
||||||
|
- nextcloud-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/nextcloud/html:/var/www/html
|
||||||
|
environment:
|
||||||
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=nextcloud
|
||||||
|
- MYSQL_USER=nextcloud
|
||||||
|
- MYSQL_HOST=nextcloud-db
|
||||||
|
- NEXTCLOUD_TRUSTED_DOMAINS=${DOMAIN_NAME}
|
||||||
|
- OVERWRITEHOST=${NEXTCLOUD_FQDN}
|
||||||
|
- OVERWRITEPROTOCOL=https
|
||||||
|
- SERVERNAME=${NEXTCLOUD_FQDN}
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NOSTR" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
# TODO
|
||||||
|
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
gitea:
|
||||||
|
image: ${GITEA_IMAGE}
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_GITEA_PATH}/data:/data
|
||||||
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
environment:
|
||||||
|
- USER_UID=1000
|
||||||
|
- USER_GID=1000
|
||||||
|
- GITEA__database__DB_TYPE=mysql
|
||||||
|
- GITEA__database__HOST=gitea-db:3306
|
||||||
|
- GITEA__database__NAME=gitea
|
||||||
|
- GITEA__database__USER=gitea
|
||||||
|
- GITEA__PASSWD=${GITEA_MYSQL_PASSWORD}
|
||||||
|
networks:
|
||||||
|
- gitea-net
|
||||||
|
- giteadb-net
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
gitea-db:
|
||||||
|
image: ${GITEA_DB_IMAGE}
|
||||||
|
networks:
|
||||||
|
- giteadb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_GITEA_PATH}/db:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MYSQL_ROOT_PASSWORD=\${GITEA_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_PASSWORD=\${GITEA_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=gitea
|
||||||
|
- MYSQL_USER=gitea
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
# a hidden service that routes to the nginx container at http://onionurl.onion server block
|
||||||
|
tor-onion:
|
||||||
|
image: tor:latest
|
||||||
|
networks:
|
||||||
|
- tor-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor:/var/lib/tor
|
||||||
|
- tor-logs:/var/log/tor
|
||||||
|
configs:
|
||||||
|
- source: tor-config
|
||||||
|
target: /etc/tor/torrc
|
||||||
|
mode: 0644
|
||||||
|
deploy:
|
||||||
|
mode: replicated
|
||||||
|
replicas: 1
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
tor-ghost:
|
||||||
|
image: ${GHOST_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghostdb-net
|
||||||
|
- ghost-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor_ghost:/var/lib/ghost/content
|
||||||
|
environment:
|
||||||
|
- url=https://${ONION_ADDRESS}
|
||||||
|
- mail__from=${MAIL_FROM}
|
||||||
|
- mail__options__service=SMTP
|
||||||
|
- mail__transport=SMTP
|
||||||
|
- mail__options__host=${SMTP_SERVER}
|
||||||
|
- mail__options__port=${SMTP_PORT}
|
||||||
|
- mail__options__auth__user=${SMTP_LOGIN}
|
||||||
|
- mail__options__auth__pass=\${SMTP_PASSWORD}
|
||||||
|
- database__client=mysql
|
||||||
|
- database__connection__host=ghostdb
|
||||||
|
- database__connection__user=ghost
|
||||||
|
- database__connection__password=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
- database__connection__database=ghost
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
#https://github.com/fiatjaf/expensive-relay
|
||||||
|
# NOSTR RELAY WHICH REQUIRES PAYMENTS.
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nginx:
|
||||||
|
image: ${NGINX_IMAGE}
|
||||||
|
ports:
|
||||||
|
- 0.0.0.0:443:443
|
||||||
|
- 0.0.0.0:80:80
|
||||||
|
- 0.0.0.0:8448:8448
|
||||||
|
networks:
|
||||||
|
- ghost-net
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
# NGINX required
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nginx:
|
||||||
|
image: ${NGINX_IMAGE}
|
||||||
|
ports:
|
||||||
|
- 0.0.0.0:443:443
|
||||||
|
- 0.0.0.0:80:80
|
||||||
|
- 0.0.0.0:8448:8448
|
||||||
|
networks:
|
||||||
|
- ghost-net
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- torghost-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- nextcloud-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- gitea-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- tor-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# the rest of the nginx config
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
volumes:
|
||||||
|
- /etc/letsencrypt:/etc/letsencrypt:ro
|
||||||
|
configs:
|
||||||
|
- source: nginx-config
|
||||||
|
target: /etc/nginx/nginx.conf
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
tor-data:
|
||||||
|
tor-logs:
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
#-------------------------
|
||||||
|
|
||||||
|
# networks ----------------------
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
networks:
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GHOST" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
ghost-net:
|
||||||
|
ghostdb-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nextclouddb-net:
|
||||||
|
nextcloud-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
gitea-net:
|
||||||
|
giteadb-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
tor-net:
|
||||||
|
torghost-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
# -------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
# configs ----------------------
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
|
||||||
|
configs:
|
||||||
|
nginx-config:
|
||||||
|
file: ${SITE_PATH}/nginx.conf
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
tor-config:
|
||||||
|
file: $(pwd)/tor/torrc
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
# -----------------------------
|
354
deployment/stub_nginxconf.sh
Executable file
354
deployment/stub_nginxconf.sh
Executable file
@ -0,0 +1,354 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
if [ -z "$ONION_ADDRESS" ]; then
|
||||||
|
echo "ERROR: ONION_ADDRESS is not defined."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# here's the NGINX config. We support ghost and nextcloud.
|
||||||
|
NGINX_CONF_PATH="$SITE_PATH/nginx.conf"
|
||||||
|
echo "" > "$NGINX_CONF_PATH"
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
http {
|
||||||
|
client_max_body_size 100m;
|
||||||
|
server_names_hash_bucket_size 128;
|
||||||
|
server_tokens off;
|
||||||
|
|
||||||
|
# this server block returns a 403 for all non-explicit host requests.
|
||||||
|
#server {
|
||||||
|
# listen 80 default_server;
|
||||||
|
# return 403;
|
||||||
|
#}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
# ghost http to https redirects.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${DOMAIN_NAME} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
|
||||||
|
server_name ${DOMAIN_NAME};
|
||||||
|
|
||||||
|
location / {
|
||||||
|
# request MAY get another redirect at https://domain.tld for www.
|
||||||
|
return 301 https://${DOMAIN_NAME}\$request_uri;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${FQDN} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${FQDN};
|
||||||
|
return 301 https://${FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# nextcloud http-to-https redirect
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${NEXTCLOUD_FQDN} redirect to https://${NEXTCLOUD_FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${NEXTCLOUD_FQDN};
|
||||||
|
return 301 https://${NEXTCLOUD_FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# gitea http to https redirect.
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${GITEA_FQDN} redirect to https://${GITEA_FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${GITEA_FQDN};
|
||||||
|
return 301 https://${GITEA_FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# TLS config for ghost.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# global TLS settings
|
||||||
|
ssl_prefer_server_ciphers on;
|
||||||
|
ssl_protocols TLSv1.3;
|
||||||
|
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
|
||||||
|
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
|
||||||
|
ssl_trusted_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
|
||||||
|
ssl_session_timeout 1d;
|
||||||
|
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||||
|
ssl_session_tickets off;
|
||||||
|
add_header Strict-Transport-Security "max-age=63072000" always;
|
||||||
|
ssl_stapling on;
|
||||||
|
ssl_stapling_verify on;
|
||||||
|
resolver 198.54.117.10;
|
||||||
|
|
||||||
|
|
||||||
|
# default server if hostname not specified.
|
||||||
|
#server {
|
||||||
|
# listen 443 default_server;
|
||||||
|
# return 403;
|
||||||
|
#}
|
||||||
|
|
||||||
|
# map \$http_user_agent \$og_prefix {
|
||||||
|
# ~*(googlebot|twitterbot)/ /open-graph;
|
||||||
|
# }
|
||||||
|
|
||||||
|
# https://${DOMAIN_NAME} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
server_name ${DOMAIN_NAME};
|
||||||
|
|
||||||
|
EOL
|
||||||
|
###########################################
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NOSTR" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# We return a JSON object with name/pubkey mapping per NIP05.
|
||||||
|
# https://www.reddit.com/r/nostr/comments/rrzk76/nip05_mapping_usernames_to_dns_domains_by_fiatjaf/sssss
|
||||||
|
# TODO I'm not sure about the security of this Access-Control-Allow-Origin. Read up and restrict it if possible.
|
||||||
|
location = /.well-known/nostr.json {
|
||||||
|
add_header Content-Type application/json;
|
||||||
|
add_header Access-Control-Allow-Origin *;
|
||||||
|
return 200 '{ "names": { "_": "${NOSTR_ACCOUNT_PUBKEY}" } }';
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# catch all; send request to ${FQDN}
|
||||||
|
location / {
|
||||||
|
return 301 https://${FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
#####################################################
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
}
|
||||||
|
|
||||||
|
#access_log /var/log/nginx/ghost-access.log;
|
||||||
|
#error_log /var/log/nginx/ghost-error.log;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# main TLS listener; proxies requests to ghost service. NGINX configured to cache
|
||||||
|
proxy_cache_path /tmp/nginx_ghost levels=1:2 keys_zone=ghostcache:600m max_size=100m inactive=24h;
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# the open server block for the HTTPS listener
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
server_name ${FQDN};
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# add the Onion-Location header if specifed.
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
|
||||||
|
# No cache + keep cookies for admin and previews
|
||||||
|
location ~ ^/(ghost/|p/|private/) {
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_intercept_errors on;
|
||||||
|
proxy_pass http://ghost:2368;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# proxy config for ghost
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# Set the crawler policy.
|
||||||
|
location = /robots.txt {
|
||||||
|
add_header Content-Type text/plain;
|
||||||
|
return 200 "User-Agent: *\\nAllow: /\\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_intercept_errors on;
|
||||||
|
proxy_pass http://ghost:2368;
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# https://stanislas.blog/2019/08/ghost-nginx-cache/ for nginx caching instructions
|
||||||
|
# Remove cookies which are useless for anonymous visitor and prevent caching
|
||||||
|
proxy_ignore_headers Set-Cookie Cache-Control;
|
||||||
|
proxy_hide_header Set-Cookie;
|
||||||
|
|
||||||
|
# Add header for cache status (miss or hit)
|
||||||
|
add_header X-Cache-Status \$upstream_cache_status;
|
||||||
|
proxy_cache ghostcache;
|
||||||
|
|
||||||
|
# Default TTL: 1 day
|
||||||
|
proxy_cache_valid 5s;
|
||||||
|
|
||||||
|
# Cache 404 pages for 1h
|
||||||
|
proxy_cache_valid 404 1h;
|
||||||
|
|
||||||
|
# use conditional GET requests to refresh the content from origin servers
|
||||||
|
proxy_cache_revalidate on;
|
||||||
|
proxy_buffering on;
|
||||||
|
|
||||||
|
# Allows starting a background subrequest to update an expired cache item,
|
||||||
|
# while a stale cached response is returned to the client.
|
||||||
|
proxy_cache_background_update on;
|
||||||
|
|
||||||
|
# Bypass cache for errors
|
||||||
|
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# this is the closing location / block for the ghost HTTPS segment
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# TODO this MIGHT be part of the solution for Twitter Cards.
|
||||||
|
# location /contents {
|
||||||
|
# resolver 127.0.0.11 ipv6=off valid=5m;
|
||||||
|
# proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
# proxy_set_header Host \$http_host;
|
||||||
|
# proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
# proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
# proxy_intercept_errors on;
|
||||||
|
# proxy_pass http://ghost:2368\$og_prefix\$request_uri;
|
||||||
|
# }
|
||||||
|
|
||||||
|
# this is the closing server block for the ghost HTTPS segment
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# tor config
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# server listener for tor v3 onion endpoint
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${ONION_ADDRESS};
|
||||||
|
#access_log /var/log/nginx/tor-www.log;
|
||||||
|
|
||||||
|
# administration not allowed over tor interface.
|
||||||
|
location /ghost { deny all; }
|
||||||
|
location / {
|
||||||
|
proxy_set_header X-Forwarded-For 1.1.1.1;
|
||||||
|
proxy_set_header X-Forwarded-Proto https;
|
||||||
|
proxy_set_header X-Real-IP 1.1.1.1;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
proxy_pass http://tor-ghost:2368;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# TLS listener for ${NEXTCLOUD_FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${NEXTCLOUD_FQDN};
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_headers_hash_max_size 512;
|
||||||
|
proxy_headers_hash_bucket_size 64;
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_set_header X-NginX-Proxy true;
|
||||||
|
|
||||||
|
proxy_pass http://nextcloud:80;
|
||||||
|
}
|
||||||
|
|
||||||
|
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/reverse_proxy_configuration.html
|
||||||
|
location /.well-known/carddav {
|
||||||
|
return 301 \$scheme://\$host/remote.php/dav;
|
||||||
|
}
|
||||||
|
|
||||||
|
location /.well-known/caldav {
|
||||||
|
return 301 \$scheme://\$host/remote.php/dav;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# TLS listener for ${GITEA_FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
server_name ${GITEA_FQDN};
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_headers_hash_max_size 512;
|
||||||
|
proxy_headers_hash_bucket_size 64;
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_set_header X-NginX-Proxy true;
|
||||||
|
|
||||||
|
proxy_pass http://gitea:3000;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# add the closing brace.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
}
|
||||||
|
EOL
|
32
deployment/tor.yml
Normal file
32
deployment/tor.yml
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
version: "3.8"
|
||||||
|
services:
|
||||||
|
|
||||||
|
# a hidden service that routes to the nginx container at http://onionurl.onion server block
|
||||||
|
tor-onion:
|
||||||
|
image: tor:latest
|
||||||
|
networks:
|
||||||
|
- tor-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor:/var/lib/tor
|
||||||
|
- tor-logs:/var/log/tor
|
||||||
|
configs:
|
||||||
|
- source: tor-config
|
||||||
|
target: /etc/tor/torrc
|
||||||
|
mode: 0644
|
||||||
|
deploy:
|
||||||
|
mode: replicated
|
||||||
|
replicas: 1
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
tor-data:
|
||||||
|
tor-logs:
|
||||||
|
|
||||||
|
networks:
|
||||||
|
tor-net:
|
||||||
|
attachable: true
|
||||||
|
|
||||||
|
configs:
|
||||||
|
tor-config:
|
||||||
|
file: ${TOR_CONFIG_PATH}
|
11
deployment/tor/Dockerfile
Normal file
11
deployment/tor/Dockerfile
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
FROM ubuntu:22.04
|
||||||
|
RUN apt-get update && apt-get install -y tor
|
||||||
|
#COPY ./torrc /etc/tor/torrc
|
||||||
|
#RUN chown root:root /etc/tor/torrc
|
||||||
|
#RUN chmod 0644 /etc/tor/torrc
|
||||||
|
|
||||||
|
#RUN mkdir /data
|
||||||
|
#VOLUME /data
|
||||||
|
# RUN chown 1000:1000 -R /data
|
||||||
|
#USER 1000:1000
|
||||||
|
CMD tor -f /etc/tor/torrc
|
8
deployment/tor/torrc
Normal file
8
deployment/tor/torrc
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# we configure a hidden service that listens on onion:80 and redirects to nginx:80 at the at the torv3 onion address
|
||||||
|
SocksPort 0
|
||||||
|
|
||||||
|
HiddenServiceDir /var/lib/tor/www
|
||||||
|
HiddenServiceVersion 3
|
||||||
|
HiddenServicePort 443 nginx:443
|
||||||
|
|
||||||
|
Log info file /var/log/tor/tor.log
|
5
deployment/tor/torrc-init
Normal file
5
deployment/tor/torrc-init
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
HiddenServiceDir /var/lib/tor/www
|
||||||
|
HiddenServiceVersion 3
|
||||||
|
HiddenServicePort 443 127.0.0.1:443
|
||||||
|
|
||||||
|
Log info file /var/log/tor/tor.log
|
@ -5,4 +5,4 @@ if [ -z "$DOMAIN_NAME" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
/sovereign-stack/deploy.sh --domain="$DOMAIN_NAME"
|
/sovereign-stack/deploy.sh --domain="$DOMAIN_NAME" --hosting-provider=lxd
|
||||||
|
Loading…
Reference in New Issue
Block a user