Initial Commit

This commit is contained in: commit c761bd1a84
19 .gitignore vendored Normal file
@@ -0,0 +1,19 @@
archive/*

# these are generated files, so we don't commit them. But they still show up,
# so that's useful for development.
cloud-init.yml

# this gets generated locally, then pushed to the remote VPS for execution.
temp_btcpay.sh

nginx.conf

# this file can be used by the USER to reset the LXD stack. It deletes an LXC VM (which you should update),
# then clears the LXD stack, including the profile, images, and storage pools,
# then runs go.sh.
reset.sh

# you can create an env file in this directory. The code will source it if the file exists.
# This is a good place to put your LXD info.
#env
17 51-trezor.rules Normal file
@@ -0,0 +1,17 @@
# Trezor: The Original Hardware Wallet
# https://trezor.io/
#
# Put this file into /etc/udev/rules.d
#
# If you are creating a distribution package,
# put this into /usr/lib/udev/rules.d or /lib/udev/rules.d
# depending on your distribution

# Trezor
SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl"

# Trezor v2
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
KERNEL=="hidraw*", ATTRS{idVendor}=="1209", ATTRS{idProduct}=="53c1", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl"
19 Dockerfile Normal file
@@ -0,0 +1,19 @@
FROM ubuntu:21.04

ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync duplicity sshfs snapd lxd-client

RUN mkdir /sovereign-stack
COPY ./ /sovereign-stack
WORKDIR /sovereign-stack

RUN mkdir /site
VOLUME /site
ENV SITE_PATH=/site

COPY ./entrypoint.sh /entrypoint.sh
RUN chmod 0744 /entrypoint.sh



CMD /entrypoint.sh
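# A minimal usage sketch (the image tag and host path below are illustrative, not part of this repo);
# the entrypoint requires DOMAIN_NAME and a site directory mounted at /site:
#   docker build -t sovereign-stack .
#   docker run --rm -e DOMAIN_NAME=example.com -v /path/to/your/site:/site sovereign-stack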
3 README.md Normal file
@@ -0,0 +1,3 @@
# Documentation

Please visit the [Sovereign Stack](https://www.sovereign-stack.org) website for documentation related to this repository.
9 backup_btcpay.sh Executable file
@@ -0,0 +1,9 @@
#!/bin/bash

set -ex

# take the services down, create a backup archive, then pull it down.
ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./backup.sh"
ssh "$FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"
scp "$FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$LOCAL_BACKUP_PATH/btcpay-$1.tar.gz"
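# Note: FQDN, REMOTE_HOME, and LOCAL_BACKUP_PATH are expected to be exported by the calling
# scripts, and $1 is used as a label for the resulting archive (for example,
# "before-update-$UNIX_BACKUP_TIMESTAMP" as passed in by go_btcpay.sh).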
24 backup_www.sh Executable file
@@ -0,0 +1,24 @@
#!/bin/bash

set -exu
cd "$(dirname "$0")"

# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files.
# We could eliminate that and simply save duplicity backups to the management machine running the script.
# This could be done by using a local path and mounting it on the remote VPS,
# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs

# step 1: run duplicity on the remote system to back up all files to the remote backup path.
ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --exclude "$REMOTE_HOME/backups" "$REMOTE_HOME" "file://$REMOTE_BACKUP_PATH"
ssh "$FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH"

# now let's pull down the latest files from the backup directory.
# create a temp directory to serve as the mountpoint for the remote machine's backups directory.
mkdir -p "$SSHFS_PATH"
sshfs "$FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH"

# rsync the files from the remote server to our local backup path.
rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"

# finally, unmount the SSHFS filesystem and clean up.
umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH"
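# Note: restore_www.sh performs the reverse operation (a duplicity restore on the remote host);
# both scripts assume DUPLICITY_BACKUP_PASSPHRASE, REMOTE_BACKUP_PATH, SSHFS_PATH, and
# LOCAL_BACKUP_PATH are exported by the calling scripts.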
11 clear_lxd_stack.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

LXD_VM_NAME="www-sovereign-stack-org"

lxc delete -f "$LXD_VM_NAME"

lxc profile delete "$LXD_VM_NAME"

lxc image delete "sovereign-stack-base" "ubuntu-21-04"

#lxc storage delete default
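# NOTE: the VM name and image aliases above are hard-coded; as the .gitignore notes suggest,
# update them to match your own deployment before running this teardown script.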
3 command.sh Normal file
@@ -0,0 +1,3 @@
#!/bin/bash

./refresh.sh --domain=bitizen.store --hosting-provider=lxd --macvlan-interface=eno3 --storage-backend=/dev/sda
138 defaults.sh Normal file
@@ -0,0 +1,138 @@
#!/bin/bash

set -eu

export DEPLOY_WWW_SERVER=true
export DEPLOY_BTCPAY_SERVER=false

# if true, then we deploy a VPS with Jitsi/Matrix
export DEPLOY_GHOST=true
export DEPLOY_MATRIX=false
export DEPLOY_ONION_SITE=false
export DEPLOY_NEXTCLOUD=false
export DEPLOY_GITEA=false

export WWW_HOSTNAME="www"
export BTCPAY_HOSTNAME="btcpay"
export NEXTCLOUD_HOSTNAME="nextcloud"
export MATRIX_HOSTNAME="chat"
export GITEA_HOSTNAME="git"

export DDNS_PASSWORD=

# this is where the html is sourced from.
export SITE_HTML_PATH=

# enter your AWS Access Key and Secret Access Key here.
export AWS_ACCESS_KEY=
export AWS_SECRET_ACCESS_KEY=

# if overridden, the app will be deployed to proxy $BTCPAY_HOSTNAME.$DOMAIN_NAME requests to the URL specified.
# this is useful when you want to outsource your BTCPay fullnode/lightning node.
#export BTCPAY_HANDLER_URL=


export SMTP_SERVER="smtp.mailgun.org"
export SMTP_PORT="587"

# default AWS region and AMI (free-tier AMI, Ubuntu 21.04)
export AWS_REGION="us-east-1"

# AMI NAME:
# ubuntu-minimal/images/hvm-ssd/ubuntu-hirsute-21.04-amd64-minimal-20211130-907a40d2-dca2-4750-b073-b3254c031ab6
export AWS_AMI_ID="ami-080435381cbbb5b9b"
WWW_INSTANCE_TYPE="t2.micro"
BTCPAY_INSTANCE_TYPE="t2.medium"

# the goal is to keep any particular instance AT OR BELOW t2.medium.
# other options are t2.small, micro, nano; micro is free-tier eligible.
# [vCPUs, Mem(GiB)]
# nano [1,0.5], micro [1,1] (free-tier eligible), small [1,2], medium [2,4], large [2,8], xlarge [4,16], 2xlarge [8,32]

export WWW_INSTANCE_TYPE="$WWW_INSTANCE_TYPE"
export BTCPAY_INSTANCE_TYPE="$BTCPAY_INSTANCE_TYPE"

export SMTP_PASSWORD=
export GHOST_MYSQL_PASSWORD=
export GHOST_MYSQL_ROOT_PASSWORD=
export NEXTCLOUD_MYSQL_PASSWORD=
export GITEA_MYSQL_PASSWORD=
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
export GITEA_MYSQL_ROOT_PASSWORD=
export DUPLICITY_BACKUP_PASSPHRASE=
#opt-add-fireflyiii;opt-add-zammad
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-configurator;"
export SSH_HOME="$HOME/.ssh"
export VLAN_INTERFACE=
export CACHE_DIR="$HOME/cache"
export VM_NAME=
export DEV_MEMORY_MB="4096"
export DEV_CPU_COUNT="4"
export SSHFS_PATH="/tmp/sshfs_temp"

export NEXTCLOUD_SPACE_GB=10

# TODO add LXD check to ensure it's installed.
DEV_LXD_REMOTE="$(lxc remote get-default)"
export DEV_LXD_REMOTE="$DEV_LXD_REMOTE"

export SITE_TITLE=

# we use this later when we create a VM; we annotate which git commit (from a tag) we used.
LATEST_GIT_TAG="$(git describe --abbrev=0)"
export LATEST_GIT_TAG="$LATEST_GIT_TAG"

LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"


# let's ensure all the tools are installed
if [ ! -f "$(which rsync)" ]; then
    echo "ERROR: rsync is not installed. You may want to install your dependencies."
    exit 1
fi

# shellcheck disable=SC1091
export SITE_PATH="$HOME/.sites"
export LXD_DISK_TO_USE=


ENABLE_NGINX_CACHING=false



# TODO
# 1 add check for ~/.aws/credentials and stub one out
# 2 ensure install.sh has been run by checking for tor, docker-machine, lxd, wait-for-it, etc.
# 3 pretty much just run the install script if anything is awry
# 4 maybe check to ensure all the CNAME and A+ records are there first so we can quit before machine creation.

export SITE_PATH="$SITE_PATH/$DOMAIN_NAME"
if [ ! -d "$SITE_PATH" ]; then
    echo "ERROR: '$SITE_PATH' does not exist."
    exit 1
fi

export SITE_PATH="$SITE_PATH"
export BTC_CHAIN="$BTC_CHAIN"
export DEPLOY_BTCPAY_SERVER=false

# if we're running aws/public, we enable nginx caching since it's a public site.
if [ "$VPS_HOSTING_TARGET" = aws ]; then
    # TODO the correct behavior is to be =true, but cookies aren't working right now.
    ENABLE_NGINX_CACHING=true
fi

DEFAULT_DB_IMAGE="mariadb:10.6.5"
export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"

# run the docker stack.
export GHOST_IMAGE="ghost:4.32.0"
export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NGINX_IMAGE="nginx:1.21.4"
export NEXTCLOUD_IMAGE="nextcloud:23.0.0"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
export MATRIX_IMAGE="matrixdotorg/synapse:v1.49.0"
export MATRIX_DB_IMAGE="postgres:13.4"
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
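# Note: this script expects DOMAIN_NAME, VPS_HOSTING_TARGET, and BTC_CHAIN to already be set
# (refresh.sh exports them before sourcing this file), and it expects a per-site directory at
# $HOME/.sites/<your domain> containing a site_definition file (see shared.sh).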
93 domain_init.sh Executable file
@@ -0,0 +1,93 @@
#!/bin/bash

set -exuo nounset
cd "$(dirname "$0")"

# let's make sure we have an SSH keypair. We just use ~/.ssh/id_rsa
if [ ! -f "$SSH_HOME/id_rsa" ]; then
    # generate a new SSH key for the base VM image.
    ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi

# if an authorized_keys file does not exist, we'll stub one out with the current user.
# add additional id_rsa.pub entries manually for more administrative logins.
if [ ! -f "$SITE_PATH/authorized_keys" ]; then
    cat "$SSH_HOME/id_rsa.pub" >> "$SITE_PATH/authorized_keys"
fi

## This is a weird if clause since we need to LEFT-ALIGN the statement below.
SSH_STRING="Host ${FQDN}"
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then

########## BEGIN
cat >> "$SSH_HOME/config" <<-EOF

${SSH_STRING}
HostName ${FQDN}
User ubuntu
EOF
###

fi

# when set to true, this flag indicates that a new VPS was created during THIS script run.
if [ "$VPS_HOSTING_TARGET" = aws ]; then
    # let's create the remote VPS if needed.
    if ! docker-machine ls -q --filter name="$FQDN" | grep -q "$FQDN"; then
        RUN_BACKUP=false

        ./provision_vps.sh

        ./prepare_vps_host.sh
    fi
elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
    ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"

    # check to ensure the MACVLAN interface has been set by the user.
    if [ -z "$DEV_MACVLAN_INTERFACE" ]; then
        echo "ERROR: DEV_MACVLAN_INTERFACE has not been defined. Use '--macvlan-interface=eno1', for example."
        exit 1
    fi

    # let's first check to ensure there's a certs.tar.gz. We need a valid cert for testing.
    if [ ! -f "$SITE_PATH/certs.tar.gz" ]; then
        echo "ERROR: We need a valid cert for testing. Please run with '--app=certonly' first."
        exit 1
    fi

    # if the machine doesn't exist, we create it.
    if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
        RUN_BACKUP=false

        # create a base image if needed and instantiate a VM.
        ./provision_lxc.sh
    fi

    # prepare the VPS to support our applications, backups, and so on.
    ./prepare_vps_host.sh
fi

# clear

# this tells our local docker client to target the remote endpoint via SSH.
export DOCKER_HOST="ssh://ubuntu@$FQDN"

# the following scripts take responsibility for the rest of the provisioning depending on the app you're deploying.
if [ "$APP_TO_DEPLOY" = www ]; then
    ./go_www.sh
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
    ./go_btcpay.sh
elif [ "$APP_TO_DEPLOY" = certonly ]; then
    # renew the certs; certbot takes care of seeing if we need to actually renew.
    if [ "$RUN_CERT_RENEWAL" = true ]; then
        ./generate_certs.sh
    fi

    echo "INFO: Please run 'docker-machine rm -f $FQDN' to remove the remote VPS."
    exit
else
    echo "ERROR: APP_TO_DEPLOY not set correctly. Please refer to the documentation for allowable values."
    exit 1
fi

echo "Successfully deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET; latest git tag is $LATEST_GIT_TAG" >> "$SITE_PATH/debug.log"
2 down_btcpay_compose.sh Normal file
@@ -0,0 +1,2 @@
#!/bin/bash

entrypoint.sh
Normal file
8
entrypoint.sh
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
if [ -z "$DOMAIN_NAME" ]; then
|
||||||
|
echo "ERROR: DOMAIN_NAME not defined.".
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
/sovereign-stack/refresh.sh --domain="$DOMAIN_NAME"
|
23 env Executable file
@@ -0,0 +1,23 @@
#!/bin/bash

###########
# ######
export DEV_MEMORY_MB="4096"
export DEV_CPU_COUNT="4"
export DEV_WWW_MAC_ADDRESS="00:16:3E:AD:25:2C"
export DEV_BTCPAY_MAC_ADDRESS="00:16:3E:AD:25:2D"

LXD_DISK_TO_USE=

if [ "$(lxc remote get-default)" = "antsle" ]; then
    DEV_MACVLAN_INTERFACE="eno3"
    LXD_DISK_TO_USE="/dev/sda"
elif [ "$(lxc remote get-default)" = "local" ]; then
    DEV_MACVLAN_INTERFACE="enp5s0"
fi

export DEV_MACVLAN_INTERFACE="$DEV_MACVLAN_INTERFACE"
export DEV_WWW_MAC_ADDRESS="$DEV_WWW_MAC_ADDRESS"
export DEV_BTCPAY_MAC_ADDRESS="$DEV_BTCPAY_MAC_ADDRESS"
export LXD_DISK_TO_USE="$LXD_DISK_TO_USE"
######
22 generate_certs.sh Executable file
@@ -0,0 +1,22 @@
#!/bin/bash

set -exuo nounset
cd "$(dirname "$0")"

# let's do a refresh of the certificates. Let's Encrypt will not renew if it's not time yet.
docker pull certbot/certbot

docker run -it --rm \
    --name certbot \
    -p 80:80 \
    -p 443:443 \
    -v /etc/letsencrypt:/etc/letsencrypt \
    -v /var/lib/letsencrypt:/var/lib/letsencrypt certbot/certbot \
    certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$NEXTCLOUD_FQDN" -d "$MATRIX_FQDN" -d "$GITEA_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"

# back up the certs to our SITE_PATH/certs.tar.gz so we have them handy (for local development).
ssh "$FQDN" sudo tar -zcvf "$REMOTE_HOME/certs.tar.gz" -C /etc ./letsencrypt
ssh "$FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/certs.tar.gz"

# now pull the tarball down to the local machine.
scp "$FQDN:$REMOTE_HOME/certs.tar.gz" "$SITE_PATH/certs.tar.gz"
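# Note: certbot's standalone authenticator needs ports 80/443 reachable from the internet and
# DNS for every -d name above already pointing at this host, so run this only once DNS is in place.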
54 go_btcpay.sh Executable file
@@ -0,0 +1,54 @@
#!/bin/bash

set -exuo nounset
cd "$(dirname "$0")"

if [ "$RUN_BACKUP" = true ]; then
    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
fi

# we will re-run the btcpay provisioning scripts if directed to do so.
# if an update does occur, we grab another backup.
if [ "$UPDATE_BTCPAY" = true ]; then

    if [ "$RUN_BACKUP" = true ]; then
        # grab a backup PRIOR to the update.
        ./backup_btcpay.sh "before-update-$UNIX_BACKUP_TIMESTAMP"
    fi

    # run the update.
    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-update.sh"

else
    if [ "$RUN_BACKUP" = true ]; then
        # we just grab a regular backup.
        ./backup_btcpay.sh "regular-backup-$UNIX_BACKUP_TIMESTAMP"
    fi
fi

# run a restoration if specified.
if [ "$RUN_RESTORE" = true ]; then
    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-down.sh"
    ./restore_btcpay.sh
fi


if [ "$RECONFIGURE_BTCPAY_SERVER" = true ]; then
    # re-run the setup script.
    ./run_btcpay_setup.sh
fi

if [ "$MIGRATE_BTCPAY_SERVER" = false ]; then
    # The default is to resume services, though the admin may want to keep services off (e.g., for a migration);
    # we bring the services back up by default.
    ssh "$FQDN" "cd $REMOTE_HOME/btcpayserver-docker/; sudo bash -c ./btcpay-up.sh"

    # we wait for lightning to come online too.
    wait-for-it -t 60 "$FQDN:80"
    wait-for-it -t 60 "$FQDN:443"

    xdg-open "http://$FQDN"
else
    echo "WARNING: The '--migrate' flag was specified. BTCPay Server services HAVE NOT BEEN TURNED ON!"
    echo "NOTE: You can restore your latest backup to a new host that has BTCPay Server installed."
fi
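# Note: RUN_BACKUP, RUN_RESTORE, UPDATE_BTCPAY, RECONFIGURE_BTCPAY_SERVER, and MIGRATE_BTCPAY_SERVER
# are exported by refresh.sh/domain_init.sh; UNIX_BACKUP_TIMESTAMP is presumably provided by
# shared.sh, which refresh.sh sources before this script runs.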
136 go_www.sh Executable file
@@ -0,0 +1,136 @@
#!/bin/bash

set -exuo nounset
cd "$(dirname "$0")"

TOR_CONFIG_PATH=

ssh "$FQDN" mkdir -p "$REMOTE_HOME/ghost_site" "$REMOTE_HOME/ghost_db"

if [ "$DEPLOY_NEXTCLOUD" = true ]; then
    ssh "$FQDN" mkdir -p "$REMOTE_NEXTCLOUD_PATH/db/data"
    ssh "$FQDN" mkdir -p "$REMOTE_NEXTCLOUD_PATH/db/logs"
    ssh "$FQDN" mkdir -p "$REMOTE_NEXTCLOUD_PATH/html"
fi

if [ "$DEPLOY_GITEA" = true ]; then
    ssh "$FQDN" mkdir -p "$REMOTE_GITEA_PATH/data" "$REMOTE_GITEA_PATH/db"
fi

# enable docker swarm mode so we can support docker stacks.
if ! docker info | grep -q "Swarm: active"; then
    docker swarm init
fi

# stop services.
if docker stack list --format "{{.Name}}" | grep -q webstack; then
    docker stack rm webstack
    sleep 10
fi

# this will generate letsencrypt certs and pull them down locally.
if [ "$VPS_HOSTING_TARGET" != lxd ]; then
    # really we should change this if clause to something like
    # "if the perimeter firewall allows ports 80/443, then go ahead."
    if [ "$VPS_HOSTING_TARGET" = aws ] && [ "$RUN_CERT_RENEWAL" = true ]; then
        ./generate_certs.sh
    fi
else

    # restore the certs. If they don't exist in a backup, we restore from SITE_PATH.
    if [ -f "$SITE_PATH/certs.tar.gz" ]; then
        scp "$SITE_PATH/certs.tar.gz" "ubuntu@$FQDN:$REMOTE_HOME/certs.tar.gz"
        ssh "$FQDN" sudo tar -xvf "$REMOTE_HOME/certs.tar.gz" -C /etc
    else
        echo "ERROR: Certificates do not exist locally. You need to obtain some, perhaps by running with '--app=certonly'."
        exit 1
    fi
fi


if [ "$RUN_BACKUP" = true ]; then
    ./backup_www.sh
fi

if [ "$RUN_RESTORE" = true ]; then
    ./restore_www.sh
fi

NEW_MATRIX_DEPLOYMENT=false
if [ "$DEPLOY_MATRIX" = true ]; then
    if ! ssh "$FQDN" "[ -d $REMOTE_HOME/matrix ]"; then
        NEW_MATRIX_DEPLOYMENT=true
        ssh "$FQDN" "mkdir $REMOTE_HOME/matrix && mkdir $REMOTE_HOME/matrix/db && mkdir $REMOTE_HOME/matrix/data"

        docker run -it --rm -v "$REMOTE_HOME/matrix/data":/data \
            -e SYNAPSE_SERVER_NAME="${DOMAIN_NAME}" \
            -e SYNAPSE_REGISTRATION_SHARED_SECRET="${MATRIX_SHARED_SECRET}" \
            -e SYNAPSE_REPORT_STATS=yes \
            -e POSTGRES_PASSWORD="${MATRIX_DB_PASSWORD}" \
            -e SYNAPSE_NO_TLS=1 \
            -e SYNAPSE_ENABLE_REGISTRATION=yes \
            -e SYNAPSE_LOG_LEVEL=DEBUG \
            -e POSTGRES_DB=synapse \
            -e POSTGRES_HOST=matrix-db \
            -e POSTGRES_USER=synapse \
            -e POSTGRES_PASSWORD="${MATRIX_DB_PASSWORD}" \
            "$MATRIX_IMAGE" generate
    fi
fi

if [ "$DEPLOY_ONION_SITE" = true ]; then
    # ensure the tor image is built.
    docker build -t tor:latest ./tor

    # if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
    # this is how we generate a new torv3 endpoint.
    if ! ssh "$FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
        ssh "$FQDN" "mkdir -p $REMOTE_HOME/tor"
        TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
        export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
        docker stack deploy -c ./tor.yml torstack
        sleep 20
        docker stack rm torstack
        sleep 20
    fi

    ONION_ADDRESS="$(ssh "$FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
    export ONION_ADDRESS="$ONION_ADDRESS"

    # # Since we run a separate ghost process, we create a new directory and symlink it to the original
    # if ! ssh "$FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
    #     ssh "$FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
    # fi
fi

if [ "$RUN_SERVICES" = true ]; then
    docker stack deploy -c "$DOCKER_YAML_PATH" webstack

    # start a browser session; point it to port 80 to ensure the HTTPS redirect works.
    wait-for-it -t 320 "$DOMAIN_NAME:80"
    wait-for-it -t 320 "$DOMAIN_NAME:443"

    if [ "$DEPLOY_MATRIX" = true ]; then
        # If this is a new Matrix deployment, then we should add the default admin user.
        if [ "$NEW_MATRIX_DEPLOYMENT" = true ]; then
            # get the container ID for matrix/synapse.
            MATRIX_CONTAINER_ID="$(docker ps | grep matrixdotorg | awk '{print $1;}')"

            # create the user.
            docker exec -it "$MATRIX_CONTAINER_ID" register_new_matrix_user http://localhost:8008 -u "$ADMIN_ACCOUNT_USERNAME" -p "$MATRIX_ADMIN_PASSWORD" -a --config /data/homeserver.yaml
        fi
    fi

    # open browser tabs.
    if [ "$DEPLOY_GHOST" = true ]; then
        xdg-open "http://$FQDN"
    fi

    if [ "$DEPLOY_NEXTCLOUD" = true ]; then
        xdg-open "http://$NEXTCLOUD_FQDN"
    fi

    if [ "$DEPLOY_GITEA" = true ]; then
        xdg-open "http://$GITEA_FQDN"
    fi
fi
30 install.sh Executable file
@@ -0,0 +1,30 @@
#!/bin/bash

sudo apt-get update

sudo apt-get install -y wait-for-it dnsutils tor rsync sshfs

if [ ! -f "$(which lxd)" ]; then
    sudo snap install lxd
fi

# let's ensure docker-machine is available.
base="https://github.com/docker/machine/releases/download/v0.16.2"
curl -L "$base/docker-machine-$(uname -s)-$(uname -m)" >/tmp/docker-machine
sudo mv /tmp/docker-machine /usr/local/bin/docker-machine
sudo chmod +x /usr/local/bin/docker-machine

# NOTE!!! THE DOCKER CLI MUST BE INSTALLED VIA the instructions at https://docs.docker.com/engine/install/ubuntu/ (DO NOT USE SNAP)
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce-cli -y

# install trezor requirements https://wiki.trezor.io/Apps:SSH_agent
sudo apt update && sudo apt install python3-pip libusb-1.0-0-dev libudev-dev pinentry-curses
pip3 install trezor_agent

sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
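# After installation, a quick sanity check of the tooling might look like this (illustrative):
#   docker --version; docker-machine version; lxc version; command -v wait-for-it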
142 lxc_profile.yml Normal file
@@ -0,0 +1,142 @@
config:
  limits.cpu: "${DEV_CPU_COUNT}"
  limits.memory: "${DEV_MEMORY_MB}MB"
  user.vendor-data: |
    #cloud-config

    apt_mirror: http://us.archive.ubuntu.com/ubuntu/
    package_update: true
    package_upgrade: false
    package_reboot_if_required: false

    preserve_hostname: false
    fqdn: ${FQDN}

    packages:
      - curl
      - ssh-askpass
      - apt-transport-https
      - ca-certificates
      - gnupg-agent
      - software-properties-common
      - lsb-release
      - net-tools
      - htop
      - rsync
      - duplicity
      - sshfs

    groups:
      - docker

    users:
      - name: ubuntu
        shell: /bin/bash
        lock_passwd: false
        groups:
          - docker
        sudo:
          - ALL=(ALL) NOPASSWD:ALL
        ssh_authorized_keys:
          - ${SSH_AUTHORIZED_KEY}

    write_files:
      - path: ${REMOTE_HOME}/docker.asc
        content: |
          -----BEGIN PGP PUBLIC KEY BLOCK-----

          mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
          lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
          38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
          L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
          UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
          cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
          ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
          vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
          G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
          XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
          q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
          tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
          BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
          v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
          tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
          jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
          6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
          XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
          FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
          g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
          ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
          9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
          G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
          FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
          EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
          M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
          Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
          w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
          z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
          eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
          VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
          1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
          zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
          pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
          ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
          BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
          1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
          YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
          mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
          KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
          JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
          cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
          6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
          U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
          VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
          irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
          SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
          QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
          9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
          24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
          dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
          Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
          H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
          /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
          M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
          xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
          jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
          YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
          =0YYh
          -----END PGP PUBLIC KEY BLOCK-----

      - path: /etc/ssh/ssh_config
        content: |
          Port 22
          ListenAddress 0.0.0.0
          Protocol 2
          ChallengeResponseAuthentication no
          PasswordAuthentication no
          UsePAM no
          LogLevel INFO

    runcmd:
      - cat ${REMOTE_HOME}/docker.asc | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
      - sudo rm ${REMOTE_HOME}/docker.asc
      - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
      - sudo apt-get remove docker docker.io containerd runc
      - sudo apt-get update
      - sudo apt-get install -y docker-ce docker-ce-cli containerd.io
      - echo "alias ll='ls -lah'" >> ${REMOTE_HOME}/.bash_profile
      - sudo apt-get install -y openssh-server

description: Default LXD profile for ${DOMAIN_NAME}
devices:
  root:
    path: /
    pool: default
    type: disk
  config:
    source: cloud-init:config
    type: disk
  enp5s0:
    nictype: macvlan
    parent: ${DEV_MACVLAN_INTERFACE}
    type: nic
name: ${LXD_VM_NAME}
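# Note: this file is a template; provision_lxc.sh renders it with envsubst and pipes the result
# into `lxc profile edit`, so every ${...} placeholder above is expected to be exported by the caller.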
16 prepare_vps_host.sh Executable file
@@ -0,0 +1,16 @@
#!/bin/bash

set -exu

# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"

# create a directory to store backup archives. This is done on all new VMs.
ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"

if [ "$APP_TO_DEPLOY" = btcpay ]; then
    echo "INFO: new machine detected. Provisioning BTCPay Server scripts."

    ./run_btcpay_setup.sh
    exit
fi
125 provision_lxc.sh Executable file
@@ -0,0 +1,125 @@
#!/bin/bash

set -eu

# check to ensure the admin has specified a MACVLAN interface.
if [ -z "$DEV_MACVLAN_INTERFACE" ]; then
    echo "ERROR: DEV_MACVLAN_INTERFACE not defined in project."
    exit 1
fi

# The base VM image.
BASE_LXC_IMAGE="ubuntu/21.04/cloud"

# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_VM_NAME"; then
    lxc profile create "$LXD_VM_NAME"
fi

# generate the custom cloud-init file. Cloud-init installs and configures sshd.
SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
envsubst < ./lxc_profile.yml > "$SITE_PATH/cloud-init.yml"

# configure the profile with our generated cloud-init.yml file.
cat "$SITE_PATH/cloud-init.yml" | lxc profile edit "$LXD_VM_NAME"

wait_for_lxc_ip () {

    LXC_INSTANCE_NAME="$1"
    IP_V4_ADDRESS=
    while true; do
        IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
        if [ -n "$IP_V4_ADDRESS" ]; then
            # give the machine extra time to spin up.
            wait-for-it -t 300 "$IP_V4_ADDRESS:22"
            break
        else
            sleep 1
            printf '.'
        fi
    done

}


# create the default storage pool if necessary.
if ! lxc storage list --format csv | grep -q default; then
    if [ -n "$LXD_DISK_TO_USE" ]; then
        lxc storage create default zfs source="$LXD_DISK_TO_USE" size="${ROOT_DISK_SIZE_GB}GB"
    else
        lxc storage create default zfs size="${ROOT_DISK_SIZE_GB}GB"
    fi
fi


MAC_ADDRESS_TO_PROVISION="$DEV_WWW_MAC_ADDRESS"
if [ "$APP_TO_DEPLOY" = btcpay ]; then
    MAC_ADDRESS_TO_PROVISION="$DEV_BTCPAY_MAC_ADDRESS"
fi

# If our template doesn't exist, we create one.
if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then

    # If the LXC VM does exist, then we will delete it (so we can start fresh).
    if lxc list -q --format csv | grep -q "$VM_NAME"; then
        lxc delete "$VM_NAME" --force

        # remove the ssh known endpoint, else we get warnings.
        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VM_NAME"
    fi

    # let's download our base image.
    if ! lxc image list --format csv --columns l | grep -q "ubuntu-21-04"; then
        # if the image doesn't exist, download it from Ubuntu's image server.
        # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs;
        # we don't really need to cache this locally since it gets continually updated upstream.
        lxc image copy "images:$BASE_LXC_IMAGE" "$DEV_LXD_REMOTE": --alias "ubuntu-21-04" --public --vm
    fi

    lxc init \
        --profile="sovereign-stack" \
        "ubuntu-21-04" \
        "$VM_NAME" --vm

    # let's PIN the HW address for now so we don't exhaust IP addresses
    # and so we can set DNS internally.
    lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"

    lxc start "$VM_NAME"

    # let's wait a minimum of 15 seconds before we start checking for an IP address.
    sleep 15

    # let's wait for the LXC VM remote machine to get an IP address.
    wait_for_lxc_ip "$VM_NAME"

    # Let's remove any entry in our known_hosts, then add it back.
    # we are using the IP address here so we don't have to rely on external DNS
    # configuration for the base image preparation.
    ssh-keygen -R "$IP_V4_ADDRESS"
    ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
    ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu "$REMOTE_HOME"

    # stop the VM and get a snapshot.
    lxc stop "$VM_NAME"
    lxc publish "$DEV_LXD_REMOTE:$VM_NAME" --alias "$VM_NAME" --public
    lxc delete "$VM_NAME"
fi

# now let's create a new VM to work with.
lxc init --profile="sovereign-stack" "$VM_NAME" "$LXD_VM_NAME" --vm

# let's PIN the HW address for now so we don't exhaust IP addresses
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"

lxc start "$LXD_VM_NAME"

wait_for_lxc_ip "$LXD_VM_NAME"

# remove any existing SSH identities for the host, then add it back.
ssh-keygen -R "$IP_V4_ADDRESS"
93 provision_vps.sh Executable file
@@ -0,0 +1,93 @@
#!/bin/bash

set -euo nounset
cd "$(dirname "$0")"



if [ ! -f "$HOME/.aws/credentials" ]; then

    # TODO write a credential file baseline
    echo "ERROR: Please update your '$HOME/.aws/credentials' file before continuing."
    mkdir -p "$HOME/.aws"
    touch "$HOME/.aws/credentials"

    # stub out a credentials file with empty values.
    cat >"$HOME/.aws/credentials" <<EOL
#!/bin/bash

# enter your AWS Access Key and Secret Access Key here.
export AWS_ACCESS_KEY=
export AWS_SECRET_ACCESS_KEY=

EOL

    exit 1
fi

source "$HOME/.aws/credentials"

if [ -z "$AWS_ACCESS_KEY" ]; then
    echo "ERROR: AWS_ACCESS_KEY is not set."
    exit 1
fi

if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    echo "ERROR: AWS_SECRET_ACCESS_KEY is not set."
    exit 1
fi

# ports: all ports go to nginx; 8448 directs to the Matrix federation service.

# Note: we assume the script has already made sure the machine doesn't exist.
if [ "$APP_TO_DEPLOY" = www ] || [ "$APP_TO_DEPLOY" = certonly ]; then
    # creates a public VM in AWS and provisions the www website.
    docker-machine create --driver amazonec2 \
        --amazonec2-open-port 80 \
        --amazonec2-open-port 443 \
        --amazonec2-open-port 8448 \
        --amazonec2-access-key "$AWS_ACCESS_KEY" \
        --amazonec2-secret-key "$AWS_SECRET_ACCESS_KEY" \
        --amazonec2-region "$AWS_REGION" \
        --amazonec2-ami "$AWS_AMI_ID" \
        --amazonec2-root-size "$ROOT_DISK_SIZE_GB" \
        --amazonec2-instance-type "$WWW_INSTANCE_TYPE" \
        --engine-label tag="$LATEST_GIT_TAG" \
        --engine-label commit="$LATEST_GIT_COMMIT" \
        "$FQDN"

elif [ "$APP_TO_DEPLOY" = btcpay ]; then
    # creates a public VM in AWS and provisions the BTCPay server.
    docker-machine create --driver amazonec2 \
        --amazonec2-open-port 80 \
        --amazonec2-open-port 443 \
        --amazonec2-open-port 9735 \
        --amazonec2-access-key "$AWS_ACCESS_KEY" \
        --amazonec2-secret-key "$AWS_SECRET_ACCESS_KEY" \
        --amazonec2-region "$AWS_REGION" \
        --amazonec2-ami "$AWS_AMI_ID" \
        --amazonec2-root-size "$ROOT_DISK_SIZE_GB" \
        --amazonec2-instance-type "$BTCPAY_INSTANCE_TYPE" \
        --engine-label tag="$LATEST_GIT_TAG" \
        --engine-label commit="$LATEST_GIT_COMMIT" \
        "$FQDN"

fi

docker-machine scp "$SITE_PATH/authorized_keys" "$FQDN:$REMOTE_HOME/authorized_keys"
docker-machine ssh "$FQDN" "cat $REMOTE_HOME/authorized_keys >> $REMOTE_HOME/.ssh/authorized_keys"

# we have to ensure ubuntu is able to run docker commands without sudo.
docker-machine ssh "$FQDN" sudo usermod -aG docker ubuntu

# we restart so dockerd starts with fresh group membership.
docker-machine ssh "$FQDN" sudo systemctl restart docker

# TODO INSTALL DOCKER COMPOSE

# let's wire up the DNS so subsequent ssh commands resolve to the VPS.
./run_ddns.sh

# remove the SSH hostname from known_hosts.
# todo: why do we need this again?
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
14 publish_tag.sh Executable file
@@ -0,0 +1,14 @@
#!/bin/bash

# this script will tag the repo then push it to origin.
TAG_NAME=v0.0.14
COMMIT_MESSAGE="Creating commit on $(date)."
TAG_MESSAGE="Creating tag $TAG_NAME on $(date)."

# create a git commit with staged changes.
git commit -m "$COMMIT_MESSAGE" -s
git tag -a "$TAG_NAME" -m "$TAG_MESSAGE" -s

# optional; push to remote.
git push --all
git push --tags
182 refresh.sh Executable file
@@ -0,0 +1,182 @@
#!/bin/bash

set -exuo nounset
cd "$(dirname "$0")"

# the user can specify an env file which sets some environment variables. See the default content of env.
if [ -f "$(pwd)/env" ]; then
    source "$(pwd)/env"
fi

USER_DELETE_MACHINE=false
DOMAIN_NAME=
VPS_HOSTING_TARGET=lxd
RUN_CERT_RENEWAL=true
USER_NO_BACKUP=false
USER_RUN_RESTORE=false
BTC_CHAIN=testnet
UPDATE_BTCPAY=false
MIGRATE_BTCPAY_SERVER=false
RECONFIGURE_BTCPAY_SERVER=false
BTCPAY_ADDITIONAL_HOSTNAMES=

for i in "$@"; do
    case $i in
        --domain=*)
            DOMAIN_NAME="${i#*=}"
            shift
            ;;
        --hosting-provider=*)
            VPS_HOSTING_TARGET="${i#*=}"
            shift
            ;;
        --restore)
            USER_RUN_RESTORE=true
            shift
            ;;
        --update)
            UPDATE_BTCPAY=true
            shift
            ;;
        --no-backup)
            USER_NO_BACKUP=true
            shift
            ;;
        --delete)
            USER_DELETE_MACHINE=true
            shift
            ;;
        --storage-backend=*)
            LXD_DISK_TO_USE="${i#*=}"
            shift
            ;;
        --no-cert-renew)
            RUN_CERT_RENEWAL=false
            shift
            ;;
        --mainnet)
            BTC_CHAIN=mainnet
            shift
            ;;
        --migrate)
            MIGRATE_BTCPAY_SERVER=true
            shift
            ;;
        --reconfigure-btcpay)
            RECONFIGURE_BTCPAY_SERVER=true
            shift
            ;;
        *)
            # unknown option
            ;;
    esac
done

export DOMAIN_NAME="$DOMAIN_NAME"
export VPS_HOSTING_TARGET="$VPS_HOSTING_TARGET"
export LXD_DISK_TO_USE="$LXD_DISK_TO_USE"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"

export BTC_CHAIN="$BTC_CHAIN"
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
export MIGRATE_BTCPAY_SERVER="$MIGRATE_BTCPAY_SERVER"
export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"

# # first of all, if there are uncommitted changes, we quit. You better stash your work yo!
# if git update-index --refresh | grep -q "needs update"; then
#     echo "ERROR: You have uncommitted changes! Better stash your work with 'git stash'."
#     exit 1
# fi

# shellcheck disable=SC1091
source ./defaults.sh

# iterate over all our server endpoints and provision them if needed.
# www
for APP_TO_DEPLOY in btcpay www; do
    FQDN=
    export APP_TO_DEPLOY="$APP_TO_DEPLOY"
    # shellcheck disable=SC1091
    source ./shared.sh

    # skip this iteration if the site_definition says not to deploy btcpay server.
    if [ "$APP_TO_DEPLOY" = btcpay ]; then
        FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
        if [ "$DEPLOY_BTCPAY_SERVER" = false ]; then
            continue
        fi
    fi

    # skip if the server config is set to not deploy.
    if [ "$APP_TO_DEPLOY" = www ]; then
        FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
        if [ "$DEPLOY_WWW_SERVER" = false ]; then
            continue
        fi
    fi

    export FQDN="$FQDN"

    # generate the docker yaml and nginx configs.
    ./stub_docker_yml.sh
    ./stub_nginxconf.sh

    MACHINE_EXISTS=false
    if [ "$VPS_HOSTING_TARGET" = aws ] && docker-machine ls -q | grep -q "$FQDN"; then
        MACHINE_EXISTS=true
    fi

    if [ "$VPS_HOSTING_TARGET" = lxd ] && lxc list --format csv | grep -q "$FQDN"; then
        MACHINE_EXISTS=true
    fi

    if [ "$USER_NO_BACKUP" = true ]; then
        RUN_BACKUP=true
    fi

    if [ "$MACHINE_EXISTS" = true ]; then
        # we delete the machine if the user has directed us to.
        if [ "$USER_DELETE_MACHINE" = true ]; then
            # run the domain_init based on user input.
            if [ "$USER_NO_BACKUP" = true ]; then
                echo "Machine exists. We don't need to back it up because the user has directed --no-backup."
            else
                echo "Machine exists. Since we're going to delete it, let's grab a backup. We don't need to restore services since we're deleting it."
                RUN_RESTORE=false RUN_BACKUP=true RUN_SERVICES=false ./domain_init.sh
            fi

            # delete the remote VPS.
            if [ "$VPS_HOSTING_TARGET" = aws ]; then
                if [ "$APP_TO_DEPLOY" != btcpay ]; then
                    # docker-machine rm -f "$FQDN"
                    echo "ERROR: NOT IMPLEMENTED"
                fi
            elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
                lxc delete --force "$LXD_VM_NAME"
            fi

            # Then we run the script again to re-instantiate a new VPS, restoring all user data.
            # if the restore directory doesn't exist, then we end up with a new site.
            echo "INFO: Recreating the remote VPS then restoring user data."
            RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP=false RUN_SERVICES=true ./domain_init.sh
        else
            if [ "$USER_NO_BACKUP" = true ]; then
                RUN_BACKUP=false
                echo "INFO: Maintaining existing VPS. RUN_BACKUP=$RUN_BACKUP RUN_RESTORE=$USER_RUN_RESTORE"
            else
                RUN_BACKUP=true
                echo "INFO: Maintaining existing VPS. RUN_BACKUP=$RUN_BACKUP RUN_RESTORE=$USER_RUN_RESTORE"
            fi

            RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP="$RUN_BACKUP" RUN_SERVICES=true ./domain_init.sh
        fi
    else
        if [ "$USER_DELETE_MACHINE" = true ]; then
            echo "INFO: User has indicated to delete the machine, but it doesn't exist. Going to create it anyway."
        fi

        # The machine does not exist. Let's bring it into existence, restoring from the latest backup.
        echo "Machine does not exist. RUN_RESTORE=$USER_RUN_RESTORE RUN_BACKUP=false"
        RUN_RESTORE="$USER_RUN_RESTORE" RUN_BACKUP=false RUN_SERVICES=true ./domain_init.sh
    fi
done
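# Example invocation (taken from command.sh; flags other than those parsed above are ignored):
#   ./refresh.sh --domain=bitizen.store --hosting-provider=lxd --storage-backend=/dev/sda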
21 restore_btcpay.sh Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

set -exu

# this script ASSUMES services have already been taken down.

# first, let's ask the user for the absolute path to the backup file that we want to restore.
FILE_PATH=
read -r -p "Please enter the absolute path of the backup file you want to restore: " FILE_PATH
if [ -f "$FILE_PATH" ]; then
    # then we grab a backup of the existing stuff BEFORE the restoration attempt.
    ./backup_btcpay.sh "before-restore-$UNIX_BACKUP_TIMESTAMP"

    echo "INFO: Restoring BTCPay Server: $FILE_PATH"
    ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
    scp "$FILE_PATH" "$FQDN:$REMOTE_BACKUP_PATH/btcpay.tar.gz"
    ssh "$FQDN" "cd /; sudo tar -xzvf $REMOTE_BACKUP_PATH/btcpay.tar.gz"
else
    echo "ERROR: File does not exist."
    exit 1
fi
20 restore_www.sh Executable file
@@ -0,0 +1,20 @@
#!/bin/bash

set -exu

# first, this is a restore operation. We need to ask the administrator
# if they want to continue because it results in data loss.
# indeed, our first step is to delete the home directory on the remote server.

# delete the home directory so we know we are restoring all files from the duplicity archive.
ssh "$FQDN" sudo rm -rf "$REMOTE_HOME/*"

# scp our local backup directory to the remote machine.
ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"

# TODO instead of scp'ing the files up there, let's mount the local backup folder to a remote folder, then just run a duplicity restore.
scp -r "$LOCAL_BACKUP_PATH/" "$FQDN:$REMOTE_HOME/backups/$APP_TO_DEPLOY"

# now we run duplicity to restore the archive.
ssh "$FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/" "$REMOTE_HOME/"
#ssh "$FQDN" sudo tar -xvf "$REMOTE_HOME/certs.tar.gz" -C /etc
60
run_btcpay_setup.sh
Executable file
60
run_btcpay_setup.sh
Executable file
@ -0,0 +1,60 @@
#!/bin/bash

set -ex

# export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
# BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"

# stub out the btcpay.sh setup script that gets pushed to the remote VPS.
# it waits for cloud-init, fetches btcpayserver-docker, then runs btcpay-setup.sh.
cat > "$SITE_PATH/btcpay.sh" <<EOL
#!/bin/bash

set -ex

# wait for cloud-init to complete.
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
    sleep 1
done

# get pre-reqs
apt-get update && apt-get install -y git wget

# if an existing btcpayserver-docker checkout isn't on master, move it aside; otherwise pull it.
if [ -d "btcpayserver-docker" ]; then
    EXISTING_BRANCH="\$(git -C btcpayserver-docker rev-parse --abbrev-ref HEAD)"
    if [ "\$EXISTING_BRANCH" != "master" ]; then
        echo "existing btcpayserver-docker folder found that did not match our specified fork. Moving it aside. (Current branch: \$EXISTING_BRANCH)"
        mv "btcpayserver-docker" "btcpayserver-docker_\$(date +%s)"
    else
        echo "existing btcpayserver-docker folder found; pulling instead of cloning."
        git -C btcpayserver-docker pull
    fi
fi

if [ ! -d "btcpayserver-docker" ]; then
    echo "cloning btcpayserver-docker"
    git clone -b master https://github.com/btcpayserver/btcpayserver-docker btcpayserver-docker
fi

export BTCPAY_HOST="${FQDN}"
export NBITCOIN_NETWORK="${BTC_CHAIN}"
export LIGHTNING_ALIAS="${DOMAIN_NAME}"
export LETSENCRYPT_EMAIL="${CERTIFICATE_EMAIL_ADDRESS}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"

# opt-save-storage keeps 1 year of blocks (prunes to 100 GB)
# opt-add-btctransmuter adds the transmuter software
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="${BTCPAYGEN_ADDITIONAL_FRAGMENTS}"
export BTCPAY_ADDITIONAL_HOSTS="${BTCPAY_ADDITIONAL_HOSTNAMES}"
export BTCPAY_ENABLE_SSH=true

cd btcpayserver-docker

# run fast_sync if it's not been done before.
if [ ! -f /home/ubuntu/fast_sync_completed ]; then
    cd ./contrib/FastSync
    ./load-utxo-set.sh
    touch /home/ubuntu/fast_sync_completed
    cd -
fi

# provision the btcpay server
. ./btcpay-setup.sh -i

EOL

# send the setup script to the remote machine and run it.
scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$FQDN" chmod 0744 "$REMOTE_HOME/btcpay_setup.sh"
ssh "$FQDN" sudo bash -c ./btcpay_setup.sh
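# NOTE (sketch, not part of the original script): because btcpay.sh is generated
# through variable substitution, a quick local syntax check before shipping it to
# the VPS can catch quoting mistakes. bash -n parses without executing; shellcheck
# is optional and assumed to be installed on the deployment machine.
bash -n "$SITE_PATH/btcpay.sh"
shellcheck "$SITE_PATH/btcpay.sh" || true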
54
run_ddns.sh
Executable file
@ -0,0 +1,54 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
# create the ddclient.conf file
|
||||||
|
cat >/tmp/ddclient.conf <<EOL
|
||||||
|
### ddclient.conf
|
||||||
|
### namecheap
|
||||||
|
##################
|
||||||
|
use=web, web=checkip.dyndns.com/, web-skip='IP Address'
|
||||||
|
protocol=namecheap
|
||||||
|
server=dynamicdns.park-your-domain.com
|
||||||
|
login=${DOMAIN_NAME}
|
||||||
|
password=${DDNS_PASSWORD}
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# for the www stack, we register only the domain name so our URLs look like https://$DOMAIN_NAME
|
||||||
|
if [ "$APP_TO_DEPLOY" = www ] || [ "$APP_TO_DEPLOY" = certonly ]; then
|
||||||
|
DDNS_STRING="@"
|
||||||
|
else
|
||||||
|
DDNS_STRING="$DDNS_HOST"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# append the correct DDNS string to ddclient.conf
|
||||||
|
echo "$DDNS_STRING" >> /tmp/ddclient.conf
|
||||||
|
|
||||||
|
cat /tmp/ddclient.conf
|
||||||
|
|
||||||
|
# send the ddclient.conf file to the remote vps.
|
||||||
|
docker-machine scp /tmp/ddclient.conf "$FQDN:$REMOTE_HOME/ddclient.conf"
|
||||||
|
docker-machine ssh "$FQDN" sudo cp "$REMOTE_HOME/ddclient.conf" /etc/ddclient.conf
|
||||||
|
docker-machine ssh "$FQDN" sudo chown root:root /etc/ddclient.conf
|
||||||
|
docker-machine ssh "$FQDN" sudo chmod 0600 /etc/ddclient.conf
|
||||||
|
docker-machine ssh "$FQDN" sudo apt-get -qq install -y ddclient wait-for-it git rsync duplicity sshfs
|
||||||
|
docker-machine ssh "$FQDN" sudo ddclient
|
||||||
|
|
||||||
|
# wait for DNS to get set up. We compare the resolved address against the IP address of the actual VPS.
|
||||||
|
echo "INFO: Verifying correct DNS configuration. This may take a while."
|
||||||
|
MACHINE_IP="$(docker-machine ip "$FQDN")"
|
||||||
|
|
||||||
|
DDNS_SLEEP_SECONDS=60
|
||||||
|
while true; do
|
||||||
|
# we test the www CNAME here so we can be assured the underlying record has been updated.
|
||||||
|
if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$MACHINE_IP" ]]; then
|
||||||
|
echo ""
|
||||||
|
echo "SUCCESS: The DNS appears to be configured correctly."
|
||||||
|
|
||||||
|
echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow stale DNS records to expire."
|
||||||
|
sleep "$DDNS_SLEEP_SECONDS";
|
||||||
|
break;
|
||||||
|
fi
|
||||||
|
|
||||||
|
printf "." && sleep 2;
|
||||||
|
done
|
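# NOTE (sketch, not part of the original script): the polling loop above runs
# until the record matches and can spin indefinitely if the DDNS update never
# lands. a variant with an overall timeout, reusing $FQDN and $MACHINE_IP;
# DDNS_TIMEOUT_SECONDS is a hypothetical knob.
DDNS_TIMEOUT_SECONDS=900
SECONDS=0
until [ "$(getent hosts "$FQDN" | awk '{ print $1 }')" = "$MACHINE_IP" ]; do
    if [ "$SECONDS" -ge "$DDNS_TIMEOUT_SECONDS" ]; then
        echo "ERROR: DNS for $FQDN did not resolve to $MACHINE_IP within $DDNS_TIMEOUT_SECONDS seconds."
        exit 1
    fi
    printf "." && sleep 2
done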
202
shared.sh
Executable file
@ -0,0 +1,202 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
# check to see if the site path exists. Exit if not.
|
||||||
|
if [ ! -d "$SITE_PATH" ]; then
|
||||||
|
echo "ERROR: '$SITE_PATH' does not exist."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
function new_pass {
|
||||||
|
apg -a 1 -M nc -n 3 -m 26 -E GHIJKLMNOPQRSTUVWXYZ | head -n1 | awk '{print $1;}'
|
||||||
|
}
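# NOTE (sketch, not part of the original file): new_pass relies on apg being
# installed on the machine running these scripts. a rough, hypothetical fallback
# using openssl's CSPRNG would be:
function new_pass_openssl {
    # 26-character alphanumeric secret drawn from openssl random bytes.
    openssl rand -base64 64 | tr -dc 'a-zA-Z0-9' | head -c 26
    echo
}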
|
||||||
|
|
||||||
|
# check to see if the site_definition file exists. Stub one out if not.
|
||||||
|
SITE_DEFINITION_PATH="$SITE_PATH/site_definition"
|
||||||
|
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
|
||||||
|
echo "WARNING: '$SITE_DEFINITION_PATH' does not exist! We have stubbed one out for you, but you need to UPDATE IT!"
|
||||||
|
|
||||||
|
# stub out a site_definition with new passwords.
|
||||||
|
cat >"$SITE_DEFINITION_PATH" <<EOL
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
export SITE_TITLE="Short Title of Project"
|
||||||
|
export DOMAIN_NAME="domain.tld"
|
||||||
|
export DDNS_PASSWORD="GET_SHARED_SECRET_FROM_DNS_PROVIDER"
|
||||||
|
export SMTP_PASSWORD="GET_SHARED_SECRET_FROM_EMAIL_PROVIDER"
|
||||||
|
export GHOST_MYSQL_PASSWORD="$(new_pass)"
|
||||||
|
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
|
||||||
|
export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
|
||||||
|
export GITEA_MYSQL_PASSWORD="$(new_pass)"
|
||||||
|
export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
|
||||||
|
#export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
|
||||||
|
export MATRIX_DB_PASSWORD="$(new_pass)"
|
||||||
|
export MATRIX_SHARED_SECRET="$(new_pass)"
|
||||||
|
export MATRIX_ADMIN_PASSWORD="$(new_pass)"
|
||||||
|
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
|
||||||
|
export DEPLOY_GHOST=true
|
||||||
|
export DEPLOY_MATRIX=true
|
||||||
|
export DEPLOY_NEXTCLOUD=true
|
||||||
|
export DEPLOY_ONION_SITE=false
|
||||||
|
#export DEPLOY_BTCPAY_SERVER=true
|
||||||
|
#export WWW_INSTANCE_TYPE="t2.medium"
|
||||||
|
#export BTCPAY_ADDITIONAL_HOSTNAMES="pay.domain.tld"
|
||||||
|
#export DEV_WWW_MAC_ADDRESS="00:16:3E:AD:25:2C"
|
||||||
|
#export DEV_BTCPAY_MAC_ADDRESS="00:16:3E:AD:25:2D"
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
chmod 0744 "$SITE_DEFINITION_PATH"
|
||||||
|
exit 1
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
DOCKER_YAML_PATH="$SITE_PATH/appstack.yml"
|
||||||
|
export DOCKER_YAML_PATH="$DOCKER_YAML_PATH"
|
||||||
|
|
||||||
|
# TODO add file existence check
|
||||||
|
# shellcheck disable=SC1090
|
||||||
|
source "$SITE_PATH/site_definition"
|
||||||
|
|
||||||
|
export REMOTE_HOME="/home/ubuntu"
|
||||||
|
BACKUP_TIMESTAMP="$(date +"%Y-%m")"
|
||||||
|
UNIX_BACKUP_TIMESTAMP="$(date +%s)"
|
||||||
|
export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP"
|
||||||
|
export UNIX_BACKUP_TIMESTAMP="$UNIX_BACKUP_TIMESTAMP"
|
||||||
|
REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
|
||||||
|
LOCAL_BACKUP_PATH="$SITE_PATH/backups/$APP_TO_DEPLOY/$BACKUP_TIMESTAMP"
|
||||||
|
export LOCAL_BACKUP_PATH="$LOCAL_BACKUP_PATH"
|
||||||
|
BACKUP_PATH_CREATED=false
|
||||||
|
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
|
||||||
|
mkdir -p "$LOCAL_BACKUP_PATH"
|
||||||
|
BACKUP_PATH_CREATED=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
export BACKUP_PATH_CREATED="$BACKUP_PATH_CREATED"
|
||||||
|
mkdir -p "$SSHFS_PATH"
|
||||||
|
|
||||||
|
# VALIDATE THE INPUT from the ENVFILE
|
||||||
|
if [ -z "$DOMAIN_NAME" ]; then
|
||||||
|
echo "ERROR: DOMAIN_NAME not specified. Use the --domain-name= option."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# TODO, ensure VPS_HOSTING_TARGET is in range.
|
||||||
|
export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
|
||||||
|
export MATRIX_FQDN="$MATRIX_HOSTNAME.$DOMAIN_NAME"
|
||||||
|
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
|
||||||
|
|
||||||
|
export ADMIN_ACCOUNT_USERNAME="info"
|
||||||
|
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
|
||||||
|
export MAIL_FROM="$SITE_TITLE <$CERTIFICATE_EMAIL_ADDRESS>"
|
||||||
|
export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"
|
||||||
|
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
|
||||||
|
|
||||||
|
touch "$SITE_PATH/debug.log"
|
||||||
|
export SMTP_LOGIN="www@mail.$DOMAIN_NAME"
|
||||||
|
export VM_NAME="sovereign-stack-base"
|
||||||
|
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
|
||||||
|
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
|
||||||
|
|
||||||
|
# this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
|
||||||
|
export ROOT_DISK_SIZE_GB=20
|
||||||
|
|
||||||
|
DDNS_HOST=
|
||||||
|
if [ "$APP_TO_DEPLOY" = www ]; then
|
||||||
|
DDNS_HOST="$WWW_HOSTNAME"
|
||||||
|
ROOT_DISK_SIZE_GB=$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))
|
||||||
|
elif [ "$APP_TO_DEPLOY" = btcpay ]; then
|
||||||
|
DDNS_HOST="$BTCPAY_HOSTNAME"
|
||||||
|
if [ "$BTC_CHAIN" = mainnet ]; then
|
||||||
|
ROOT_DISK_SIZE_GB=150
|
||||||
|
elif [ "$BTC_CHAIN" = testnet ]; then
|
||||||
|
ROOT_DISK_SIZE_GB=40
|
||||||
|
fi
|
||||||
|
elif [ "$APP_TO_DEPLOY" = certonly ]; then
|
||||||
|
DDNS_HOST="$WWW_HOSTNAME"
|
||||||
|
ROOT_DISK_SIZE_GB=8
|
||||||
|
else
|
||||||
|
echo "ERROR: APP_TO_DEPLOY not within allowable bounds."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# we use this in other subshells.
|
||||||
|
export APP_TO_DEPLOY="$APP_TO_DEPLOY"
|
||||||
|
export DDNS_HOST="$DDNS_HOST"
|
||||||
|
export FQDN="$DDNS_HOST.$DOMAIN_NAME"
|
||||||
|
export LXD_VM_NAME="${FQDN//./-}"
|
||||||
|
export BTC_CHAIN="$BTC_CHAIN"
|
||||||
|
export ROOT_DISK_SIZE_GB=$ROOT_DISK_SIZE_GB
|
||||||
|
export WWW_INSTANCE_TYPE="$WWW_INSTANCE_TYPE"
|
||||||
|
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH"
|
||||||
|
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"
|
||||||
|
|
||||||
|
if [ -z "$GHOST_MYSQL_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure GHOST_MYSQL_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$GHOST_MYSQL_ROOT_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure GHOST_MYSQL_ROOT_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$NEXTCLOUD_MYSQL_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure NEXTCLOUD_MYSQL_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$GITEA_MYSQL_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure GITEA_MYSQL_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$NEXTCLOUD_MYSQL_ROOT_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure NEXTCLOUD_MYSQL_ROOT_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$GITEA_MYSQL_ROOT_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure GITEA_MYSQL_ROOT_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "$MATRIX_ADMIN_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure MATRIX_ADMIN_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
|
||||||
|
echo "ERROR: Ensure DUPLICITY_BACKUP_PASSPHRASE is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$SMTP_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure SMTP_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$DDNS_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure DDNS_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$DOMAIN_NAME" ]; then
|
||||||
|
echo "ERROR: Ensure DOMAIN_NAME is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$SITE_TITLE" ]; then
|
||||||
|
echo "ERROR: Ensure SITE_TITLE is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$MATRIX_DB_PASSWORD" ]; then
|
||||||
|
echo "ERROR: Ensure MATRIX_DB_PASSWORD is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$DEPLOY_BTCPPAY_SERVER" ]; then
|
||||||
|
echo "ERROR: Ensure DEPLOY_BTCPPAY_SERVER is configured in your site_definition."
|
||||||
|
exit 1
|
||||||
|
fi
|
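# NOTE (sketch, not part of the original script): the block of near-identical
# [ -z ... ] checks above could also be expressed as one loop over the required
# variable names, using bash indirect expansion; the list is simply collected
# from the checks above.
for REQUIRED_VAR in GHOST_MYSQL_PASSWORD GHOST_MYSQL_ROOT_PASSWORD NEXTCLOUD_MYSQL_PASSWORD \
    NEXTCLOUD_MYSQL_ROOT_PASSWORD GITEA_MYSQL_PASSWORD GITEA_MYSQL_ROOT_PASSWORD \
    MATRIX_DB_PASSWORD MATRIX_ADMIN_PASSWORD DUPLICITY_BACKUP_PASSPHRASE \
    SMTP_PASSWORD DDNS_PASSWORD DOMAIN_NAME SITE_TITLE DEPLOY_BTCPAY_SERVER; do
    # the :- default keeps 'set -u' from aborting on names that were never set.
    if [ -z "${!REQUIRED_VAR:-}" ]; then
        echo "ERROR: Ensure $REQUIRED_VAR is configured in your site_definition."
        exit 1
    fi
done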
357
stub_docker_yml.sh
Executable file
@ -0,0 +1,357 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
if [ -z "$ONION_ADDRESS" ]; then
|
||||||
|
echo "ERROR: ONION_ADDRESS is not defined."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# here's the application stack config (appstack.yml). We stub it out based on which services are enabled (ghost, nextcloud, gitea, matrix, tor).
|
||||||
|
echo "" > "$DOCKER_YAML_PATH"
|
||||||
|
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
version: "3.8"
|
||||||
|
services:
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
# This is the ghost service for HTTPS (not over Tor)
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
ghost:
|
||||||
|
image: ${GHOST_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghost-net
|
||||||
|
- ghostdb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/ghost_site:/var/lib/ghost/content
|
||||||
|
environment:
|
||||||
|
- url=https://${FQDN}
|
||||||
|
- mail__from="${MAIL_FROM}"
|
||||||
|
- mail__options__service=SMTP
|
||||||
|
- mail__transport=SMTP
|
||||||
|
- mail__options__host=${SMTP_SERVER}
|
||||||
|
- mail__options__port=${SMTP_PORT}
|
||||||
|
- mail__options__auth__user=${SMTP_LOGIN}
|
||||||
|
- mail__options__auth__pass=\${SMTP_PASSWORD}
|
||||||
|
- database__client=mysql
|
||||||
|
- database__connection__host=ghostdb
|
||||||
|
- database__connection__user=ghost
|
||||||
|
- database__connection__password=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
- database__connection__database=ghost
|
||||||
|
- database__pool__min=0
|
||||||
|
- privacy__useStructuredData=true
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
ghostdb:
|
||||||
|
image: ${GHOST_DB_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghostdb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/ghost_db:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MYSQL_ROOT_PASSWORD=\${GHOST_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=ghost
|
||||||
|
- MYSQL_USER=ghost
|
||||||
|
- MYSQL_PASSWORD=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
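# NOTE (illustration, not part of the original file): the heredoc above mixes two
# kinds of substitution. unescaped variables such as ${FQDN} are filled in while
# this stub script runs; escaped ones such as \${GHOST_MYSQL_PASSWORD} land in the
# YAML literally and are presumably resolved by the docker tooling at deploy time.
# DEMO_HOST below is a hypothetical value used only for this demonstration.
DEMO_HOST="pay.example.com"
cat <<EOL
url=https://${DEMO_HOST}
password=\${GHOST_MYSQL_PASSWORD}
EOL
# the snippet above prints:  url=https://pay.example.com
#                            password=${GHOST_MYSQL_PASSWORD}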
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nextcloud-db:
|
||||||
|
image: ${NEXTCLOUD_DB_IMAGE}
|
||||||
|
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb_read_only_compressed=OFF
|
||||||
|
networks:
|
||||||
|
- nextclouddb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/nextcloud/db/data:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MARIADB_ROOT_PASSWORD=\${NEXTCLOUD_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=nextcloud
|
||||||
|
- MYSQL_USER=nextcloud
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
nextcloud:
|
||||||
|
image: ${NEXTCLOUD_IMAGE}
|
||||||
|
networks:
|
||||||
|
- nextclouddb-net
|
||||||
|
- nextcloud-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/nextcloud/html:/var/www/html
|
||||||
|
environment:
|
||||||
|
- MYSQL_PASSWORD=\${NEXTCLOUD_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=nextcloud
|
||||||
|
- MYSQL_USER=nextcloud
|
||||||
|
- MYSQL_HOST=nextcloud-db
|
||||||
|
- NEXTCLOUD_TRUSTED_DOMAINS=${DOMAIN_NAME}
|
||||||
|
- OVERWRITEHOST=${NEXTCLOUD_FQDN}
|
||||||
|
- OVERWRITEPROTOCOL=https
|
||||||
|
- SERVERNAME=${NEXTCLOUD_FQDN}
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
matrix:
|
||||||
|
image: ${MATRIX_IMAGE}
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/matrix/data:/data
|
||||||
|
networks:
|
||||||
|
- matrix-net
|
||||||
|
- matrixdb-net
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
matrix-db:
|
||||||
|
image: ${MATRIX_DB_IMAGE}
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/matrix/db:/var/lib/postgresql/data
|
||||||
|
networks:
|
||||||
|
- matrixdb-net
|
||||||
|
environment:
|
||||||
|
- POSTGRES_PASSWORD=\${MATRIX_DB_PASSWORD}
|
||||||
|
- POSTGRES_USER=synapse
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
gitea:
|
||||||
|
image: ${GITEA_IMAGE}
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_GITEA_PATH}/data:/data
|
||||||
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
environment:
|
||||||
|
- USER_UID=1000
|
||||||
|
- USER_GID=1000
|
||||||
|
- GITEA__database__DB_TYPE=mysql
|
||||||
|
- GITEA__database__HOST=gitea-db:3306
|
||||||
|
- GITEA__database__NAME=gitea
|
||||||
|
- GITEA__database__USER=gitea
|
||||||
|
- GITEA__database__PASSWD=\${GITEA_MYSQL_PASSWORD}
|
||||||
|
networks:
|
||||||
|
- gitea-net
|
||||||
|
- giteadb-net
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
gitea-db:
|
||||||
|
image: ${GITEA_DB_IMAGE}
|
||||||
|
networks:
|
||||||
|
- giteadb-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_GITEA_PATH}/db:/var/lib/mysql
|
||||||
|
environment:
|
||||||
|
- MYSQL_ROOT_PASSWORD=\${GITEA_MYSQL_ROOT_PASSWORD}
|
||||||
|
- MYSQL_PASSWORD=\${GITEA_MYSQL_PASSWORD}
|
||||||
|
- MYSQL_DATABASE=gitea
|
||||||
|
- MYSQL_USER=gitea
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
# a hidden service that routes to the nginx container at http://onionurl.onion server block
|
||||||
|
tor-onion:
|
||||||
|
image: tor:latest
|
||||||
|
networks:
|
||||||
|
- tor-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor:/var/lib/tor
|
||||||
|
- tor-logs:/var/log/tor
|
||||||
|
configs:
|
||||||
|
- source: tor-config
|
||||||
|
target: /etc/tor/torrc
|
||||||
|
mode: 0644
|
||||||
|
deploy:
|
||||||
|
mode: replicated
|
||||||
|
replicas: 1
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
tor-ghost:
|
||||||
|
image: ${GHOST_IMAGE}
|
||||||
|
networks:
|
||||||
|
- ghostdb-net
|
||||||
|
- ghost-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor_ghost:/var/lib/ghost/content
|
||||||
|
environment:
|
||||||
|
- url=https://${ONION_ADDRESS}
|
||||||
|
- mail__from=${MAIL_FROM}
|
||||||
|
- mail__options__service=SMTP
|
||||||
|
- mail__transport=SMTP
|
||||||
|
- mail__options__host=${SMTP_SERVER}
|
||||||
|
- mail__options__port=${SMTP_PORT}
|
||||||
|
- mail__options__auth__user=${SMTP_LOGIN}
|
||||||
|
- mail__options__auth__pass=\${SMTP_PASSWORD}
|
||||||
|
- database__client=mysql
|
||||||
|
- database__connection__host=ghostdb
|
||||||
|
- database__connection__user=ghost
|
||||||
|
- database__connection__password=\${GHOST_MYSQL_PASSWORD}
|
||||||
|
- database__connection__database=ghost
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# NGINX required
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nginx:
|
||||||
|
image: ${NGINX_IMAGE}
|
||||||
|
ports:
|
||||||
|
- 0.0.0.0:443:443
|
||||||
|
- 0.0.0.0:80:80
|
||||||
|
- 0.0.0.0:8448:8448
|
||||||
|
networks:
|
||||||
|
- ghost-net
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- torghost-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- nextcloud-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- gitea-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- matrix-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
- tor-net
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# the rest of the nginx config
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
volumes:
|
||||||
|
- /etc/letsencrypt:/etc/letsencrypt:ro
|
||||||
|
configs:
|
||||||
|
- source: nginx-config
|
||||||
|
target: /etc/nginx/nginx.conf
|
||||||
|
deploy:
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
tor-data:
|
||||||
|
tor-logs:
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
#-------------------------
|
||||||
|
|
||||||
|
# networks ----------------------
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
networks:
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GHOST" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
ghost-net:
|
||||||
|
ghostdb-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
nextclouddb-net:
|
||||||
|
nextcloud-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
matrix-net:
|
||||||
|
matrixdb-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
gitea-net:
|
||||||
|
giteadb-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
tor-net:
|
||||||
|
torghost-net:
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
# -------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
# configs ----------------------
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
|
||||||
|
configs:
|
||||||
|
nginx-config:
|
||||||
|
file: ${SITE_PATH}/nginx.conf
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$DOCKER_YAML_PATH" <<EOL
|
||||||
|
tor-config:
|
||||||
|
file: $(pwd)/tor/torrc
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
# -----------------------------
|
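# NOTE (sketch, not part of the original script): the generated appstack.yml
# leaves \${...} placeholders for the docker tooling to resolve from the exported
# environment. a quick local check that the YAML parses and that every
# placeholder resolves (the actual deploy command is not shown in this commit):
docker-compose -f "$DOCKER_YAML_PATH" config --quiet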
371
stub_nginxconf.sh
Executable file
@ -0,0 +1,371 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -exu
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
if [ -z "$ONION_ADDRESS" ]; then
|
||||||
|
echo "ERROR: ONION_ADDRESS is not defined."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
# here's the NGINX config. We support ghost, nextcloud, gitea, matrix, and an optional onion site.
|
||||||
|
NGINX_CONF_PATH="$SITE_PATH/nginx.conf"
|
||||||
|
echo "" > "$NGINX_CONF_PATH"
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
http {
|
||||||
|
client_max_body_size 100m;
|
||||||
|
server_names_hash_bucket_size 128;
|
||||||
|
server_tokens off;
|
||||||
|
|
||||||
|
# this server block returns a 403 for all non-explicit host requests.
|
||||||
|
#server {
|
||||||
|
# listen 80 default_server;
|
||||||
|
# return 403;
|
||||||
|
#}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
|
||||||
|
# ghost http to https redirects.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${DOMAIN_NAME} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${DOMAIN_NAME};
|
||||||
|
return 301 https://${FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
# http://${FQDN} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${FQDN};
|
||||||
|
return 301 https://${FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# nextcloud http-to-https redirect
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${NEXTCLOUD_FQDN} redirect to https://${NEXTCLOUD_FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${NEXTCLOUD_FQDN};
|
||||||
|
return 301 https://${NEXTCLOUD_FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# matrix http to https redirect.
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${MATRIX_FQDN} redirect to https://${MATRIX_FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${MATRIX_FQDN};
|
||||||
|
return 301 https://${MATRIX_FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# gitea http to https redirect.
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# http://${GITEA_FQDN} redirect to https://${GITEA_FQDN}
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
listen [::]:80;
|
||||||
|
server_name ${GITEA_FQDN};
|
||||||
|
return 301 https://${GITEA_FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# TLS config for ghost.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# global TLS settings
|
||||||
|
ssl_prefer_server_ciphers on;
|
||||||
|
ssl_protocols TLSv1.3;
|
||||||
|
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
|
||||||
|
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
|
||||||
|
ssl_trusted_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
|
||||||
|
ssl_session_timeout 1d;
|
||||||
|
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||||
|
ssl_session_tickets off;
|
||||||
|
add_header Strict-Transport-Security "max-age=63072000" always;
|
||||||
|
ssl_stapling on;
|
||||||
|
ssl_stapling_verify on;
|
||||||
|
resolver 198.54.117.10;
|
||||||
|
|
||||||
|
|
||||||
|
# default server if hostname not specified.
|
||||||
|
#server {
|
||||||
|
# listen 443 default_server;
|
||||||
|
# return 403;
|
||||||
|
#}
|
||||||
|
|
||||||
|
# map \$http_user_agent \$og_prefix {
|
||||||
|
# ~*(googlebot|twitterbot)/ /open-graph;
|
||||||
|
# }
|
||||||
|
|
||||||
|
# https://${DOMAIN_NAME} redirect to https://${FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${DOMAIN_NAME};
|
||||||
|
return 301 https://${FQDN}\$request_uri;
|
||||||
|
}
|
||||||
|
|
||||||
|
access_log /var/log/nginx/ghost-access.log;
|
||||||
|
error_log /var/log/nginx/ghost-error.log;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# main TLS listener; proxies requests to ghost service. NGINX configured to cache
|
||||||
|
proxy_cache_path /tmp/nginx_ghost levels=1:2 keys_zone=ghostcache:600m max_size=100m inactive=24h;
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# the open server block for the HTTPS listener
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${FQDN};
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# add the Onion-Location header if specified.
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
add_header Onion-Location https://${ONION_ADDRESS}\$request_uri;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
|
||||||
|
# No cache + keep cookies for admin and previews
|
||||||
|
location ~ ^/(ghost/|p/|private/) {
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_intercept_errors on;
|
||||||
|
proxy_pass http://ghost:2368;
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# proxy config for ghost
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# Set the crawler policy.
|
||||||
|
location = /robots.txt {
|
||||||
|
add_header Content-Type text/plain;
|
||||||
|
return 200 "User-Agent: *\\nAllow: /\\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_intercept_errors on;
|
||||||
|
proxy_pass http://ghost:2368;
|
||||||
|
EOL
|
||||||
|
|
||||||
|
if [ "$ENABLE_NGINX_CACHING" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# https://stanislas.blog/2019/08/ghost-nginx-cache/ for nginx caching instructions
|
||||||
|
# Remove cookies which are useless for anonymous visitor and prevent caching
|
||||||
|
proxy_ignore_headers Set-Cookie Cache-Control;
|
||||||
|
proxy_hide_header Set-Cookie;
|
||||||
|
|
||||||
|
# Add header for cache status (miss or hit)
|
||||||
|
add_header X-Cache-Status \$upstream_cache_status;
|
||||||
|
proxy_cache ghostcache;
|
||||||
|
|
||||||
|
# Default TTL: 5 seconds
|
||||||
|
proxy_cache_valid 5s;
|
||||||
|
|
||||||
|
# Cache 404 pages for 1h
|
||||||
|
proxy_cache_valid 404 1h;
|
||||||
|
|
||||||
|
# use conditional GET requests to refresh the content from origin servers
|
||||||
|
proxy_cache_revalidate on;
|
||||||
|
proxy_buffering on;
|
||||||
|
|
||||||
|
# Allows starting a background subrequest to update an expired cache item,
|
||||||
|
# while a stale cached response is returned to the client.
|
||||||
|
proxy_cache_background_update on;
|
||||||
|
|
||||||
|
# Bypass cache for errors
|
||||||
|
proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
|
||||||
|
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# this is the closing location / block for the ghost HTTPS segment
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# TODO this MIGHT be part of the solution for Twitter Cards.
|
||||||
|
# location /contents {
|
||||||
|
# resolver 127.0.0.11 ipv6=off valid=5m;
|
||||||
|
# proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
# proxy_set_header Host \$http_host;
|
||||||
|
# proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
# proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
# proxy_intercept_errors on;
|
||||||
|
# proxy_pass http://ghost:2368\$og_prefix\$request_uri;
|
||||||
|
# }
|
||||||
|
|
||||||
|
# set up delegation for matrix
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# Set up delegation for matrix: https://github.com/matrix-org/synapse/blob/develop/docs/delegate.md
|
||||||
|
location /.well-known/matrix/server {
|
||||||
|
default_type application/json;
|
||||||
|
return 200 '{"m.server": "${MATRIX_FQDN}:8448"}';
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
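# NOTE (illustration, not part of the original script): once the stack is serving
# traffic, the delegation document can be spot-checked from any machine; '|| true'
# keeps a failed lookup from aborting under 'set -e'.
curl -s "https://${FQDN}/.well-known/matrix/server" || true
# expected output: {"m.server": "${MATRIX_FQDN}:8448"}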
|
||||||
|
|
||||||
|
# this is the closing server block for the ghost HTTPS segment
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# tor config
|
||||||
|
if [ "$DEPLOY_ONION_SITE" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# server listener for tor v3 onion endpoint
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${ONION_ADDRESS};
|
||||||
|
#access_log /var/log/nginx/tor-www.log;
|
||||||
|
|
||||||
|
# administration not allowed over tor interface.
|
||||||
|
location /ghost { deny all; }
|
||||||
|
location / {
|
||||||
|
proxy_set_header X-Forwarded-For 1.1.1.1;
|
||||||
|
proxy_set_header X-Forwarded-Proto https;
|
||||||
|
proxy_set_header X-Real-IP 1.1.1.1;
|
||||||
|
proxy_set_header Host \$http_host;
|
||||||
|
proxy_pass http://tor-ghost:2368;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_NEXTCLOUD" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# TLS listener for ${NEXTCLOUD_FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
server_name ${NEXTCLOUD_FQDN};
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_headers_hash_max_size 512;
|
||||||
|
proxy_headers_hash_bucket_size 64;
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_set_header X-NginX-Proxy true;
|
||||||
|
|
||||||
|
proxy_pass http://nextcloud:80;
|
||||||
|
}
|
||||||
|
|
||||||
|
# https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/reverse_proxy_configuration.html
|
||||||
|
location /.well-known/carddav {
|
||||||
|
return 301 \$scheme://\$host/remote.php/dav;
|
||||||
|
}
|
||||||
|
|
||||||
|
location /.well-known/caldav {
|
||||||
|
return 301 \$scheme://\$host/remote.php/dav;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$DEPLOY_MATRIX" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# TLS listener for ${MATRIX_FQDN} (matrix)
|
||||||
|
server {
|
||||||
|
# matrix RESTful calls.
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
# for the federation port
|
||||||
|
listen 8448 ssl http2;
|
||||||
|
listen [::]:8448 ssl http2;
|
||||||
|
|
||||||
|
server_name ${MATRIX_FQDN};
|
||||||
|
|
||||||
|
location ~* ^(\/_matrix|\/_synapse\/client) {
|
||||||
|
proxy_pass http://matrix:8008;
|
||||||
|
proxy_set_header X-Forwarded-For \$remote_addr;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_set_header Host \$host;
|
||||||
|
client_max_body_size 50M;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
if [ "$DEPLOY_GITEA" = true ]; then
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
# TLS listener for ${GITEA_FQDN}
|
||||||
|
server {
|
||||||
|
listen 443 ssl http2;
|
||||||
|
listen [::]:443 ssl http2;
|
||||||
|
|
||||||
|
server_name ${GITEA_FQDN};
|
||||||
|
|
||||||
|
location / {
|
||||||
|
proxy_headers_hash_max_size 512;
|
||||||
|
proxy_headers_hash_bucket_size 64;
|
||||||
|
proxy_set_header X-Real-IP \$remote_addr;
|
||||||
|
proxy_set_header Host \$host;
|
||||||
|
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
|
||||||
|
proxy_set_header X-Forwarded-Proto \$scheme;
|
||||||
|
proxy_set_header X-NginX-Proxy true;
|
||||||
|
|
||||||
|
proxy_pass http://gitea:3000;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
EOL
|
||||||
|
fi
|
||||||
|
|
||||||
|
# add the closing brace.
|
||||||
|
cat >>"$NGINX_CONF_PATH" <<EOL
|
||||||
|
}
|
||||||
|
EOL
|
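# NOTE (sketch, not part of the original script): the generated nginx.conf can be
# sanity-checked with nginx's own parser before it is shipped. this assumes the
# ssl_certificate paths under /etc/letsencrypt exist (or are mounted) wherever the
# test runs, and reuses the stack's $NGINX_IMAGE.
docker run --rm \
    -v "$NGINX_CONF_PATH":/etc/nginx/nginx.conf:ro \
    -v /etc/letsencrypt:/etc/letsencrypt:ro \
    "$NGINX_IMAGE" nginx -t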
32
tor.yml
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
version: "3.8"
|
||||||
|
services:
|
||||||
|
|
||||||
|
# a hidden service that routes to the nginx container at http://onionurl.onion server block
|
||||||
|
tor-onion:
|
||||||
|
image: tor:latest
|
||||||
|
networks:
|
||||||
|
- tor-net
|
||||||
|
volumes:
|
||||||
|
- ${REMOTE_HOME}/tor:/var/lib/tor
|
||||||
|
- tor-logs:/var/log/tor
|
||||||
|
configs:
|
||||||
|
- source: tor-config
|
||||||
|
target: /etc/tor/torrc
|
||||||
|
mode: 0644
|
||||||
|
deploy:
|
||||||
|
mode: replicated
|
||||||
|
replicas: 1
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
tor-data:
|
||||||
|
tor-logs:
|
||||||
|
|
||||||
|
networks:
|
||||||
|
tor-net:
|
||||||
|
attachable: true
|
||||||
|
|
||||||
|
configs:
|
||||||
|
tor-config:
|
||||||
|
file: ${TOR_CONFIG_PATH}
|
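# NOTE (sketch, not part of the original repo): tor.yml expects REMOTE_HOME and
# TOR_CONFIG_PATH in the environment. a quick way to confirm the file renders with
# those values filled in (paths shown are examples):
REMOTE_HOME="/home/ubuntu" TOR_CONFIG_PATH="$(pwd)/tor/torrc" \
    docker-compose -f tor.yml config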
11
tor/Dockerfile
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
FROM ubuntu:latest
|
||||||
|
RUN apt-get update && apt-get install -y tor
|
||||||
|
#COPY ./torrc /etc/tor/torrc
|
||||||
|
#RUN chown root:root /etc/tor/torrc
|
||||||
|
#RUN chmod 0644 /etc/tor/torrc
|
||||||
|
|
||||||
|
#RUN mkdir /data
|
||||||
|
#VOLUME /data
|
||||||
|
# RUN chown 1000:1000 -R /data
|
||||||
|
#USER 1000:1000
|
||||||
|
CMD tor -f /etc/tor/torrc
|
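# NOTE (sketch, not part of the original repo): the compose files reference
# 'image: tor:latest', which presumably comes from building this Dockerfile
# locally, e.g.:
docker build -t tor:latest ./tor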
8
tor/torrc
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# we configure a hidden service that listens on onion:443 and forwards to nginx:443 at the torv3 onion address
|
||||||
|
SocksPort 0
|
||||||
|
|
||||||
|
HiddenServiceDir /var/lib/tor/www
|
||||||
|
HiddenServiceVersion 3
|
||||||
|
HiddenServicePort 443 nginx:443
|
||||||
|
|
||||||
|
Log info file /var/log/tor/tor.log
|
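# NOTE (illustration, not part of the original repo): once tor has started with
# this torrc, it writes the generated v3 address to a 'hostname' file inside
# HiddenServiceDir. with the ${REMOTE_HOME}/tor bind mount used by the tor-onion
# service, it can be read from the host:
sudo cat "$REMOTE_HOME/tor/www/hostname"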
5
tor/torrc-init
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
HiddenServiceDir /var/lib/tor/www
|
||||||
|
HiddenServiceVersion 3
|
||||||
|
HiddenServicePort 443 127.0.0.1:443
|
||||||
|
|
||||||
|
Log info file /var/log/tor/tor.log
|