Compare commits
No commits in common. "25139b514c1c35cf6673e6d1e1751043e1572388" and "ddad272b9893cef30a2818341d929f0387b91e42" have entirely different histories.
25139b514c ... ddad272b98
cluster.sh (44 lines changed)
@@ -35,7 +35,9 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
 export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
 export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
 export PROJECT_NAME="public"
-#export REGISTRY_URL="https://index.docker.io/v1/"
+export REGISTRY_URL="http://$(hostname).$(resolvectl status | grep 'DNS Domain:' | awk '{ print $3 }'):5000"
+export REGISTRY_USERNAME="CHANGE_ME"
+export REGISTRY_PASSWORD="CHANGE_ME"

 EOL

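Note: the new default REGISTRY_URL points at a registry on the management machine itself (port 5000), built from the local hostname plus the search domain that systemd-resolved reports. A minimal sketch of the expansion, with hypothetical values:

hostname                                                      # -> mgmt (hypothetical)
resolvectl status | grep 'DNS Domain:' | awk '{ print $3 }'   # -> lan (hypothetical)
# REGISTRY_URL would then expand to http://mgmt.lan:5000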
@@ -110,15 +112,15 @@ else
 exit 1
 fi

-# if the disk is loop-based, then we assume the / path exists.
-if [ "$DISK_TO_USE" != loop ]; then
-# ensure we actually have that disk/partition on the system.
-if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
-echo "ERROR: We could not the disk you specified. Please run this command again and supply a different disk."
-echo "NOTE: You can always specify on the command line by adding the '--disk=/dev/sdd', for example."
-exit 1
-fi
-fi
+# # if the disk is loop-based, then we assume the / path exists.
+# if [ "$DISK_TO_USE" != loop ]; then
+# # ensure we actually have that disk/partition on the system.
+# if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
+# echo "ERROR: We could not the disk you specified. Please run this command again and supply a different disk."
+# echo "NOTE: You can always specify on the command line by adding the '--disk=/dev/sdd', for example."
+# exit 1
+# fi
+# fi

 # The MGMT Plane IP is the IP address that the LXD API binds to, which happens
 # to be the same as whichever SSH connection you're coming in on.
@@ -146,27 +148,21 @@ if ! command -v lxc >/dev/null 2>&1; then
 fi

 ssh -t "ubuntu@$FQDN" "
-set -ex
-
-# install ufw and allow SSH.
-sudo apt update
-sudo apt upgrade -y
-sudo apt install ufw htop dnsutils nano -y
-sudo ufw allow ssh
-sudo ufw allow 8443/tcp comment 'allow LXD management'
-
-# enable the host firewall
-if sudo ufw status | grep -q 'Status: inactive'; then
-sudo ufw enable
-fi
+# set host firewall policy.
+# allow LXD API from management network.
+# sudo ufw allow from ${IP_OF_MGMT_MACHINE}/32 proto tcp to $MGMT_PLANE_IP port 8443
+# enable it.
+# if sudo ufw status | grep -q 'Status: inactive'; then
+# sudo ufw enable
+# fi

-# install lxd as a snap if it's not installed.
+# install lxd as a snap if it's not installed. We only really use the LXC part of this package.
 if ! snap list | grep -q lxd; then
 sudo snap install lxd --candidate
 sleep 4
 fi
 "

 # if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we 'll
 # just attach VMs to the network interface used for for the default route.
 if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
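Note: with the ufw rules commented out, this block now only installs the lxd snap on the remote host. An illustrative way to confirm the LXD API is still reachable from the management machine afterwards (8443 is LXD's default API port; an unauthenticated GET of /1.0 returns a small JSON document when the listener is up):

# illustrative check only, not part of the commit
curl -ks "https://$FQDN:8443/1.0" >/dev/null && echo "LXD API reachable on $FQDN:8443"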
@@ -107,4 +107,3 @@ export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"

 # this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
 export ROOT_DISK_SIZE_GB=20
-export REGISTRY_URL="https://index.docker.io/v1/"
deploy.sh (48 lines changed)
@@ -175,34 +175,34 @@ source "$CLUSTER_DEFINITION"

 # if the registry URL isn't defined, then we just use the upstream dockerhub.
 # recommended to run a registry cache on your management machine though.
-# if [ -n "$REGISTRY_URL" ]; then
+if [ -n "$REGISTRY_URL" ]; then

-# cat > "$CLUSTER_PATH/registry.yml" <<EOL
-# version: 0.1
-# http:
-# addr: 0.0.0.0:5000
-# host: ${REGISTRY_URL}
+cat > "$CLUSTER_PATH/registry.yml" <<EOL
+version: 0.1
+http:
+addr: 0.0.0.0:5000
+host: ${REGISTRY_URL}

-# proxy:
-# remoteurl: ${REGISTRY_URL}
-# username: ${REGISTRY_USERNAME}
-# password: ${REGISTRY_PASSWORD}
-# EOL
+proxy:
+remoteurl: ${REGISTRY_URL}
+username: ${REGISTRY_USERNAME}
+password: ${REGISTRY_PASSWORD}
+EOL

-# # enable docker swarm mode so we can support docker stacks.
-# if docker info | grep -q "Swarm: inactive"; then
-# docker swarm init
-# fi
+# enable docker swarm mode so we can support docker stacks.
+if docker info | grep -q "Swarm: inactive"; then
+docker swarm init
+fi

-# mkdir -p "${CACHES_DIR}/registry_images"
+mkdir -p "${CACHES_DIR}/registry_images"

-# # run a docker registry pull through cache on the management machine.
-# if [ "$DEPLOY_MGMT_REGISTRY" = true ]; then
-# if ! docker stack list | grep -q registry; then
-# docker stack deploy -c management/registry_mirror.yml registry
-# fi
-# fi
-# fi
+# run a docker registry pull through cache on the management machine.
+if [ "$DEPLOY_MGMT_REGISTRY" = true ]; then
+if ! docker stack list | grep -q registry; then
+docker stack deploy -c management/registry_mirror.yml registry
+fi
+fi
+fi


 # this is our password generation mechanism. Relying on GPG for secure password generation
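Note: the uncommented heredoc writes a standard Docker Registry (distribution) config whose proxy section makes it a pull-through cache; the actual deployment happens via management/registry_mirror.yml, which is not shown in this diff. For orientation only, such a config is conventionally mounted into the stock registry:2 image like this (illustrative, not the project's deployment path):

docker run -d --name registry-cache \
  -p 5000:5000 \
  -v "$CLUSTER_PATH/registry.yml:/etc/docker/registry/config.yml" \
  registry:2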
@@ -492,7 +492,7 @@ export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"

 stub_site_definition

-# bring the VMs up under the primary domain name.
+# bring the vms up under the primary domain name.
 instantiate_vms

 # let's stub out the rest of our site definitions, if any.
@@ -26,34 +26,7 @@ EOF

 fi

-ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
-
-# if the machine doesn't exist, we create it.
-if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
-
-# create a base image if needed and instantiate a VM.
-if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
-echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site defintion."
-echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
-exit 1
-fi
-
-./stub_lxc_profile.sh "$LXD_VM_NAME"
-
-# now let's create a new VM to work with.
-lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
-
-# let's PIN the HW address for now so we don't exhaust IP
-# and so we can set DNS internally.
-lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
-lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
-
-lxc start "$LXD_VM_NAME"
-
-./wait_for_lxc_ip.sh "$LXD_VM_NAME"
-
-fi
+function prepare_host {

 # scan the remote machine and install it's identity in our SSH known_hosts file.
 ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
@@ -68,3 +41,21 @@ if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
 fi
 fi

+}
+
+ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
+
+# if the machine doesn't exist, we create it.
+if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
+
+# create a base image if needed and instantiate a VM.
+if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
+echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site defintion."
+echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
+exit 1
+fi
+
+./provision_lxc.sh
+fi
+
+prepare_host
deployment/provision_lxc.sh (new executable file, 18 lines)
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+./stub_lxc_profile.sh "$LXD_VM_NAME"
+
+# now let's create a new VM to work with.
+lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
+
+# let's PIN the HW address for now so we don't exhaust IP
+# and so we can set DNS internally.
+lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
+
+lxc start "$LXD_VM_NAME"
+
+./wait_for_lxc_ip.sh "$LXD_VM_NAME"
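Note: the new script runs under set -eu and defines none of the variables it reads, so LXD_VM_NAME, VM_NAME, MAC_ADDRESS_TO_PROVISION and ROOT_DISK_SIZE_GB must arrive exported from the caller. A hypothetical standalone invocation, values illustrative only:

export LXD_VM_NAME="www-example-com"            # hypothetical instance/profile name
export VM_NAME="sovereign-stack-base"           # hypothetical base image alias
export MAC_ADDRESS_TO_PROVISION="00:16:3e:aa:bb:cc"
export ROOT_DISK_SIZE_GB=20
./deployment/provision_lxc.sh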
deployment/run_ddns.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -eu
+
+DDNS_STRING=
+
+# for the www stack, we register only the domain name so our URLs look like https://$DOMAIN_NAME
+if [ "$VIRTUAL_MACHINE" = www ] || [ "$VIRTUAL_MACHINE" = certonly ]; then
+DDNS_STRING="@"
+else
+DDNS_STRING="$DDNS_HOST"
+fi
+
+# wait for DNS to get setup. Pass in the IP address of the actual VPS.
+MACHINE_IP="$(docker-machine ip "$FQDN")"
+DDNS_SLEEP_SECONDS=60
+while true; do
+# we test the www CNAME here so we can be assured the underlying has corrected.
+if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$MACHINE_IP" ]]; then
+echo ""
+echo "SUCCESS: The DNS appears to be configured correctly."
+
+echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow cached DNS records to expire."
+sleep "$DDNS_SLEEP_SECONDS";
+break;
+fi
+
+printf "." && sleep 2;
+done
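Note: the loop keeps polling until the resolver's answer for $FQDN matches the address docker-machine has recorded for the VPS, then pauses once more so cached records can expire. Broken out per step, with example.com standing in for $FQDN (illustrative):

getent hosts example.com | awk '{ print $1 }'   # first field of getent output is the resolved IP
docker-machine ip example.com                   # the IP docker-machine recorded for that machine
# the loop breaks only when the two values are equal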
@@ -60,6 +60,11 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
 echo "ERROR: Ensure NOSTR_ACCOUNT_PUBKEY is configured in your site_definition."
 exit 1
 fi
+
+if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
+echo "ERROR: Ensure NOSTR_ACCOUNT_PUBKEY is configured in your site_definition."
+exit 1
+fi
 fi

 if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
@@ -84,30 +89,30 @@ done

 ./stop_docker_stacks.sh

-# if [ "$DEPLOY_ONION_SITE" = true ]; then
-# # ensure the tor image is built
-# docker build -t tor:latest ./tor
+if [ "$DEPLOY_ONION_SITE" = true ]; then
+# ensure the tor image is built
+docker build -t tor:latest ./tor

-# # if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
-# # this is how we generate a new torv3 endpoint.
-# if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
-# ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_HOME/tor"
-# TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
-# export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
-# docker stack deploy -c ./tor.yml torstack
-# sleep 20
-# docker stack rm torstack
-# sleep 20
-# fi
+# if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
+# this is how we generate a new torv3 endpoint.
+if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
+ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_HOME/tor"
+TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
+export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
+docker stack deploy -c ./tor.yml torstack
+sleep 20
+docker stack rm torstack
+sleep 20
+fi

-# ONION_ADDRESS="$(ssh "$PRIMARY_WWW_FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
-# export ONION_ADDRESS="$ONION_ADDRESS"
+ONION_ADDRESS="$(ssh "$PRIMARY_WWW_FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
+export ONION_ADDRESS="$ONION_ADDRESS"

-# # # Since we run a separate ghost process, we create a new directory and symlink it to the original
-# # if ! ssh "$PRIMARY_WWW_FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
-# # ssh "$PRIMARY_WWW_FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
-# # fi
+# # Since we run a separate ghost process, we create a new directory and symlink it to the original
+# if ! ssh "$PRIMARY_WWW_FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
+# ssh "$PRIMARY_WWW_FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
 # fi
+fi

 # nginx gets deployed first since it "owns" the docker networks of downstream services.
 ./stub/nginx_yml.sh
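Note: ONION_ADDRESS is read straight from the hostname file tor generated during the short-lived torstack run. An illustrative sanity check that could follow it (not part of this commit): a v3 onion hostname is 56 base32 characters plus .onion:

if ! echo "$ONION_ADDRESS" | grep -qE '^[a-z2-7]{56}\.onion$'; then
echo "WARN: $ONION_ADDRESS does not look like a v3 onion hostname."
fi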
@@ -50,8 +50,6 @@ done


 if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
-sleep 2
-
 docker stack rm reverse-proxy

 if [ "$STOP_SERVICES" = true ]; then
@@ -114,22 +114,13 @@ EOL
 EOL
 fi

-# let's iterate over BTCPAY_ALT_NAMES and generate our SERVER_NAMES for btcpay server.
-BTCPAY_SERVER_NAMES="$BTCPAY_USER_FQDN"
-if [ -n "$BTCPAY_ALT_NAMES" ]; then
-# let's stub out the rest of our site definitions, if any.
-for ALT_NAME in ${BTCPAY_ALT_NAMES//,/ }; do
-BTCPAY_SERVER_NAMES="$BTCPAY_SERVER_NAMES $ALT_NAME.$DOMAIN_NAME"
-done
-fi
-
 # BTCPAY server http->https redirect
 cat >>"$NGINX_CONF_PATH" <<EOL
 # http://${BTCPAY_USER_FQDN} redirect to https://${BTCPAY_USER_FQDN}
 server {
 listen 80;
 listen [::]:80;
-server_name ${BTCPAY_SERVER_NAMES};
+server_name ${BTCPAY_USER_FQDN};
 return 301 https://${BTCPAY_USER_FQDN}\$request_uri;
 }

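Note: with BTCPAY_SERVER_NAMES removed, the redirect block answers only for the single BTCPay FQDN. Rendered output of the heredoc for a hypothetical BTCPAY_USER_FQDN of btcpay.example.com (the escaped \$request_uri reaches the nginx config as a literal $request_uri):

# http://btcpay.example.com redirect to https://btcpay.example.com
server {
listen 80;
listen [::]:80;
server_name btcpay.example.com;
return 301 https://btcpay.example.com$request_uri;
}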
@@ -189,8 +180,14 @@ EOL

 server_name ${DOMAIN_NAME};

+# catch all; send request to ${WWW_FQDN}
+location / {
+return 301 https://${WWW_FQDN}\$request_uri;
+}
+
 EOL


 if [ "$DEPLOY_NOSTR_RELAY" = true ]; then
 cat >>"$NGINX_CONF_PATH" <<EOL
 # We return a JSON object with name/pubkey mapping per NIP05.
@@ -206,11 +203,6 @@ EOL
 fi

 cat >>"$NGINX_CONF_PATH" <<EOL

-# catch all; send request to ${WWW_FQDN}
-location / {
-return 301 https://${WWW_FQDN}\$request_uri;
-}
 }

 #access_log /var/log/nginx/ghost-access.log;
@@ -219,7 +211,7 @@ EOL
 EOL

 cat >>"$NGINX_CONF_PATH" <<EOL
-# https server block for https://${BTCPAY_SERVER_NAMES}
+# https server block for https://${BTCPAY_USER_FQDN}
 server {
 listen 443 ssl http2;

@@ -227,7 +219,7 @@ EOL
 ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
 ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;

-server_name ${BTCPAY_SERVER_NAMES};
+server_name ${BTCPAY_USER_FQDN};

 # Route everything to the real BTCPay server
 location / {
@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -e
 cd "$(dirname "$0")"

 # let's check to ensure the management machine is on the Baseline ubuntu 21.04
@@ -16,10 +16,6 @@ fi

 sudo apt-get update

-# TODO REVIEW management machine software requirements
-# is docker-ce actually needed here? prefer to move docker registry
-# to a host on SERVERS LAN so that it can operate
-# TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
 sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
 apt-transport-https ca-certificates lsb-release \
 docker-ce-cli docker-ce containerd.io docker-compose-plugin \
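Note: this hunk only drops the TODO comments; installing docker-ce-cli, docker-ce, containerd.io and docker-compose-plugin still assumes Docker's apt repository was configured earlier in the script (not visible here). For reference, the standard repository setup from Docker's Ubuntu instructions looks roughly like this (illustrative):

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | \
  sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo apt-get update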