
Compare commits


No commits in common. "25139b514c1c35cf6673e6d1e1751043e1572388" and "ddad272b9893cef30a2818341d929f0387b91e42" have entirely different histories.

10 changed files with 153 additions and 129 deletions

(file path not shown)

@ -35,7 +35,9 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)" export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED" export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export PROJECT_NAME="public" export PROJECT_NAME="public"
#export REGISTRY_URL="https://index.docker.io/v1/" export REGISTRY_URL="http://$(hostname).$(resolvectl status | grep 'DNS Domain:' | awk '{ print $3 }'):5000"
export REGISTRY_USERNAME="CHANGE_ME"
export REGISTRY_PASSWORD="CHANGE_ME"
EOL EOL
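
Review note: the stub now defaults REGISTRY_URL to a registry on the management machine itself, derived from the hostname and the resolved search domain. A quick way to preview what that command substitution expands to (a sketch assuming the machine runs systemd-resolved, which is what gives resolvectl its 'DNS Domain:' line):

    # print the registry URL the stub would generate; assumes systemd-resolved.
    DNS_DOMAIN="$(resolvectl status | grep 'DNS Domain:' | awk '{ print $3 }')"
    echo "http://$(hostname).${DNS_DOMAIN}:5000"
    # e.g. http://mgmt.lan:5000 for a host named 'mgmt' with search domain 'lan'.
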
@@ -110,15 +112,15 @@ else
     exit 1
 fi

-# if the disk is loop-based, then we assume the / path exists.
-if [ "$DISK_TO_USE" != loop ]; then
-    # ensure we actually have that disk/partition on the system.
-    if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
-        echo "ERROR: We could not find the disk you specified. Please run this command again and supply a different disk."
-        echo "NOTE: You can always specify a disk on the command line by adding '--disk=/dev/sdd', for example."
-        exit 1
-    fi
-fi
+# # if the disk is loop-based, then we assume the / path exists.
+# if [ "$DISK_TO_USE" != loop ]; then
+#     # ensure we actually have that disk/partition on the system.
+#     if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
+#         echo "ERROR: We could not find the disk you specified. Please run this command again and supply a different disk."
+#         echo "NOTE: You can always specify a disk on the command line by adding '--disk=/dev/sdd', for example."
+#         exit 1
+#     fi
+# fi

 # The MGMT Plane IP is the IP address that the LXD API binds to, which happens
 # to be the same as whichever SSH connection you're coming in on.
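
Review note: the disk check disabled above also carried an inverted test — it raised the "could not find" error precisely when lsblk did find the disk. If it is ever re-enabled, a corrected sketch (same variables as the script) would negate the grep:

    if [ "$DISK_TO_USE" != loop ]; then
        # fail only when the disk/partition is absent on the remote host.
        if ! ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
            echo "ERROR: We could not find the disk you specified."
            exit 1
        fi
    fi
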
@@ -146,27 +148,21 @@ if ! command -v lxc >/dev/null 2>&1; then
 fi

 ssh -t "ubuntu@$FQDN" "
-set -ex
-
-# install ufw and allow SSH.
-sudo apt update
-sudo apt upgrade -y
-sudo apt install ufw htop dnsutils nano -y
-sudo ufw allow ssh
-sudo ufw allow 8443/tcp comment 'allow LXD management'
-
-# enable the host firewall
-if sudo ufw status | grep -q 'Status: inactive'; then
-    sudo ufw enable
-fi
+# set host firewall policy.
+# allow LXD API from management network.
+# sudo ufw allow from ${IP_OF_MGMT_MACHINE}/32 proto tcp to $MGMT_PLANE_IP port 8443

-# install lxd as a snap if it's not installed.
+# enable it.
+# if sudo ufw status | grep -q 'Status: inactive'; then
+#     sudo ufw enable
+# fi
+
+# install lxd as a snap if it's not installed. We only really use the LXC part of this package.
 if ! snap list | grep -q lxd; then
     sudo snap install lxd --candidate
     sleep 4
 fi
 "

 # if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we'll
 # just attach VMs to the network interface used for the default route.
 if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then

(file path not shown)

@@ -107,4 +107,3 @@ export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"

 # this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
 export ROOT_DISK_SIZE_GB=20
-export REGISTRY_URL="https://index.docker.io/v1/"

(file path not shown)

@@ -164,7 +164,7 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
     echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
     exit 1
 fi

 source "$CLUSTER_DEFINITION"

 ###########################
@@ -175,34 +175,34 @@ source "$CLUSTER_DEFINITION"

 # if the registry URL isn't defined, then we just use the upstream dockerhub.
 # recommended to run a registry cache on your management machine though.
-# if [ -n "$REGISTRY_URL" ]; then
-#     cat > "$CLUSTER_PATH/registry.yml" <<EOL
-# version: 0.1
-# http:
-#   addr: 0.0.0.0:5000
-#   host: ${REGISTRY_URL}
-# proxy:
-#   remoteurl: ${REGISTRY_URL}
-#   username: ${REGISTRY_USERNAME}
-#   password: ${REGISTRY_PASSWORD}
-# EOL
-#
-#     # enable docker swarm mode so we can support docker stacks.
-#     if docker info | grep -q "Swarm: inactive"; then
-#         docker swarm init
-#     fi
-#
-#     mkdir -p "${CACHES_DIR}/registry_images"
-#
-#     # run a docker registry pull-through cache on the management machine.
-#     if [ "$DEPLOY_MGMT_REGISTRY" = true ]; then
-#         if ! docker stack list | grep -q registry; then
-#             docker stack deploy -c management/registry_mirror.yml registry
-#         fi
-#     fi
-# fi
+if [ -n "$REGISTRY_URL" ]; then
+    cat > "$CLUSTER_PATH/registry.yml" <<EOL
+version: 0.1
+http:
+  addr: 0.0.0.0:5000
+  host: ${REGISTRY_URL}
+proxy:
+  remoteurl: ${REGISTRY_URL}
+  username: ${REGISTRY_USERNAME}
+  password: ${REGISTRY_PASSWORD}
+EOL
+
+    # enable docker swarm mode so we can support docker stacks.
+    if docker info | grep -q "Swarm: inactive"; then
+        docker swarm init
+    fi
+
+    mkdir -p "${CACHES_DIR}/registry_images"
+
+    # run a docker registry pull-through cache on the management machine.
+    if [ "$DEPLOY_MGMT_REGISTRY" = true ]; then
+        if ! docker stack list | grep -q registry; then
+            docker stack deploy -c management/registry_mirror.yml registry
+        fi
+    fi
+fi

 # this is our password generation mechanism, relying on GPG for secure password generation.
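
Review note: enabling this block deploys a registry pull-through cache as a swarm stack on the management machine. Cluster hosts only benefit if their Docker daemons are pointed at the cache; one conventional way to do that (not shown in this diff; the endpoint below is illustrative) is a registry-mirrors entry in /etc/docker/daemon.json:

    # on each host that should pull through the cache (plain-HTTP endpoint is hypothetical):
    echo '{ "registry-mirrors": ["http://mgmt.lan:5000"], "insecure-registries": ["mgmt.lan:5000"] }' \
        | sudo tee /etc/docker/daemon.json
    sudo systemctl restart docker
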
@@ -262,7 +262,7 @@ function instantiate_vms {
         export MAC_ADDRESS_TO_PROVISION=
         export VPS_HOSTNAME="$VPS_HOSTNAME"
         export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"

         # ensure the admin has set the MAC address for the base image.
         if [ -z "$SOVEREIGN_STACK_MAC_ADDRESS" ]; then
             echo "ERROR: SOVEREIGN_STACK_MAC_ADDRESS is undefined. Check your project definition."
@@ -334,7 +334,7 @@ function instantiate_vms {
         # delete the remote VPS.
         lxc delete --force "$LXD_VM_NAME"

         # Then we run the script again to re-instantiate a new VPS, restoring all user data;
         # if the restore directory doesn't exist, then we end up with a new site.
         echo "INFO: Recreating the remote VPS then restoring user data."
         sleep 2
@@ -350,7 +350,7 @@ function instantiate_vms {
     fi

     # The machine does not exist. Let's bring it into existence, restoring from latest backup.
     echo "Machine does not exist. Creating."
     ./deployment/deploy_vms.sh
 fi
@@ -442,7 +442,7 @@ EOL
     chmod 0744 "$PROJECT_DEFINITION_PATH"

     echo "INFO: we stubbed a new project_definition for you at '$PROJECT_DEFINITION_PATH'. Go update it yo!"
     echo "INFO: Learn more at https://www.sovereign-stack.org/project-definitions/"

     exit 1
 fi
@ -467,7 +467,7 @@ if [ "$PROJECT_NAME" != "$CURRENT_PROJECT" ]; then
echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!" echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
lxc project create "$PROJECT_NAME" lxc project create "$PROJECT_NAME"
fi fi
echo "INFO: switch to lxd project '$PROJECT_NAME'." echo "INFO: switch to lxd project '$PROJECT_NAME'."
lxc project switch "$PROJECT_NAME" lxc project switch "$PROJECT_NAME"
@@ -492,7 +492,7 @@ export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
 stub_site_definition

-# bring the VMs up under the primary domain name.
+# bring the vms up under the primary domain name.
 instantiate_vms

 # let's stub out the rest of our site definitions, if any.

(file path not shown)

@@ -26,6 +26,23 @@ EOF
 fi

+function prepare_host {
+    # scan the remote machine and install its identity in our SSH known_hosts file.
+    ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
+
+    # create a directory to store backup archives. This exists on all new VMs.
+    ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
+
+    # if this execution is for btcpayserver, then we run the stub/btcpay setup script,
+    # but only if it hasn't been executed before.
+    if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
+        if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
+            ./btcpayserver/stub_btcpay_setup.sh
+        fi
+    fi
+}
+
 ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"

 # if the machine doesn't exist, we create it.
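
Review note: the btcpay.complete guard in prepare_host works because '[[ ! -f ... ]]' runs on the remote host while '; echo $?' runs locally, echoing ssh's exit status; the setup script therefore runs only when the remote flag file is absent. An equivalent, more direct guard (a hypothetical refactor using POSIX test, so it doesn't assume a remote bash) would be:

    if ! ssh "$BTCPAY_FQDN" test -f "$REMOTE_HOME/btcpay.complete"; then
        ./btcpayserver/stub_btcpay_setup.sh
    fi
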
@@ -38,33 +55,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
         exit 1
     fi

-    ./stub_lxc_profile.sh "$LXD_VM_NAME"
-
-    # now let's create a new VM to work with.
-    lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
-
-    # let's PIN the HW address for now so we don't exhaust IPs
-    # and so we can set DNS internally.
-    lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
-    lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
-
-    lxc start "$LXD_VM_NAME"
-
-    ./wait_for_lxc_ip.sh "$LXD_VM_NAME"
+    ./provision_lxc.sh
 fi

-# scan the remote machine and install its identity in our SSH known_hosts file.
-ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
-
-# create a directory to store backup archives. This exists on all new VMs.
-ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
-
-# if this execution is for btcpayserver, then we run the stub/btcpay setup script,
-# but only if it hasn't been executed before.
-if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
-    if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
-        ./btcpayserver/stub_btcpay_setup.sh
-    fi
-fi
+prepare_host

deployment/provision_lxc.sh (new executable file)

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -eu
+cd "$(dirname "$0")"
+
+./stub_lxc_profile.sh "$LXD_VM_NAME"
+
+# now let's create a new VM to work with.
+lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
+
+# let's PIN the HW address for now so we don't exhaust IPs
+# and so we can set DNS internally.
+lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
+
+lxc start "$LXD_VM_NAME"
+
+./wait_for_lxc_ip.sh "$LXD_VM_NAME"

deployment/run_ddns.sh (new executable file)

@@ -0,0 +1,29 @@
+#!/bin/bash
+
+set -eu
+
+DDNS_STRING=
+
+# for the www stack, we register only the domain name so our URLs look like https://$DOMAIN_NAME
+if [ "$VIRTUAL_MACHINE" = www ] || [ "$VIRTUAL_MACHINE" = certonly ]; then
+    DDNS_STRING="@"
+else
+    DDNS_STRING="$DDNS_HOST"
+fi
+
+# wait for DNS to get set up. Pass in the IP address of the actual VPS.
+MACHINE_IP="$(docker-machine ip "$FQDN")"
+
+DDNS_SLEEP_SECONDS=60
+while true; do
+    # we test the www CNAME here so we can be assured the underlying record has been updated.
+    if [[ "$(getent hosts "$FQDN" | awk '{ print $1 }')" == "$MACHINE_IP" ]]; then
+        echo ""
+        echo "SUCCESS: The DNS appears to be configured correctly."
+        echo "INFO: Waiting $DDNS_SLEEP_SECONDS seconds to allow cached DNS records to expire."
+        sleep "$DDNS_SLEEP_SECONDS";
+        break;
+    fi
+
+    printf "." && sleep 2;
+done
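
Review note: as added, DDNS_STRING is computed but never consumed in this file, so presumably a later commit wires it into the actual DDNS registration call. The polling loop itself only needs the variables below; an invocation sketch (values illustrative):

    export VIRTUAL_MACHINE=www            # or certonly, btcpayserver, ...
    export FQDN=www.example.com           # the record the loop waits on
    export DDNS_HOST=www                  # subdomain used when not www/certonly
    ./deployment/run_ddns.sh
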

(file path not shown)

@@ -60,6 +60,11 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
             echo "ERROR: Ensure NOSTR_ACCOUNT_PUBKEY is configured in your site_definition."
             exit 1
         fi
+
+        if [ -z "$NOSTR_ACCOUNT_PUBKEY" ]; then
+            echo "ERROR: Ensure NOSTR_ACCOUNT_PUBKEY is configured in your site_definition."
+            exit 1
+        fi
     fi

     if [ -z "$DUPLICITY_BACKUP_PASSPHRASE" ]; then
@@ -84,30 +89,30 @@ done
 ./stop_docker_stacks.sh

-# if [ "$DEPLOY_ONION_SITE" = true ]; then
-#     # ensure the tor image is built
-#     docker build -t tor:latest ./tor
-#
-#     # if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
-#     # this is how we generate a new torv3 endpoint.
-#     if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
-#         ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_HOME/tor"
-#         TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
-#         export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
-#         docker stack deploy -c ./tor.yml torstack
-#         sleep 20
-#         docker stack rm torstack
-#         sleep 20
-#     fi
-#
-#     ONION_ADDRESS="$(ssh "$PRIMARY_WWW_FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
-#     export ONION_ADDRESS="$ONION_ADDRESS"
-#
-#     # # Since we run a separate ghost process, we create a new directory and symlink it to the original
-#     # if ! ssh "$PRIMARY_WWW_FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
-#     #     ssh "$PRIMARY_WWW_FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
-#     # fi
-# fi
+if [ "$DEPLOY_ONION_SITE" = true ]; then
+    # ensure the tor image is built
+    docker build -t tor:latest ./tor
+
+    # if the tor folder doesn't exist, we provision a new one. Otherwise you need to restore.
+    # this is how we generate a new torv3 endpoint.
+    if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_HOME/tor/www ]"; then
+        ssh "$PRIMARY_WWW_FQDN" "mkdir -p $REMOTE_HOME/tor"
+        TOR_CONFIG_PATH="$(pwd)/tor/torrc-init"
+        export TOR_CONFIG_PATH="$TOR_CONFIG_PATH"
+        docker stack deploy -c ./tor.yml torstack
+        sleep 20
+        docker stack rm torstack
+        sleep 20
+    fi
+
+    ONION_ADDRESS="$(ssh "$PRIMARY_WWW_FQDN" sudo cat "${REMOTE_HOME}"/tor/www/hostname)"
+    export ONION_ADDRESS="$ONION_ADDRESS"
+
+    # # Since we run a separate ghost process, we create a new directory and symlink it to the original
+    # if ! ssh "$PRIMARY_WWW_FQDN" "[ -L $REMOTE_HOME/tor_ghost ]"; then
+    #     ssh "$PRIMARY_WWW_FQDN" ln -s "$REMOTE_HOME/ghost_site/themes $REMOTE_HOME/tor_ghost/themes"
+    # fi
+fi

 # nginx gets deployed first since it "owns" the docker networks of downstream services.
 ./stub/nginx_yml.sh

(file path not shown)

@@ -50,8 +50,6 @@ done
 if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
-    sleep 2
     docker stack rm reverse-proxy

 if [ "$STOP_SERVICES" = true ]; then

(file path not shown)

@@ -114,22 +114,13 @@ EOL
 EOL
 fi

-# let's iterate over BTCPAY_ALT_NAMES and generate our SERVER_NAMES for btcpay server.
-BTCPAY_SERVER_NAMES="$BTCPAY_USER_FQDN"
-if [ -n "$BTCPAY_ALT_NAMES" ]; then
-    # let's stub out the rest of our site definitions, if any.
-    for ALT_NAME in ${BTCPAY_ALT_NAMES//,/ }; do
-        BTCPAY_SERVER_NAMES="$BTCPAY_SERVER_NAMES $ALT_NAME.$DOMAIN_NAME"
-    done
-fi
-
 # BTCPAY server http->https redirect
 cat >>"$NGINX_CONF_PATH" <<EOL
 # http://${BTCPAY_USER_FQDN} redirect to https://${BTCPAY_USER_FQDN}
 server {
     listen 80;
     listen [::]:80;

-    server_name ${BTCPAY_SERVER_NAMES};
+    server_name ${BTCPAY_USER_FQDN};
     return 301 https://${BTCPAY_USER_FQDN}\$request_uri;
 }
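
Review note: dropping BTCPAY_SERVER_NAMES means the alternate hostnames from BTCPAY_ALT_NAMES no longer appear in any server_name directive; only BTCPAY_USER_FQDN is redirected and served. After heredoc substitution, the emitted redirect block reduces to something like (hostname illustrative):

    # http://btcpay.example.com redirect to https://btcpay.example.com
    server {
        listen 80;
        listen [::]:80;
        server_name btcpay.example.com;
        return 301 https://btcpay.example.com$request_uri;
    }
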
@@ -188,9 +179,15 @@ EOL
     ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;

     server_name ${DOMAIN_NAME};

+    # catch all; send request to ${WWW_FQDN}
+    location / {
+        return 301 https://${WWW_FQDN}\$request_uri;
+    }
+
 EOL

 if [ "$DEPLOY_NOSTR_RELAY" = true ]; then
     cat >>"$NGINX_CONF_PATH" <<EOL
     # We return a JSON object with name/pubkey mapping per NIP05.
@@ -206,11 +203,6 @@ EOL
 fi

 cat >>"$NGINX_CONF_PATH" <<EOL
-    # catch all; send request to ${WWW_FQDN}
-    location / {
-        return 301 https://${WWW_FQDN}\$request_uri;
-    }
-
 }

 #access_log /var/log/nginx/ghost-access.log;
@@ -219,7 +211,7 @@ EOL
 EOL

 cat >>"$NGINX_CONF_PATH" <<EOL
-# https server block for https://${BTCPAY_SERVER_NAMES}
+# https server block for https://${BTCPAY_USER_FQDN}
 server {
     listen 443 ssl http2;
@@ -227,7 +219,7 @@ EOL
     ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
     ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;

-    server_name ${BTCPAY_SERVER_NAMES};
+    server_name ${BTCPAY_USER_FQDN};

     # Route everything to the real BTCPay server
     location / {

(file path not shown)

@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -e
 cd "$(dirname "$0")"

 # let's check to ensure the management machine is on the baseline Ubuntu 21.04
@@ -16,10 +16,6 @@ fi
 sudo apt-get update

-# TODO REVIEW management machine software requirements
-# is docker-ce actually needed here? prefer to move docker registry
-# to a host on SERVERS LAN so that it can operate
-# TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
 sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
     apt-transport-https ca-certificates lsb-release \
     docker-ce-cli docker-ce containerd.io docker-compose-plugin \