Undo this commit.

This commit is contained in:
Derek Smith 2023-02-01 14:44:05 -05:00
parent d91ba02e7f
commit 2c0645c951
Signed by: farscapian
GPG Key ID: 8F1CD799CCA516CC
48 changed files with 1133 additions and 652 deletions

2
.gitignore vendored

@ -1 +1 @@
./reset.sh
publish.sh

View File

@ -10,11 +10,13 @@
"shellcheck.enableQuickFix": true,
"shellcheck.run": "onType",
"shellcheck.executablePath": "shellcheck",
"shellcheck.customArgs": [],
"shellcheck.customArgs": [
"-x"
],
"shellcheck.ignorePatterns": {},
"shellcheck.exclude": [
// "SC1090",
// "SC1091",
"SC1090",
"SC1091",
"SC2029"
],
"terminal.integrated.fontFamily": "monospace",

1
NOTES

@ -1 +0,0 @@
Trezor MUST use the "Crypto" firmware with shitcoin support in order for 2FA (WEBAUTHN) to work. Bummer.

View File

@ -1,10 +1,29 @@
#!/bin/bash
set -eu
set -ex
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
export WWW_SERVER_MAC_ADDRESS=
export DEPLOY_WWW_SERVER=false
export DEPLOY_BTCPAY_SERVER=false
export DEPLOY_GHOST=false
export DEPLOY_NEXTCLOUD=false
@ -16,6 +35,8 @@ export BTCPAY_HOSTNAME_IN_CERT="btcpay"
export NEXTCLOUD_HOSTNAME="nextcloud"
export GITEA_HOSTNAME="git"
export NOSTR_HOSTNAME="relay"
export CLAMS_HOSTNAME="clams"
export CLAMS_GIT_REPO="https://github.com/farscapian/clams-app-docker.git"
export SITE_LANGUAGE_CODES="en"
export LANGUAGE_CODE="en"
@ -37,7 +58,7 @@ export DUPLICITY_BACKUP_PASSPHRASE=
export SSH_HOME="$HOME/.ssh"
export PASS_HOME="$HOME/.password-store"
export VM_NAME="sovereign-stack-base"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
@ -48,23 +69,6 @@ export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
export NEXTCLOUD_SPACE_GB=10
# first of all, if there are uncommitted changes, we quit. You better stash or commit!
# Remote VPS instances are tagged with your current git HEAD so we know which code revision
# was used when provisioning the VPS.
#LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/master)"
#export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
# check if there are any uncommitted changes. It's dangerous to instantiate VMs using
# code that hasn't been committed.
# if git update-index --refresh | grep -q "needs update"; then
# echo "ERROR: You have uncommited changes! Better stash your work with 'git stash'."
# exit 1
# fi
BTC_CHAIN=regtest
export BTC_CHAIN="$BTC_CHAIN"
DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"
@ -89,7 +93,6 @@ export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export SOVEREIGN_STACK_MAC_ADDRESS=
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
@ -97,9 +100,11 @@ export CLUSTERS_DIR="$HOME/ss-clusters"
export PROJECTS_DIR="$HOME/ss-projects"
export SITES_PATH="$HOME/ss-sites"
# The base VM image.
export BASE_LXC_IMAGE="ubuntu/22.04/cloud"
export LXD_UBUNTU_BASE_VERSION="22.04"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
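# A quick illustration of the names derived above, assuming the default
# LXD_UBUNTU_BASE_VERSION="22.04":
#   echo "$BASE_IMAGE_VM_NAME"        # prints: ss-base-22-04
#   echo "$BASE_LXC_IMAGE"            # prints: ubuntu/22.04/cloud
#   echo "$UBUNTU_BASE_IMAGE_NAME"    # prints: ss-ubuntu-22-04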
# Deploy a registry cache on your management machine.
export DEPLOY_MGMT_REGISTRY=false
@ -114,3 +119,4 @@ export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"
# this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
export ROOT_DISK_SIZE_GB=20
export REGISTRY_URL="https://index.docker.io/v1/"
export PRIMARY_DOMAIN=

View File

@ -35,7 +35,7 @@ cd btcpayserver-docker
export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
export NBITCOIN_NETWORK="${BTC_CHAIN}"
export NBITCOIN_NETWORK="${BITCOIN_CHAIN}"
export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -ex
cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
@ -8,7 +8,7 @@ cd "$(dirname "$0")"
# to use LXD.
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=
DISK_TO_USE=loop
# override the cluster name.
CLUSTER_NAME="${1:-}"
@ -18,7 +18,7 @@ if [ -z "$CLUSTER_NAME" ]; then
fi
#shellcheck disable=SC1091
source ./defaults.sh
source ../defaults.sh
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
@ -30,11 +30,10 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
cat >"$CLUSTER_DEFINITION" <<EOL
#!/bin/bash
# see https://www.sovereign-stack.org/cluster_definition for more info!
# see https://www.sovereign-stack.org/cluster-definition for more info!
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export PROJECT_NAME="regtest"
export BITCOIN_CHAIN="regtest"
#export REGISTRY_URL="https://index.docker.io/v1/"
EOL
@ -42,7 +41,7 @@ EOL
chmod 0744 "$CLUSTER_DEFINITION"
echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
echo "Use this file to customize your cluster deployment;"
echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for an example."
echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
exit 1
fi
@ -50,6 +49,12 @@ source "$CLUSTER_DEFINITION"
if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
FQDN="${2:-}"
if [ -z "$FQDN" ]; then
echo "ERROR: You MUST provide the FQDN of the cluster host."
exit 1
fi
shift
if [ -z "$FQDN" ]; then
@ -96,12 +101,10 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
echo "INFO: It looks like the DISK_TO_USE has not been set. Enter it now."
echo ""
ssh "ubuntu@$FQDN" lsblk
ssh "ubuntu@$FQDN" lsblk --paths
echo "Please enter the disk or partition that Sovereign Stack will use to store data (default: loop): "
read -r DISK_TO_USE
else
DISK_TO_USE=loop
fi
else
@ -113,8 +116,8 @@ fi
# if the disk is loop-based, then we assume the / path exists.
if [ "$DISK_TO_USE" != loop ]; then
# ensure we actually have that disk/partition on the system.
if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then
echo "ERROR: We could not the disk you specified. Please run this command again and supply a different disk."
if ! ssh "ubuntu@$FQDN" lsblk --paths | grep -q "$DISK_TO_USE"; then
echo "ERROR: We could not findthe disk you specified. Please run this command again and supply a different disk."
echo "NOTE: You can always specify on the command line by adding the '--disk=/dev/sdd', for example."
exit 1
fi
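# For illustration, 'lsblk --paths' lists block devices by their full path, so the
# grep above matches output such as (example only; your devices will differ):
#   /dev/sda    8:0    0  931.5G  0 disk
#   /dev/sda1   8:1    0  931.5G  0 part /
#   /dev/sdb    8:16   0  476.9G  0 disk
# e.g., passing '--disk=/dev/sdb' would match the last line and pass this check.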
@ -134,13 +137,18 @@ if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
fi
if ! command -v lxc >/dev/null 2>&1; then
if lxc profile list --format csv | grep -q sovereign-stack; then
lxc profile delete sovereign-stack
if lxc profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc profile delete "$BASE_IMAGE_VM_NAME"
sleep 1
fi
if lxc network list --format csv | grep -q lxdbrSS; then
lxc network delete lxdbrSS
if lxc network list --format csv | grep -q lxdbr0; then
lxc network delete lxdbr0
sleep 1
fi
if lxc network list --format csv | grep -q lxdbr1; then
lxc network delete lxdbr1
sleep 1
fi
fi
@ -148,22 +156,13 @@ fi
ssh -t "ubuntu@$FQDN" "
set -e
# install ufw and allow SSH.
sudo apt update
sudo apt upgrade -y
sudo apt install ufw htop dnsutils nano -y
sudo ufw allow ssh
sudo ufw allow 8443/tcp comment 'allow LXD management'
# enable the host firewall
if sudo ufw status | grep -q 'Status: inactive'; then
sudo ufw enable
fi
# install tools and dependencies
sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
# install lxd as a snap if it's not installed.
if ! snap list | grep -q lxd; then
sudo snap install lxd --candidate
sleep 4
sudo snap install lxd
sleep 10
fi
"
@ -173,27 +172,41 @@ if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
DATA_PLANE_MACVLAN_INTERFACE="$(ssh -t ubuntu@"$FQDN" ip route | grep default | cut -d " " -f 5)"
fi
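# As an example of the parsing above, if the host's default route looks like:
#   default via 192.168.1.1 dev enp3s0 proto dhcp metric 100
# then the fifth space-delimited field is 'enp3s0', which becomes the macvlan
# parent interface (the interface name here is illustrative).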
# stub out the lxd init file for the remote SSH endpoint.
CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/lxdinit_profile.yml"
cat >"$CLUSTER_MASTER_LXD_INIT" <<EOF
# run lxd init on the remote server.
cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
config:
core.https_address: ${MGMT_PLANE_IP}:8443
core.trust_password: ${LXD_CLUSTER_PASSWORD}
core.dns_address: ${MGMT_PLANE_IP}
images.auto_update_interval: 15
networks:
- name: lxdbrSS
- name: lxdbr0
description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-}"
type: bridge
config:
ipv4.nat: "true"
ipv4.dhcp: "true"
ipv6.address: "none"
dns.mode: "managed"
- name: lxdbr1
description: "For regtest"
type: bridge
config:
ipv4.address: 10.139.144.1/24
ipv4.nat: "false"
ipv4.dhcp: "false"
ipv6.address: "none"
dns.mode: "none"
#managed: true
description: ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-},${DISK_TO_USE:-}
# lxdbrSS is an isolated inter-vm network segment with no outbound Internet access.
ipv4.nat: false
ipv4.dhcp: true
ipv6.address: none
dns.mode: managed
profiles:
- config: {}
description: "default profile for sovereign-stack instances."
devices:
root:
path: /
pool: ss-base
type: disk
name: default
cluster:
server_name: ${CLUSTER_NAME}
enabled: true
@ -206,8 +219,7 @@ cluster:
cluster_token: ""
EOF
# configure the LXD Daemon with our preseed.
cat "$CLUSTER_MASTER_LXD_INIT" | ssh "ubuntu@$FQDN" lxd init --preseed
# #
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
if wait-for-it -t 20 "$FQDN:8443"; then
@ -222,4 +234,17 @@ else
exit 1
fi
echo "HINT: Now you can consider running 'ss-deploy'."
# create the default storage pool if necessary
if ! lxc storage list --format csv | grep -q ss-base; then
if [ "$DISK_TO_USE" != loop ]; then
# we omit putting a size here, so LXD will consume the entire disk if given '/dev/sdb', or the partition if given '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
lxc storage create ss-base zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
lxc storage create ss-base zfs
fi
fi

54
deployment/cluster_env.sh Executable file

@ -0,0 +1,54 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
CURRENT_CLUSTER="$(lxc remote get-default)"
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! Better stash your work with 'git stash'."
exit 1
fi
fi
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
exit 1
fi
source "$CLUSTER_DEFINITION"
# source the project definition.
# Now let's load the project definition.
PROJECT_PATH="$PROJECTS_DIR/$BITCOIN_CHAIN"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
exit 1
fi

View File

@ -1,61 +1,42 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
./stub_lxc_profile.sh sovereign-stack
./stub_lxc_profile.sh "$BASE_IMAGE_VM_NAME"
# create the default storage pool if necessary
if ! lxc storage list --format csv | grep -q "sovereign-stack"; then
if [ "$DISK_TO_USE" != loop ]; then
# we omit putting a size here, so LXD will consume the entire disk if given '/dev/sdb', or the partition if given '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE.
lxc storage create "sovereign-stack" zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
lxc storage create "sovereign-stack" zfs
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
# If our template doesn't exist, we create one.
if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list -q --format csv | grep -q "$VM_NAME"; then
lxc delete "$VM_NAME" --force
# remove the host from our ssh known_hosts file, else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VM_NAME"
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list -q --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$(date +%Y-%m)"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "ubuntu-base"; then
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "ubuntu-base" --public --vm --auto-update
fi
# this vm is used temperarily with
lxc init --profile="sovereign-stack" "ubuntu-base" "$VM_NAME" --vm
# let's PIN the HW address for now so we don't exhaust IP addresses
# and so we can set DNS internally.
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm
# TODO move this sovereign-stack-base construction VM to a separate dedicated IP
lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$SOVEREIGN_STACK_MAC_ADDRESS"
lxc config set "$BASE_IMAGE_VM_NAME"
lxc start "$VM_NAME"
sleep 10
lxc start "$BASE_IMAGE_VM_NAME"
sleep 70
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it 127.0.0.1:22 -t 120
# let's wait for the LXC vm remote machine to get an IP address.
./wait_for_lxc_ip.sh "$VM_NAME"
# stop the VM and get a snapshot.
lxc stop "$VM_NAME"
lxc publish "$CLUSTER_NAME:$VM_NAME" --alias "$VM_NAME" --public
lxc stop "$BASE_IMAGE_VM_NAME"
lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$(date +%Y-%m)"
fi
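# Note on the snapshot name: 'date +%Y-%m' expands to the current year and month
# (e.g., '2023-02' at the time of this commit), so the snapshot is 'ss-docker-2023-02'
# and the base image is effectively refreshed at most once per month by the check above.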

View File

@ -1,11 +1,8 @@
#!/bin/bash
set -e
set -ex
cd "$(dirname "$0")"
RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
export RESPOSITORY_PATH="$RESPOSITORY_PATH"
./check_dependencies.sh
DOMAIN_NAME=
@ -108,7 +105,7 @@ if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
fi
# set up our default paths.
source ./defaults.sh
source ../defaults.sh
export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2"
@ -126,26 +123,23 @@ export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
# ensure our cluster path is created.
mkdir -p "$CLUSTER_PATH"
# if an authorized_keys file does not exist, we'll stub one out with the current user.
# add additional id_rsa.pub entries manually for more administrative logins.
if [ ! -f "$CLUSTER_PATH/authorized_keys" ]; then
cat "$SSH_HOME/id_rsa.pub" >> "$CLUSTER_PATH/authorized_keys"
echo "INFO: Sovereign Stack just stubbed out '$CLUSTER_PATH/authorized_keys'. Go update it."
echo " Add ssh pubkeys for your various management machines, if any."
echo " By default we added your main ssh pubkey: '$SSH_HOME/id_rsa.pub'."
exit 1
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
if [ ! -f "$SSH_PUBKEY_PATH" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
# ensure our cluster path is created.
mkdir -p "$CLUSTER_PATH"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
#########################################
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster'."
exit 1
fi
@ -158,7 +152,6 @@ function new_pass {
function instantiate_vms {
export BTC_CHAIN="$BTC_CHAIN"
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"
@ -173,7 +166,7 @@ function instantiate_vms {
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh"
source ./domain_env.sh
# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
@ -181,12 +174,15 @@ function instantiate_vms {
exit 1
fi
# # switch to the default project
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# first let's get the DISK_TO_USE and DATA_PLANE_MACVLAN_INTERFACE from the ss-config
# which is set up during LXD cluster creation ss-cluster.
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv | grep lxdbrSS | grep -q ss-config; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbrSS | grep ss-config)"
if lxc network list --format csv | grep lxdbr0 | grep -q ss-config; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@ -196,23 +192,26 @@ function instantiate_vms {
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
DISK_TO_USE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f3)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
export DISK_TO_USE="$DISK_TO_USE"
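# To illustrate the parsing above: ss-cluster stores its settings in the lxdbr0
# network description, so the matched CSV line contains a quoted field like
# "ss-config,enp3s0,/dev/sdb" (values are placeholders). From that field:
#   cut -d ',' -f2  ->  enp3s0    (DATA_PLANE_MACVLAN_INTERFACE)
#   cut -d ',' -f3  ->  /dev/sdb  (DISK_TO_USE)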
./deployment/create_lxc_base.sh
# # switch to the default project to ensure the base image is created.
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# create the lxd base image.
./create_lxc_base.sh
# # now switch to the current chain project.
# if ! lxc project list --format csv | grep -a "$BITCOIN_CHAIN"; then
# lxc project switch "$BITCOIN_CHAIN"
# fi
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
# ensure the admin has set the MAC address for the base image.
if [ -z "$SOVEREIGN_STACK_MAC_ADDRESS" ]; then
echo "ERROR: SOVEREIGN_STACK_MAC_ADDRESS is undefined. Check your project definition."
exit 1
fi
DDNS_HOST=
if [ "$VIRTUAL_MACHINE" = www ]; then
@ -226,17 +225,19 @@ function instantiate_vms {
DDNS_HOST="$WWW_HOSTNAME"
ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
DDNS_HOST="$BTCPAY_HOSTNAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
if [ "$BTC_CHAIN" = mainnet ]; then
if [ "$BITCOIN_CHAIN" = mainnet ]; then
ROOT_DISK_SIZE_GB=150
elif [ "$BTC_CHAIN" = testnet ]; then
elif [ "$BITCOIN_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=70
fi
elif [ "$VIRTUAL_MACHINE" = "sovereign-stack" ]; then
DDNS_HOST="sovereign-stack-base"
elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
DDNS_HOST="$BASE_IMAGE_VM_NAME"
ROOT_DISK_SIZE_GB=8
else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
@ -249,17 +250,13 @@ function instantiate_vms {
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
./deployment/deploy_vms.sh
# if the local docker client isn't logged in, do so;
# this helps prevent docker pull errors since they throttle.
# if [ ! -f "$HOME/.docker/config.json" ]; then
# echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
# fi
./deploy_vms.sh
# this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0
@ -293,7 +290,7 @@ export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY="NOSTR_IDENTITY_PUBKEY_GOES_HERE"
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
@ -305,7 +302,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1
fi
@ -313,28 +310,26 @@ EOL
}
CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_PATH="$PROJECTS_DIR/$BITCOIN_CHAIN"
mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects"
export PROJECT_PATH="$PROJECT_PATH"
# create a symlink from ./clusterpath/projects/project
if [ ! -d "$CLUSTER_PATH/projects/$PROJECT_NAME" ]; then
ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$PROJECT_NAME"
if [ ! -d "$CLUSTER_PATH/projects/$BITCOIN_CHAIN" ]; then
ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$BITCOIN_CHAIN"
fi
# check if we need to provision a new lxc project.
if [ "$PROJECT_NAME" != "$CURRENT_PROJECT" ]; then
if ! lxc project list | grep -q "$PROJECT_NAME"; then
echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
lxc project create "$PROJECT_NAME"
fi
echo "INFO: switch to lxd project '$PROJECT_NAME'."
lxc project switch "$PROJECT_NAME"
# create the lxc project as specified by BITCOIN_CHAIN
if ! lxc project list | grep -q "$BITCOIN_CHAIN"; then
echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
lxc project create "$BITCOIN_CHAIN"
fi
# # check if we need to provision a new lxc project.
# if [ "$BITCOIN_CHAIN" != "$CURRENT_PROJECT" ]; then
# echo "INFO: switch to lxd project '$BITCOIN_CHAIN'."
# lxc project switch "$BITCOIN_CHAIN"
# fi
# check to see if the env file exists. Exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
@ -346,11 +341,10 @@ if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# see https://www.sovereign-stack.org/project-definition for more info.
export WWW_SERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export BTC_CHAIN="regtest|testnet|mainnet"
export PRIMARY_DOMAIN="domain0.tld"
export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
export PRIMARY_DOMAIN=
#export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="6"
@ -359,8 +353,8 @@ export WWW_SERVER_MEMORY_MB="4096"
EOL
chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it yo!"
echo "INFO: Learn more at https://www.sovereign-stack.org/project-definitions/"
echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/projects/"
exit 1
fi
@ -368,6 +362,22 @@ fi
# source the project definition.
source "$PROJECT_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project_definition."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
@ -403,11 +413,32 @@ done
# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
bash -c "./deployment/www/go.sh"
bash -c "./www/go.sh"
ssh ubuntu@"$PRIMARY_WWW_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead
fi
#
LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
bash -c "./deployment/btcpayserver/go.sh"
bash -c "./btcpayserver/go.sh"
ssh ubuntu@"$BTCPAY_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead
fi
# deploy clams wallet.
LOCAL_CLAMS_PATH="$(pwd)/www/clams"
if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
if [ ! -d "$LOCAL_CLAMS_PATH" ]; then
git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_PATH"
else
cd "$LOCAL_CLAMS_PATH"
git pull
cd -
fi
fi

View File

@ -1,15 +1,8 @@
#!/bin/bash
set -eu
set -ex
cd "$(dirname "$0")"
# let's make sure we have an ssh keypair. We just use $SSH_HOME/id_rsa
# TODO convert this to an SSH private key held on a Trezor. Thus a Trezor-T is required for
# login operations. This should be configurable of course.
if [ ! -f "$SSH_HOME/id_rsa" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
## This is a weird if clause since we need to LEFT-ALIGN the statement below.
SSH_STRING="Host ${FQDN}"
@ -40,8 +33,9 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
./stub_lxc_profile.sh "$LXD_VM_NAME"
lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$(date +%Y-%m)" "$LXD_VM_NAME"
# now let's create a new VM to work with.
lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm
#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
# let's PIN the HW address for now so we don't exhaust IP addresses
# and so we can set DNS internally.
@ -50,8 +44,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
lxc start "$LXD_VM_NAME"
./wait_for_lxc_ip.sh "$LXD_VM_NAME"
bash -c "./wait_for_lxc_ip.sh --lxc-name=$LXD_VM_NAME"
fi
# scan the remote machine and install its identity in our SSH known_hosts file.
@ -68,3 +61,6 @@ if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
fi
fi
ssh "$PRIMARY_WWW_FQDN" -- echo ""

38
deployment/destroy.sh Executable file

@ -0,0 +1,38 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
# this script takes down all resources in the cluster. It is DESTRUCTIVE of user data, so make sure your data is backed up first.
RESPONSE=
read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
. ../defaults.sh
. ./cluster_env.sh
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
if lxc list | grep -q "$LXD_NAME"; then
lxc delete -f "$LXD_NAME"
# remove the host from our ssh known_hosts file, else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi
if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
fi
done
# delete the base image so it can be re-created.
if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc delete -f "$BASE_IMAGE_VM_NAME"
# remove the host from our ssh known_hosts file, else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi

View File

@ -8,11 +8,11 @@ export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export BTC_CHAIN="$BTC_CHAIN"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"

15
deployment/help.txt Normal file

@ -0,0 +1,15 @@
Sovereign Stack Help.
You are in the Sovereign Stack management environment. From here, you can issue several commands:
ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment on your active LXD remote (lxc remote get-default).
ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTIVE of user data).
ss-migrate - Migrates an existing deployment to the newest version of Sovereign Stack.
ss-show - Shows the LXD resources associated with the current remote.
For more information about all these topics, consult the Sovereign Stack website. Relevant posts include:
- https://www.sovereign-stack.org/commands
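Example workflow (illustrative; the cluster name and host below are placeholders):

  ss-cluster dev cluster-host.example.com   # take cluster-host.example.com under management
  ss-deploy                                 # create a deployment on the now-active LXD remote
  ss-migrate                                # later, migrate that deployment to a newer version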

51
deployment/migrate.sh Executable file

@ -0,0 +1,51 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
USER_SAYS_YES=false
for i in "$@"; do
case $i in
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
;;
esac
done
. ../defaults.sh
. ./cluster_env.sh
# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves it to the management machine.
# the --stop flag ensures that services do NOT come back online.
# in other words, deploy backs up everything but doesn't restart any services.
bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
# call the destroy script. If the user proceeds, user data is DESTROYED!
USER_SAYS_YES="$USER_SAYS_YES" ./destroy.sh
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

78
deployment/reset.sh Executable file

@ -0,0 +1,78 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
source ../defaults.sh
echo "Need to uncomment"
exit 1
# ./destroy.sh
# # these only get initialized upon creation, so we MUST delete here so they get recreated.
# if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
# lxc profile delete "$BASE_IMAGE_VM_NAME"
# fi
# if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
# lxc image rm "$BASE_IMAGE_VM_NAME"
# fi
# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
# fi
# CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
# if ! lxc info | grep -q "project: default"; then
# lxc project switch default
# lxc project delete "$CURRENT_PROJECT"
# fi
# if lxc profile show default | grep -q "root:"; then
# lxc profile device remove default root
# fi
# if lxc profile show default| grep -q "eth0:"; then
# lxc profile device remove default eth0
# fi
# if lxc network list --format csv | grep -q lxdbr0; then
# lxc network delete lxdbr0
# fi
# if lxc network list --format csv | grep -q lxdbr1; then
# lxc network delete lxdbr1
# fi
# if lxc storage list --format csv | grep -q ss-base; then
# lxc storage delete ss-base
# fi
# CURRENT_REMOTE="$(lxc remote get-default)"
# if ! lxc remote get-default | grep -q "local"; then
# lxc remote switch local
# lxc remote remove "$CURRENT_REMOTE"
# fi
# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
# fi
# if snap list | grep -q lxd; then
# sudo snap remove lxd
# sleep 2
# fi
# if zfs list | grep -q sovereign-stack; then
# sudo zfs destroy -r sovereign-stack
# fi
# if zfs list | grep -q "sovereign-stack"; then
# sudo zfs destroy -r "rpool/lxd"
# fi

View File

@ -3,5 +3,8 @@
lxc list
lxc network list
lxc profile list
lxc storage list
lxc image list
lxc storage list
lxc storage info ss-base
lxc project list
lxc remote list

View File

@ -1,11 +1,12 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
LXD_HOSTNAME="$1"
LXD_HOSTNAME="${1:-}"
# generate the custom cloud-init file. cloud-init installs and configures sshd.
SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub")
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
@ -36,8 +37,13 @@ EOF
fi
# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image.
if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
# first of all, if there are uncommitted changes, we quit. You better stash or commit!
# Remote VPS instances are tagged with your current git HEAD so we know which code revision
# was used when provisioning the VPS.
LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
# this is for the base image only...
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
@ -48,7 +54,13 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
package_reboot_if_required: false
preserve_hostname: false
fqdn: sovereign-stack
fqdn: ${BASE_IMAGE_VM_NAME}
apt:
sources:
docker.list:
source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable"
keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
packages:
- curl
@ -70,6 +82,10 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- wait-for-it
- dnsutils
- wget
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
groups:
- docker
@ -83,73 +99,7 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
write_files:
- path: /home/ubuntu/docker.asc
content: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
=0YYh
-----END PGP PUBLIC KEY BLOCK-----
- path: /etc/ssh/ssh_config
content: |
Port 22
@ -163,25 +113,13 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- path: /etc/docker/daemon.json
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
]
"registry-mirrors": ["${REGISTRY_URL}"],
"labels": [ "githead=${LATEST_GIT_COMMIT}" ]
}
runcmd:
- cat /home/ubuntu/docker.asc | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
- sudo rm /home/ubuntu/docker.asc
- echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
- echo "alias bitcoin-cli=\"bitcoin-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- echo "alias lightning-cli=\"bitcoin-lightning-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose
- sudo apt-get install -y openssh-server
EOF
else
@ -194,7 +132,7 @@ else
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: false
preserve_hostname: true
fqdn: ${FQDN}
user.network-config: |
@ -235,7 +173,7 @@ description: Default LXD profile for ${FILENAME}
devices:
root:
path: /
pool: sovereign-stack
pool: ss-base
type: disk
config:
source: cloud-init:config
@ -243,20 +181,19 @@ devices:
EOF
# Stub out the network piece for the base image.
if [ "$LXD_HOSTNAME" = sovereign-stack ] ; then
if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ] ; then
# If we are deploying the www, we attach the vm to the underlay via macvlan.
#
cat >> "$YAML_PATH" <<EOF
enp5s0:
name: enp5s0
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying the www, we attach the vm to the underlay via macvlan.
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
@ -264,10 +201,10 @@ cat >> "$YAML_PATH" <<EOF
type: nic
enp6s0:
name: enp6s0
network: lxdbrSS
network: lxdbr1
type: nic
name: ${FILENAME}
name: ${PRIMARY_DOMAIN}
EOF
fi
@ -275,8 +212,7 @@ fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"

View File

@ -1,8 +1,34 @@
#!/bin/bash
set -e
set -ex
LXC_INSTANCE_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxc-name=*)
LXC_INSTANCE_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
exit 1
fi
if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
exit 1
fi
LXC_INSTANCE_NAME="$1"
IP_V4_ADDRESS=
while true; do
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
@ -17,12 +43,3 @@ while true; do
printf '.'
fi
done
# Let's remove any entry in our known_hosts, then add it back.
# we are using IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu

1
deployment/www/.gitignore vendored Normal file

@ -0,0 +1 @@
clams

View File

@ -3,17 +3,11 @@
set -eu
cd "$(dirname "$0")"
# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files.
# we could eliminate that and simply save duplicity backups to the management machine running the script
# this could be done by using a local path and mounting it on the remote VPS.
# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs
# step 1: run duplicity on the remote system to backup all files to the remote system.
# --allow-source-mismatch
# this script backs up a source path to a destination folder on the remote VM
# then pulls that data down to the management environment
# if the source files to backup don't exist on the remote host, we return.
if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_SOURCE_BACKUP_PATH ]"; then
echo "INFO: The path to backup does not exist. There's nothing to backup! That's ok, execution will continue."
exit 0
fi
@ -33,4 +27,3 @@ rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"
# step 4: unmount the SSHFS filesystem and cleanup.
umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH"

View File

@ -1,7 +1,6 @@
#!/bin/bash
set -e
set -ex
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
docker pull certbot/certbot:latest
@ -12,9 +11,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh"
source ../../defaults.sh
source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh"
source ../domain_env.sh
# on the LXD side, we are trying to expose ALL OUR services from one IP address, which terminates
# at a caching reverse proxy that runs nginx.
@ -23,6 +22,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
# this is the minimum required: www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi

View File

@ -14,10 +14,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh"
source ../../defaults.sh
source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh"
source ../domain_env.sh