
Undo this commit.

Derek Smith 2023-02-01 14:44:05 -05:00
parent d91ba02e7f
commit 2c0645c951
Signed by: farscapian
GPG Key ID: 8F1CD799CCA516CC
48 changed files with 1133 additions and 652 deletions

.gitignore vendored

@ -1 +1 @@
./reset.sh publish.sh


@ -10,11 +10,13 @@
"shellcheck.enableQuickFix": true, "shellcheck.enableQuickFix": true,
"shellcheck.run": "onType", "shellcheck.run": "onType",
"shellcheck.executablePath": "shellcheck", "shellcheck.executablePath": "shellcheck",
"shellcheck.customArgs": [], "shellcheck.customArgs": [
"-x"
],
"shellcheck.ignorePatterns": {}, "shellcheck.ignorePatterns": {},
"shellcheck.exclude": [ "shellcheck.exclude": [
// "SC1090", "SC1090",
// "SC1091", "SC1091",
"SC2029" "SC2029"
], ],
"terminal.integrated.fontFamily": "monospace", "terminal.integrated.fontFamily": "monospace",

NOTES

@ -1 +0,0 @@
Trezor MUST Use the "Crypto" firmware with shitcoin support in order for 2FA (WEBAUTHN) to work. Bummer.


@ -1,10 +1,29 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommitted changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
export WWW_SERVER_MAC_ADDRESS=
export DEPLOY_WWW_SERVER=false export DEPLOY_WWW_SERVER=false
export DEPLOY_BTCPAY_SERVER=false
export DEPLOY_GHOST=false export DEPLOY_GHOST=false
export DEPLOY_NEXTCLOUD=false export DEPLOY_NEXTCLOUD=false
@ -16,6 +35,8 @@ export BTCPAY_HOSTNAME_IN_CERT="btcpay"
export NEXTCLOUD_HOSTNAME="nextcloud" export NEXTCLOUD_HOSTNAME="nextcloud"
export GITEA_HOSTNAME="git" export GITEA_HOSTNAME="git"
export NOSTR_HOSTNAME="relay" export NOSTR_HOSTNAME="relay"
export CLAMS_HOSTNAME="clams"
export CLAMS_GIT_REPO="https://github.com/farscapian/clams-app-docker.git"
export SITE_LANGUAGE_CODES="en" export SITE_LANGUAGE_CODES="en"
export LANGUAGE_CODE="en" export LANGUAGE_CODE="en"
@ -37,7 +58,7 @@ export DUPLICITY_BACKUP_PASSPHRASE=
export SSH_HOME="$HOME/.ssh" export SSH_HOME="$HOME/.ssh"
export PASS_HOME="$HOME/.password-store" export PASS_HOME="$HOME/.password-store"
export VM_NAME="sovereign-stack-base"
export BTCPAY_SERVER_CPU_COUNT="4" export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096" export BTCPAY_SERVER_MEMORY_MB="4096"
@ -48,23 +69,6 @@ export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
export NEXTCLOUD_SPACE_GB=10 export NEXTCLOUD_SPACE_GB=10
# first of all, if there are uncommitted changes, we quit. You better stash or commit!
# Remote VPS instances are tagged with your current git HEAD so we know which code revision
# was used when provisioning the VPS.
#LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/master)"
#export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
# check if there are any uncommitted changes. It's dangerous to instantiate VMs using
# code that hasn't been committed.
# if git update-index --refresh | grep -q "needs update"; then
# echo "ERROR: You have uncommitted changes! Better stash your work with 'git stash'."
# exit 1
# fi
BTC_CHAIN=regtest
export BTC_CHAIN="$BTC_CHAIN"
DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy" DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"
@ -89,7 +93,6 @@ export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay" export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export SOVEREIGN_STACK_MAC_ADDRESS=
export WWW_SERVER_MAC_ADDRESS= export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS= export BTCPAYSERVER_MAC_ADDRESS=
@ -97,9 +100,11 @@ export CLUSTERS_DIR="$HOME/ss-clusters"
export PROJECTS_DIR="$HOME/ss-projects" export PROJECTS_DIR="$HOME/ss-projects"
export SITES_PATH="$HOME/ss-sites" export SITES_PATH="$HOME/ss-sites"
# The base VM image. # The base VM image.
export BASE_LXC_IMAGE="ubuntu/22.04/cloud" export LXD_UBUNTU_BASE_VERSION="22.04"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
# Deploy a registry cache on your management machine. # Deploy a registry cache on your management machine.
export DEPLOY_MGMT_REGISTRY=false export DEPLOY_MGMT_REGISTRY=false
@ -114,3 +119,4 @@ export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs"
# this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA. # this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA.
export ROOT_DISK_SIZE_GB=20 export ROOT_DISK_SIZE_GB=20
export REGISTRY_URL="https://index.docker.io/v1/" export REGISTRY_URL="https://index.docker.io/v1/"
export PRIMARY_DOMAIN=


@ -35,7 +35,7 @@ cd btcpayserver-docker
export BTCPAY_HOST="${BTCPAY_USER_FQDN}" export BTCPAY_HOST="${BTCPAY_USER_FQDN}"
export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}" export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}"
export NBITCOIN_NETWORK="${BTC_CHAIN}" export NBITCOIN_NETWORK="${BITCOIN_CHAIN}"
export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}" export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}"
export BTCPAYGEN_LIGHTNING="clightning" export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc" export BTCPAYGEN_CRYPTO1="btc"


@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
# This script is meant to be executed on the management machine. # This script is meant to be executed on the management machine.
@ -8,7 +8,7 @@ cd "$(dirname "$0")"
# to use LXD. # to use LXD.
DATA_PLANE_MACVLAN_INTERFACE= DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE= DISK_TO_USE=loop
# override the cluster name. # override the cluster name.
CLUSTER_NAME="${1:-}" CLUSTER_NAME="${1:-}"
@ -18,7 +18,7 @@ if [ -z "$CLUSTER_NAME" ]; then
fi fi
#shellcheck disable=SC1091 #shellcheck disable=SC1091
source ./defaults.sh source ../defaults.sh
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME" export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
@ -30,11 +30,10 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then
cat >"$CLUSTER_DEFINITION" <<EOL cat >"$CLUSTER_DEFINITION" <<EOL
#!/bin/bash #!/bin/bash
# see https://www.sovereign-stack.org/cluster_definition for more info! # see https://www.sovereign-stack.org/cluster-definition for more info!
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)" export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED" export BITCOIN_CHAIN="regtest"
export PROJECT_NAME="regtest"
#export REGISTRY_URL="https://index.docker.io/v1/" #export REGISTRY_URL="https://index.docker.io/v1/"
EOL EOL
@ -42,7 +41,7 @@ EOL
chmod 0744 "$CLUSTER_DEFINITION" chmod 0744 "$CLUSTER_DEFINITION"
echo "We stubbed out a '$CLUSTER_DEFINITION' file for you." echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
echo "Use this file to customize your cluster deployment;" echo "Use this file to customize your cluster deployment;"
echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for an example." echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
exit 1 exit 1
fi fi
@ -50,6 +49,12 @@ source "$CLUSTER_DEFINITION"
if ! lxc remote list | grep -q "$CLUSTER_NAME"; then if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
FQDN="${2:-}" FQDN="${2:-}"
if [ -z "$FQDN" ]; then
echo "ERROR: You MUST provide the FQDN of the cluster host."
exit 1
fi
shift shift
if [ -z "$FQDN" ]; then if [ -z "$FQDN" ]; then
@ -96,12 +101,10 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
echo "INFO: It looks like the DISK_TO_USE has not been set. Enter it now." echo "INFO: It looks like the DISK_TO_USE has not been set. Enter it now."
echo "" echo ""
ssh "ubuntu@$FQDN" lsblk ssh "ubuntu@$FQDN" lsblk --paths
echo "Please enter the disk or partition that Sovereign Stack will use to store data (default: loop): " echo "Please enter the disk or partition that Sovereign Stack will use to store data (default: loop): "
read -r DISK_TO_USE read -r DISK_TO_USE
else
DISK_TO_USE=loop
fi fi
else else
@ -113,8 +116,8 @@ fi
# if the disk is loop-based, then we assume the / path exists. # if the disk is loop-based, then we assume the / path exists.
if [ "$DISK_TO_USE" != loop ]; then if [ "$DISK_TO_USE" != loop ]; then
# ensure we actually have that disk/partition on the system. # ensure we actually have that disk/partition on the system.
if ssh "ubuntu@$FQDN" lsblk | grep -q "$DISK_TO_USE"; then if ! ssh "ubuntu@$FQDN" lsblk --paths | grep -q "$DISK_TO_USE"; then
echo "ERROR: We could not the disk you specified. Please run this command again and supply a different disk." echo "ERROR: We could not findthe disk you specified. Please run this command again and supply a different disk."
echo "NOTE: You can always specify on the command line by adding the '--disk=/dev/sdd', for example." echo "NOTE: You can always specify on the command line by adding the '--disk=/dev/sdd', for example."
exit 1 exit 1
fi fi
@ -134,13 +137,18 @@ if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
fi fi
if ! command -v lxc >/dev/null 2>&1; then if ! command -v lxc >/dev/null 2>&1; then
if lxc profile list --format csv | grep -q sovereign-stack; then if lxc profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc profile delete sovereign-stack lxc profile delete "$BASE_IMAGE_VM_NAME"
sleep 1 sleep 1
fi fi
if lxc network list --format csv | grep -q lxdbrSS; then if lxc network list --format csv | grep -q lxdbr0; then
lxc network delete lxdbrSS lxc network delete lxdbr0
sleep 1
fi
if lxc network list --format csv | grep -q lxdbr1; then
lxc network delete lxdbr1
sleep 1 sleep 1
fi fi
fi fi
@ -148,22 +156,13 @@ fi
ssh -t "ubuntu@$FQDN" " ssh -t "ubuntu@$FQDN" "
set -e set -e
# install ufw and allow SSH. # install tools/dependencies
sudo apt update sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
sudo apt upgrade -y
sudo apt install ufw htop dnsutils nano -y
sudo ufw allow ssh
sudo ufw allow 8443/tcp comment 'allow LXD management'
# enable the host firewall
if sudo ufw status | grep -q 'Status: inactive'; then
sudo ufw enable
fi
# install lxd as a snap if it's not installed. # install lxd as a snap if it's not installed.
if ! snap list | grep -q lxd; then if ! snap list | grep -q lxd; then
sudo snap install lxd --candidate sudo snap install lxd
sleep 4 sleep 10
fi fi
" "
@ -173,27 +172,41 @@ if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
DATA_PLANE_MACVLAN_INTERFACE="$(ssh -t ubuntu@"$FQDN" ip route | grep default | cut -d " " -f 5)" DATA_PLANE_MACVLAN_INTERFACE="$(ssh -t ubuntu@"$FQDN" ip route | grep default | cut -d " " -f 5)"
fi fi
# stub out the lxd init file for the remote SSH endpoint. # run lxd init on the remote server.
CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/lxdinit_profile.yml" cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
cat >"$CLUSTER_MASTER_LXD_INIT" <<EOF
config: config:
core.https_address: ${MGMT_PLANE_IP}:8443 core.https_address: ${MGMT_PLANE_IP}:8443
core.trust_password: ${LXD_CLUSTER_PASSWORD} core.trust_password: ${LXD_CLUSTER_PASSWORD}
core.dns_address: ${MGMT_PLANE_IP}
images.auto_update_interval: 15 images.auto_update_interval: 15
networks: networks:
- name: lxdbrSS - name: lxdbr0
description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-}"
type: bridge
config:
ipv4.nat: "true"
ipv4.dhcp: "true"
ipv6.address: "none"
dns.mode: "managed"
- name: lxdbr1
description: "For regtest"
type: bridge type: bridge
config: config:
ipv4.address: 10.139.144.1/24 ipv4.address: 10.139.144.1/24
ipv4.nat: "false" ipv4.nat: false
ipv4.dhcp: "false" ipv4.dhcp: true
ipv6.address: "none" ipv6.address: none
dns.mode: "none" dns.mode: managed
#managed: true profiles:
description: ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-},${DISK_TO_USE:-} - config: {}
# lxdbrSS is an isolated inter-vm network segment with no outbound Internet access. description: "default profile for sovereign-stack instances."
devices:
root:
path: /
pool: ss-base
type: disk
name: default
cluster: cluster:
server_name: ${CLUSTER_NAME} server_name: ${CLUSTER_NAME}
enabled: true enabled: true
@ -206,8 +219,7 @@ cluster:
cluster_token: "" cluster_token: ""
EOF EOF
# configure the LXD Daemon with our preseed. # #
cat "$CLUSTER_MASTER_LXD_INIT" | ssh "ubuntu@$FQDN" lxd init --preseed
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it. # ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
if wait-for-it -t 20 "$FQDN:8443"; then if wait-for-it -t 20 "$FQDN:8443"; then
@ -222,4 +234,17 @@ else
exit 1 exit 1
fi fi
echo "HINT: Now you can consider running 'ss-deploy'." # create the default storage pool if necessary
if ! lxc storage list --format csv | grep -q ss-base; then
if [ "$DISK_TO_USE" != loop ]; then
# we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
lxc storage create ss-base zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
lxc storage create ss-base zfs
fi
fi
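As a side note on the TODO above: one possible pre-flight check before handing DISK_TO_USE to ZFS would be to refuse disks that still have mounted filesystems. This is only an illustrative sketch, not part of this commit:

    # hypothetical sanity check for DISK_TO_USE (illustration only)
    if ssh "ubuntu@$FQDN" lsblk --noheadings --output MOUNTPOINT "$DISK_TO_USE" | grep -q .; then
        echo "ERROR: '$DISK_TO_USE' still has mounted filesystems; refusing to create the ss-base pool on it."
        exit 1
    fi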

deployment/cluster_env.sh Executable file

@ -0,0 +1,54 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
CURRENT_CLUSTER="$(lxc remote get-default)"
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
# check if there are any uncommited changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! Better stash your work with 'git stash'."
exit 1
fi
fi
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
exit 1
fi
source "$CLUSTER_DEFINITION"
# source project definition.
# Now let's load the project definition.
PROJECT_PATH="$PROJECTS_DIR/$BITCOIN_CHAIN"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
exit 1
fi
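To make the sourcing above concrete: with the default directories from defaults.sh (CLUSTERS_DIR, PROJECTS_DIR and SITES_PATH under $HOME) and BITCOIN_CHAIN=regtest, this script expects files roughly at the following locations (the remote name and domain are placeholders):

    ~/ss-clusters/<remote-name>/cluster_definition
    ~/ss-projects/regtest/project_definition
    ~/ss-sites/example.com/site_definition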


@ -1,61 +1,42 @@
#!/bin/bash #!/bin/bash
set -eu set -exu
cd "$(dirname "$0")" cd "$(dirname "$0")"
./stub_lxc_profile.sh sovereign-stack ./stub_lxc_profile.sh "$BASE_IMAGE_VM_NAME"
# create the default storage pool if necessary
if ! lxc storage list --format csv | grep -q "sovereign-stack"; then
if [ "$DISK_TO_USE" != loop ]; then # let's download our base image.
# we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'. if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# TODO do some sanity/resource checking on DISK_TO_USE.
lxc storage create "sovereign-stack" zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
lxc storage create "sovereign-stack" zfs
fi
fi
# If our template doesn't exist, we create one.
if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list -q --format csv | grep -q "$VM_NAME"; then
lxc delete "$VM_NAME" --force
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VM_NAME"
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "ubuntu-base"; then
# if the image doesn't exist, download it from Ubuntu's image server # if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream. # we don't really need to cache this locally since it gets continually updated upstream.
lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "ubuntu-base" --public --vm --auto-update lxc image copy "images:$BASE_LXC_IMAGE" "$CLUSTER_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list -q --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$(date +%Y-%m)"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi fi
# this vm is used temporarily with else
lxc init --profile="sovereign-stack" "ubuntu-base" "$VM_NAME" --vm # the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
# TODO move this sovereign-stack-base construction VM to separate dedicated IP # TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$SOVEREIGN_STACK_MAC_ADDRESS" lxc config set "$BASE_IMAGE_VM_NAME"
lxc start "$VM_NAME" lxc start "$BASE_IMAGE_VM_NAME"
sleep 10
# let's wait for the LXC vm remote machine to get an IP address. sleep 70
./wait_for_lxc_ip.sh "$VM_NAME"
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it 127.0.0.1:22 -t 120
# stop the VM and get a snapshot. # stop the VM and get a snapshot.
lxc stop "$VM_NAME" lxc stop "$BASE_IMAGE_VM_NAME"
lxc publish "$CLUSTER_NAME:$VM_NAME" --alias "$VM_NAME" --public lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$(date +%Y-%m)"
fi fi


@ -1,11 +1,8 @@
#!/bin/bash #!/bin/bash
set -e set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
export RESPOSITORY_PATH="$RESPOSITORY_PATH"
./check_dependencies.sh ./check_dependencies.sh
DOMAIN_NAME= DOMAIN_NAME=
@ -108,7 +105,7 @@ if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
fi fi
# set up our default paths. # set up our default paths.
source ./defaults.sh source ../defaults.sh
export DOMAIN_NAME="$DOMAIN_NAME" export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2" export REGISTRY_DOCKER_IMAGE="registry:2"
@ -126,26 +123,23 @@ export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END" export RESTART_FRONT_END="$RESTART_FRONT_END"
# ensure our cluster path is created. # todo convert this to Trezor-T
mkdir -p "$CLUSTER_PATH" SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
# if an authorized_keys file does not exist, we'll stub one out with the current user. if [ ! -f "$SSH_PUBKEY_PATH" ]; then
# add additional id_rsa.pub entries manually for more administrative logins. # generate a new SSH key for the base vm image.
if [ ! -f "$CLUSTER_PATH/authorized_keys" ]; then ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
cat "$SSH_HOME/id_rsa.pub" >> "$CLUSTER_PATH/authorized_keys"
echo "INFO: Sovereign Stack just stubbed out '$CLUSTER_PATH/authorized_keys'. Go update it."
echo " Add ssh pubkeys for your various management machines, if any."
echo " By default we added your main ssh pubkey: '$SSH_HOME/id_rsa.pub'."
exit 1
fi fi
# ensure our cluster path is created.
mkdir -p "$CLUSTER_PATH"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION" export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
######################################### #########################################
if [ ! -f "$CLUSTER_DEFINITION" ]; then if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'." echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster'."
exit 1 exit 1
fi fi
@ -158,7 +152,6 @@ function new_pass {
function instantiate_vms { function instantiate_vms {
export BTC_CHAIN="$BTC_CHAIN"
export UPDATE_BTCPAY="$UPDATE_BTCPAY" export UPDATE_BTCPAY="$UPDATE_BTCPAY"
export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER" export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER"
@ -173,7 +166,7 @@ function instantiate_vms {
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ./domain_env.sh
# VALIDATE THE INPUT from the ENVFILE # VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then if [ -z "$DOMAIN_NAME" ]; then
@ -181,12 +174,15 @@ function instantiate_vms {
exit 1 exit 1
fi fi
# # switch to the default project
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# first let's get the DISK_TO_USE and DATA_PLANE_MACVLAN_INTERFACE from the ss-config # Goal is to get the macvlan interface.
# which is set up during LXD cluster creation ss-cluster.
LXD_SS_CONFIG_LINE= LXD_SS_CONFIG_LINE=
if lxc network list --format csv | grep lxdbrSS | grep -q ss-config; then if lxc network list --format csv | grep lxdbr0 | grep -q ss-config; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbrSS | grep ss-config)" LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbr0 | grep ss-config)"
fi fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@ -196,23 +192,26 @@ function instantiate_vms {
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')" CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)" DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
DISK_TO_USE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f3)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE" export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
export DISK_TO_USE="$DISK_TO_USE"
./deployment/create_lxc_base.sh
# # switch to the default project to ensure the base image is created.
# if ! lxc project list --format csv | grep -a "default (current)"; then
# lxc project switch default
# fi
# create the lxd base image.
./create_lxc_base.sh
# # now switch to the current chain project.
# if ! lxc project list --format csv | grep -a "$BITCOIN_CHAIN"; then
# lxc project switch "$BITCOIN_CHAIN"
# fi
export MAC_ADDRESS_TO_PROVISION= export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME" export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME" export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
# ensure the admin has set the MAC address for the base image.
if [ -z "$SOVEREIGN_STACK_MAC_ADDRESS" ]; then
echo "ERROR: SOVEREIGN_STACK_MAC_ADDRESS is undefined. Check your project definition."
exit 1
fi
DDNS_HOST= DDNS_HOST=
if [ "$VIRTUAL_MACHINE" = www ]; then if [ "$VIRTUAL_MACHINE" = www ]; then
@ -226,17 +225,19 @@ function instantiate_vms {
DDNS_HOST="$WWW_HOSTNAME" DDNS_HOST="$WWW_HOSTNAME"
ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))" ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
DDNS_HOST="$BTCPAY_HOSTNAME" DDNS_HOST="$BTCPAY_HOSTNAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME" VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS" MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
if [ "$BTC_CHAIN" = mainnet ]; then if [ "$BITCOIN_CHAIN" = mainnet ]; then
ROOT_DISK_SIZE_GB=150 ROOT_DISK_SIZE_GB=150
elif [ "$BTC_CHAIN" = testnet ]; then elif [ "$BITCOIN_CHAIN" = testnet ]; then
ROOT_DISK_SIZE_GB=70 ROOT_DISK_SIZE_GB=70
fi fi
elif [ "$VIRTUAL_MACHINE" = "sovereign-stack" ]; then elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
DDNS_HOST="sovereign-stack-base" DDNS_HOST="$BASE_IMAGE_VM_NAME"
ROOT_DISK_SIZE_GB=8 ROOT_DISK_SIZE_GB=8
else else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds." echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
@ -249,17 +250,13 @@ function instantiate_vms {
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE" export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN" export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION" export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
./deployment/deploy_vms.sh
# if the local docker client isn't logged in, do so; ./deploy_vms.sh
# this helps prevent docker pull errors since they throttle.
# if [ ! -f "$HOME/.docker/config.json" ]; then
# echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
# fi
# this tells our local docker client to target the remote endpoint via SSH # this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN" export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks. # enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0 docker swarm init --advertise-addr enp6s0
@ -293,7 +290,7 @@ export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)" export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true export DEPLOY_GHOST=true
export DEPLOY_NEXTCLOUD=false export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY="NOSTR_IDENTITY_PUBKEY_GOES_HERE" export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD="$(new_pass)" export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)" export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
@ -305,7 +302,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL EOL
chmod 0744 "$SITE_DEFINITION_PATH" chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!" echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1 exit 1
fi fi
@ -313,28 +310,26 @@ EOL
} }
CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')" PROJECT_PATH="$PROJECTS_DIR/$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects" mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects"
export PROJECT_PATH="$PROJECT_PATH" export PROJECT_PATH="$PROJECT_PATH"
# create a symlink from ./clusterpath/projects/project # create a symlink from ./clusterpath/projects/project
if [ ! -d "$CLUSTER_PATH/projects/$PROJECT_NAME" ]; then if [ ! -d "$CLUSTER_PATH/projects/$BITCOIN_CHAIN" ]; then
ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$PROJECT_NAME" ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$BITCOIN_CHAIN"
fi fi
# check if we need to provision a new lxc project. # create the lxc project as specified by BITCOIN_CHAIN
if [ "$PROJECT_NAME" != "$CURRENT_PROJECT" ]; then if ! lxc project list | grep -q "$BITCOIN_CHAIN"; then
if ! lxc project list | grep -q "$PROJECT_NAME"; then
echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!" echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
lxc project create "$PROJECT_NAME" lxc project create "$BITCOIN_CHAIN"
fi
echo "INFO: switch to lxd project '$PROJECT_NAME'."
lxc project switch "$PROJECT_NAME"
fi fi
# # check if we need to provision a new lxc project.
# if [ "$BITCOIN_CHAIN" != "$CURRENT_PROJECT" ]; then
# echo "INFO: switch to lxd project '$BITCOIN_CHAIN'."
# lxc project switch "$BITCOIN_CHAIN"
# fi
# check to see if the env file exists. exit if not. # check to see if the env file exists. exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition" PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
@ -346,11 +341,10 @@ if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# see https://www.sovereign-stack.org/project-definition for more info. # see https://www.sovereign-stack.org/project-definition for more info.
export WWW_SERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED" export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED" export BTCPAYSERVER_MAC_ADDRESS=
export BTC_CHAIN="regtest|testnet|mainnet" export PRIMARY_DOMAIN=
export PRIMARY_DOMAIN="domain0.tld" #export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export BTCPAY_SERVER_CPU_COUNT="4" export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096" export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="6" export WWW_SERVER_CPU_COUNT="6"
@ -359,8 +353,8 @@ export WWW_SERVER_MEMORY_MB="4096"
EOL EOL
chmod 0744 "$PROJECT_DEFINITION_PATH" chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it yo!" echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/project-definitions/" echo "INFO: Learn more at https://www.sovereign-stack.org/projects/"
exit 1 exit 1
fi fi
@ -368,6 +362,22 @@ fi
# source project definition. # source project definition.
source "$PROJECT_DEFINITION_PATH" source "$PROJECT_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project_definition."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project_definition."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list. # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}" DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then if [ -n "$OTHER_SITES_LIST" ]; then
@ -403,11 +413,32 @@ done
# now let's run the www and btcpay-specific provisioning scripts. # now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
bash -c "./deployment/www/go.sh" bash -c "./www/go.sh"
ssh ubuntu@"$PRIMARY_WWW_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead
fi fi
#
LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
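One caveat with the line above: reading .git/refs/heads/master directly fails once refs are packed or a different branch is checked out; a more robust (but here purely illustrative) alternative would be:

    # illustrative only — assumes git is installed on the management machine
    LATEST_GIT_COMMIT="$(git -C .. rev-parse HEAD)"
    export LATEST_GIT_COMMIT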
export DOMAIN_NAME="$PRIMARY_DOMAIN" export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
bash -c "./deployment/btcpayserver/go.sh" bash -c "./btcpayserver/go.sh"
ssh ubuntu@"$BTCPAY_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead
fi fi
# deploy clams wallet.
LOCAL_CLAMS_PATH="$(pwd)/www/clams"
if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
if [ ! -d "$LOCAL_CLAMS_PATH" ]; then
git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_PATH"
else
cd "$LOCAL_CLAMS_PATH"
git pull
cd -
fi
fi


@ -1,15 +1,8 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
# let's make sure we have an ssh keypair. We just use $SSH_HOME/id_rsa
# TODO convert this to SSH private key held on Trezor. THus trezor-T required for
# login operations. This should be configurable of course.
if [ ! -f "$SSH_HOME/id_rsa" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
## This is a weird if clause since we need to LEFT-ALIGN the statement below. ## This is a weird if clause since we need to LEFT-ALIGN the statement below.
SSH_STRING="Host ${FQDN}" SSH_STRING="Host ${FQDN}"
@ -40,8 +33,9 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
./stub_lxc_profile.sh "$LXD_VM_NAME" ./stub_lxc_profile.sh "$LXD_VM_NAME"
lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$(date +%Y-%m)" "$LXD_VM_NAME"
# now let's create a new VM to work with. # now let's create a new VM to work with.
lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm #lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
# let's PIN the HW address for now so we don't exhaust IP # let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally. # and so we can set DNS internally.
@ -50,8 +44,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
lxc start "$LXD_VM_NAME" lxc start "$LXD_VM_NAME"
./wait_for_lxc_ip.sh "$LXD_VM_NAME" bash -c "./wait_for_lxc_ip.sh --lxc-name=$LXD_VM_NAME"
fi fi
# scan the remote machine and install it's identity in our SSH known_hosts file. # scan the remote machine and install it's identity in our SSH known_hosts file.
@ -68,3 +61,6 @@ if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
fi fi
fi fi
ssh "$PRIMARY_WWW_FQDN" -- echo ""

deployment/destroy.sh Executable file

@ -0,0 +1,38 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.
RESPONSE=
read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
. ../defaults.sh
. ./cluster_env.sh
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
if lxc list | grep -q "$LXD_NAME"; then
lxc delete -f "$LXD_NAME"
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi
if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
fi
done
# delete the base image so it can be re-created.
if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc delete -f "$BASE_IMAGE_VM_NAME"
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi


@ -8,11 +8,11 @@ export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME" export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME" export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME" export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info" export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME" export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud" export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea" export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export BTC_CHAIN="$BTC_CHAIN"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES" export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"

deployment/help.txt Normal file

@ -0,0 +1,15 @@
Sovereign Stack Help.
You are in the Sovereign Stack management environment. From here, you can issue several commands:
ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment on your active LXD remote (lxc remote get-default).
ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTIVE of user data).
ss-migrate - Migrates an existing deployment to the newest version of Sovereign Stack.
ss-show - Shows the lxd resources associated with the current remote.
For more information about all these topics, consult the Sovereign Stack website. Relevant posts include:
- https://www.sovereign-stack.org/commands
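For reference, a typical session built from the commands above might look like this (the cluster name and host below are placeholders, not part of this commit):

    ss-cluster mycluster cluster-host.example.com   # take the SSH endpoint under management
    ss-deploy                                       # deploy to the now-active LXD remote
    ss-show                                         # inspect the resulting LXD resources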

deployment/migrate.sh Executable file

@ -0,0 +1,51 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
USER_SAYS_YES=false
for i in "$@"; do
case $i in
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
;;
esac
done
. ../defaults.sh
. ./cluster_env.sh
# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves it to the management machine.
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.
# run deploy, which backs up everything but doesn't restart any services.
bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
# call the destroy script. If the user proceeds, user data is DESTROYED!
USER_SAYS_YES="$USER_SAYS_YES" ./destroy.sh
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

deployment/reset.sh Executable file

@ -0,0 +1,78 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
source ../defaults.sh
echo "Need to uncomment"
exit 1
# ./destroy.sh
# # these only get initialized upon creation, so we MUST delete here so they get recreated.
# if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
# lxc profile delete "$BASE_IMAGE_VM_NAME"
# fi
# if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
# lxc image rm "$BASE_IMAGE_VM_NAME"
# fi
# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
# fi
# CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
# if ! lxc info | grep -q "project: default"; then
# lxc project switch default
# lxc project delete "$CURRENT_PROJECT"
# fi
# if lxc profile show default | grep -q "root:"; then
# lxc profile device remove default root
# fi
# if lxc profile show default| grep -q "eth0:"; then
# lxc profile device remove default eth0
# fi
# if lxc network list --format csv | grep -q lxdbr0; then
# lxc network delete lxdbr0
# fi
# if lxc network list --format csv | grep -q lxdbr1; then
# lxc network delete lxdbr1
# fi
# if lxc storage list --format csv | grep -q ss-base; then
# lxc storage delete ss-base
# fi
# CURRENT_REMOTE="$(lxc remote get-default)"
# if ! lxc remote get-default | grep -q "local"; then
# lxc remote switch local
# lxc remote remove "$CURRENT_REMOTE"
# fi
# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
# fi
# if snap list | grep -q lxd; then
# sudo snap remove lxd
# sleep 2
# fi
# if zfs list | grep -q sovereign-stack; then
# sudo zfs destroy -r sovereign-stack
# fi
# if zfs list | grep -q "sovereign-stack"; then
# sudo zfs destroy -r "rpool/lxd"
# fi


@ -3,5 +3,8 @@
lxc list lxc list
lxc network list lxc network list
lxc profile list lxc profile list
lxc storage list
lxc image list lxc image list
lxc storage list
lxc storage info ss-base
lxc project list
lxc remote list


@ -1,11 +1,12 @@
#!/bin/bash #!/bin/bash
set -eu set -exu
cd "$(dirname "$0")"
LXD_HOSTNAME="$1" LXD_HOSTNAME="${1:-}"
# generate the custom cloud-init file. Cloud init installs and configures sshd # generate the custom cloud-init file. Cloud init installs and configures sshd
SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub") SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)" eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa" ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY" export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
@ -36,8 +37,13 @@ EOF
fi fi
# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image. # first of all, if there are uncommitted changes, we quit. You better stash or commit!
if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then # Remote VPS instances are tagged with your current git HEAD so we know which code revision
# was used when provisioning the VPS.
LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
# this is for the base image only... # this is for the base image only...
cat >> "$YAML_PATH" <<EOF cat >> "$YAML_PATH" <<EOF
user.vendor-data: | user.vendor-data: |
@ -48,7 +54,13 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
package_reboot_if_required: false package_reboot_if_required: false
preserve_hostname: false preserve_hostname: false
fqdn: sovereign-stack fqdn: ${BASE_IMAGE_VM_NAME}
apt:
sources:
docker.list:
source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable"
keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
packages: packages:
- curl - curl
@ -70,6 +82,10 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- wait-for-it - wait-for-it
- dnsutils - dnsutils
- wget - wget
- docker-ce
- docker-ce-cli
- containerd.io
- docker-compose-plugin
groups: groups:
- docker - docker
@ -83,73 +99,7 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
ssh_authorized_keys: ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY} - ${SSH_AUTHORIZED_KEY}
write_files: write_files:
- path: /home/ubuntu/docker.asc
content: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth
lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh
38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq
L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7
UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N
cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht
ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo
vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD
G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj
q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB
tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3
BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO
v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd
tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk
jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m
6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P
XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc
FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8
g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm
ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh
9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5
G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW
FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB
EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF
M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx
Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu
w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk
z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8
eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb
VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa
1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X
zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ
pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7
ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ
BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY
1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp
YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES
KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7
JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ
cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0
6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5
U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z
VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f
irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk
SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz
QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W
9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw
24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe
dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y
Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR
H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh
/nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ
M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S
xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O
jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG
YT90qFF93M3v01BbxP+EIY2/9tiIPbrd
=0YYh
-----END PGP PUBLIC KEY BLOCK-----
- path: /etc/ssh/ssh_config - path: /etc/ssh/ssh_config
content: | content: |
Port 22 Port 22
@ -163,25 +113,13 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- path: /etc/docker/daemon.json - path: /etc/docker/daemon.json
content: | content: |
{ {
"registry-mirrors": [ "registry-mirrors": ["${REGISTRY_URL}"],
"${REGISTRY_URL}" "labels": [ "githead=${LATEST_GIT_COMMIT}" ]
]
} }
runcmd: runcmd:
- cat /home/ubuntu/docker.asc | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
- sudo rm /home/ubuntu/docker.asc
- echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
- echo "alias bitcoin-cli=\"bitcoin-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- echo "alias lightning-cli=\"bitcoin-lightning-cli.sh \$@\"" >> /home/ubuntu/.bash_profile
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose
- sudo apt-get install -y openssh-server - sudo apt-get install -y openssh-server
EOF EOF
else else
@ -194,7 +132,7 @@ else
package_upgrade: false package_upgrade: false
package_reboot_if_required: false package_reboot_if_required: false
preserve_hostname: false preserve_hostname: true
fqdn: ${FQDN} fqdn: ${FQDN}
user.network-config: | user.network-config: |
@ -235,7 +173,7 @@ description: Default LXD profile for ${FILENAME}
devices: devices:
root: root:
path: / path: /
pool: sovereign-stack pool: ss-base
type: disk type: disk
config: config:
source: cloud-init:config source: cloud-init:config
@ -243,20 +181,19 @@ devices:
EOF EOF
# Stub out the network piece for the base image. # Stub out the network piece for the base image.
if [ "$LXD_HOSTNAME" = sovereign-stack ] ; then if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ] ; then
# If we are deploying the www, we attach the vm to the underlay via macvlan. #
cat >> "$YAML_PATH" <<EOF cat >> "$YAML_PATH" <<EOF
enp5s0: enp6s0:
name: enp5s0 name: enp6s0
nictype: macvlan network: lxdbr0
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic type: nic
name: ${FILENAME} name: ${FILENAME}
EOF EOF
else else
# If we are deploying the www, we attach the vm to the underlay via macvlan. # If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF cat >> "$YAML_PATH" <<EOF
enp5s0: enp5s0:
nictype: macvlan nictype: macvlan
@ -264,10 +201,10 @@ cat >> "$YAML_PATH" <<EOF
type: nic type: nic
enp6s0: enp6s0:
name: enp6s0 name: enp6s0
network: lxdbrSS network: lxdbr1
type: nic type: nic
name: ${FILENAME} name: ${PRIMARY_DOMAIN}
EOF EOF
fi fi
@ -275,8 +212,7 @@ fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM. # let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME" lxc profile create "$LXD_HOSTNAME"
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
fi fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"


@ -1,8 +1,34 @@
#!/bin/bash #!/bin/bash
set -e set -ex
LXC_INSTANCE_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxc-name=*)
LXC_INSTANCE_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
exit 1
fi
if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
exit 1
fi
LXC_INSTANCE_NAME="$1"
IP_V4_ADDRESS= IP_V4_ADDRESS=
while true; do while true; do
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
@ -17,12 +43,3 @@ while true; do
printf '.' printf '.'
fi fi
done done
# Let's remove any entry in our known_hosts, then add it back.
# we are using IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu

deployment/www/.gitignore vendored Normal file

@ -0,0 +1 @@
clams


@ -3,17 +3,11 @@
set -eu set -eu
cd "$(dirname "$0")" cd "$(dirname "$0")"
# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files. # this script backs up a source path to a destination folder on the remote VM
# we could eliminate that and simply save duplicity backups to the management machine running the script # then pulls that data down to the management environment
# this could be done by using a local path and mounting it on the remote VPS.
# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs
# step 1: run duplicity on the remote system to backup all files to the remote system.
# --allow-source-mismatch
# if the source files to backup don't exist on the remote host, we return. # if the source files to backup don't exist on the remote host, we return.
if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_SOURCE_BACKUP_PATH ]"; then if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_SOURCE_BACKUP_PATH ]"; then
echo "INFO: The path to backup does not exist. There's nothing to backup! That's ok, execution will continue."
exit 0 exit 0
fi fi
@ -33,4 +27,3 @@ rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/"
# step 4: unmount the SSHFS filesystem and cleanup. # step 4: unmount the SSHFS filesystem and cleanup.
umount "$SSHFS_PATH" umount "$SSHFS_PATH"
rm -rf "$SSHFS_PATH" rm -rf "$SSHFS_PATH"


@ -1,7 +1,6 @@
#!/bin/bash #!/bin/bash
set -e set -ex
# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time. # let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
docker pull certbot/certbot:latest docker pull certbot/certbot:latest
@ -12,9 +11,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../domain_env.sh
# with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates # with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates
# at a cachehing reverse proxy that runs nginx. # at a cachehing reverse proxy that runs nginx.
@ -23,6 +22,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
# this is minimum required; www and btcpay. # this is minimum required; www and btcpay.
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN" DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN"
if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi


@ -14,10 +14,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../domain_env.sh
### Let's check to ensure all the required settings are set. ### Let's check to ensure all the required settings are set.
if [ "$DEPLOY_GHOST" = true ]; then if [ "$DEPLOY_GHOST" = true ]; then
@ -65,8 +64,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
exit 1 exit 1
fi fi
TOR_CONFIG_PATH=
done done
./stop_docker_stacks.sh ./stop_docker_stacks.sh
@ -108,9 +105,9 @@ if [ "$RESTART_FRONT_END" = true ]; then
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../domain_env.sh
# these variables are used by both backup/restore scripts. # these variables are used by both backup/restore scripts.
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER" export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
@ -141,6 +138,7 @@ fi
./stub/gitea_yml.sh ./stub/gitea_yml.sh
./stub/nostr_yml.sh ./stub/nostr_yml.sh
# # start a browser session; point it to port 80 to ensure HTTPS redirect. # # start a browser session; point it to port 80 to ensure HTTPS redirect.
# # WWW_FQDN is in our certificate, so we resolve to that. # # WWW_FQDN is in our certificate, so we resolve to that.
# wait-for-it -t 320 "$WWW_FQDN:80" # wait-for-it -t 320 "$WWW_FQDN:80"

View File

@ -9,9 +9,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../domain_env.sh
### Stop all services. ### Stop all services.
for APP in ghost nextcloud gitea nostr; do for APP in ghost nextcloud gitea nostr; do

View File

@ -1,29 +0,0 @@
FROM node:latest
RUN apt-get update && apt-get install tzdata -y
ENV TZ="America/New_York"
#RUN npm install -g npm@9.3.0
# Clone the repository
RUN git clone https://github.com/clams-tech/browser-app.git /usr/src/clams
WORKDIR /usr/src/clams
# checkout specific tag
RUN git -c advice.detachedHead=false checkout tags/1.2.0
# couldn't do a yarn build without updating this.
# RUN npx -y update-browserslist-db@latest
# install dependencies
RUN yarn
EXPOSE 4173
RUN mkdir /output
VOLUME /output
RUN yarn build
ENTRYPOINT [ "cp", "-a", "/usr/src/clams/.svelte-kit/output/.", "/output/" ]

View File

@ -1,28 +0,0 @@
#!/bin/bash
# The purpose of this script is to use a Docker container to get and build the Clams
# server-side pieces and output them to a specified directory. These files are then
# ready to be served by a TLS-enabled reverse proxy. The traffic flow is:
# Client Browser -> wss (WebSocket over TLS) -> ProxyServer -> TCP to btcpayserver:9735
set -ex
cd "$(dirname "$0")"
export CLAMS_OUTPUT_DIR="$REMOTE_HOME/clams"
ssh "$PRIMARY_WWW_FQDN" sudo rm -rf "$CLAMS_OUTPUT_DIR"
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$CLAMS_OUTPUT_DIR"
if docker ps | grep -q clams; then
docker kill clams
fi
if docker ps -a | grep -q clams; then
docker system prune -f
fi
docker build -t clams:latest .
docker run -it --name clams -v "$CLAMS_OUTPUT_DIR":/output clams:latest
ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$CLAMS_OUTPUT_DIR"

View File

@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
# for each language specified in the site_definition, we spawn a separate ghost container # for each language specified in the site_definition, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE # at https://www.domain.com/$LANGUAGE_CODE

View File

@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
if [ "$DEPLOY_GITEA" = true ]; then if [ "$DEPLOY_GITEA" = true ]; then
GITEA_PATH="$REMOTE_GITEA_PATH/$DOMAIN_NAME/${LANGUAGE_CODE}" GITEA_PATH="$REMOTE_GITEA_PATH/$DOMAIN_NAME/${LANGUAGE_CODE}"

View File

@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
# ensure remote directories exist # ensure remote directories exist
if [ "$DEPLOY_NEXTCLOUD" = true ]; then if [ "$DEPLOY_NEXTCLOUD" = true ]; then

View File

@ -1,9 +1,8 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
# here's the NGINX config. We support ghost and nextcloud. # here's the NGINX config. We support ghost and nextcloud.
NGINX_CONF_PATH="$PROJECT_PATH/nginx.conf" NGINX_CONF_PATH="$PROJECT_PATH/nginx.conf"
@ -12,7 +11,6 @@ echo "" > "$NGINX_CONF_PATH"
# iterate over all our domains and create the nginx config file. # iterate over all our domains and create the nginx config file.
iteration=0 iteration=0
echo "DOMAIN_LIST: $DOMAIN_LIST"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME" export DOMAIN_NAME="$DOMAIN_NAME"
@ -20,10 +18,11 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}" export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" echo "BEFORE"
source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
echo "after"
if [ $iteration = 0 ]; then if [ $iteration = 0 ]; then
cat >>"$NGINX_CONF_PATH" <<EOL cat >>"$NGINX_CONF_PATH" <<EOL
events { events {
@ -63,7 +62,6 @@ EOL
# http://${DOMAIN_NAME} redirect to https://${WWW_FQDN} # http://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
server { server {
listen 80; listen 80;
listen [::]:80;
server_name ${DOMAIN_NAME}; server_name ${DOMAIN_NAME};
@ -79,7 +77,6 @@ EOL
# http://${WWW_FQDN} redirect to https://${WWW_FQDN} # http://${WWW_FQDN} redirect to https://${WWW_FQDN}
server { server {
listen 80; listen 80;
listen [::]:80;
server_name ${WWW_FQDN}; server_name ${WWW_FQDN};
return 301 https://${WWW_FQDN}\$request_uri; return 301 https://${WWW_FQDN}\$request_uri;
} }
@ -92,7 +89,6 @@ EOL
# http://${NEXTCLOUD_FQDN} redirect to https://${NEXTCLOUD_FQDN} # http://${NEXTCLOUD_FQDN} redirect to https://${NEXTCLOUD_FQDN}
server { server {
listen 80; listen 80;
listen [::]:80;
server_name ${NEXTCLOUD_FQDN}; server_name ${NEXTCLOUD_FQDN};
return 301 https://${NEXTCLOUD_FQDN}\$request_uri; return 301 https://${NEXTCLOUD_FQDN}\$request_uri;
} }
@ -106,7 +102,6 @@ EOL
# http://${GITEA_FQDN} redirect to https://${GITEA_FQDN} # http://${GITEA_FQDN} redirect to https://${GITEA_FQDN}
server { server {
listen 80; listen 80;
listen [::]:80;
server_name ${GITEA_FQDN}; server_name ${GITEA_FQDN};
return 301 https://${GITEA_FQDN}\$request_uri; return 301 https://${GITEA_FQDN}\$request_uri;
} }
@ -128,14 +123,13 @@ EOL
# http://${BTCPAY_USER_FQDN} redirect to https://${BTCPAY_USER_FQDN} # http://${BTCPAY_USER_FQDN} redirect to https://${BTCPAY_USER_FQDN}
server { server {
listen 80; listen 80;
listen [::]:80;
server_name ${BTCPAY_SERVER_NAMES}; server_name ${BTCPAY_SERVER_NAMES};
return 301 https://${BTCPAY_USER_FQDN}\$request_uri; return 301 https://\$host\$request_uri;
} }
EOL EOL
if [ "$iteration" = 0 ]; then if [ $iteration = 0 ]; then
# TLS config for ghost. # TLS config for ghost.
cat >>"$NGINX_CONF_PATH" <<EOL cat >>"$NGINX_CONF_PATH" <<EOL
# global TLS settings # global TLS settings
@ -181,7 +175,6 @@ EOL
# https://${DOMAIN_NAME} redirect to https://${WWW_FQDN} # https://${DOMAIN_NAME} redirect to https://${WWW_FQDN}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem; ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem; ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@ -274,6 +267,36 @@ EOL
EOL EOL
# Clams server entry
# cat >>"$NGINX_CONF_PATH" <<EOL
# # https server block for https://${CLAMS_FQDN}
# server {
# listen 443 ssl http2;
# ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
# ssl_trusted_certificate $CONTAINER_TLS_PATH/fullchain.pem;
# server_name ${CLAMS_FQDN};
# index index.js;
# root /apps/clams;
# index 200.htm;
# location / {
# try_files \$uri \$uri/ /200.htm;
# }
# location ~* \.(?:css|js|jpg|svg)$ {
# expires 30d;
# add_header Cache-Control "public";
# }
# }
# EOL
echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH" echo " # set up cache paths for nginx caching" >>"$NGINX_CONF_PATH"
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE" STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE"
@ -289,7 +312,6 @@ EOL
# Main HTTPS listener for https://${WWW_FQDN} # Main HTTPS listener for https://${WWW_FQDN}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem; ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem; ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@ -328,7 +350,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr; proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$http_host; proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme; proxy_set_header X-Forwarded-Proto \$scheme;
proxy_intercept_errors on; proxy_intercept_errors on;
@ -370,7 +392,7 @@ EOL
cat >>"$NGINX_CONF_PATH" <<EOL cat >>"$NGINX_CONF_PATH" <<EOL
proxy_set_header X-Real-IP \$remote_addr; proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$http_host; proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme; proxy_set_header X-Forwarded-Proto \$scheme;
@ -421,7 +443,6 @@ EOL
# TLS listener for ${NEXTCLOUD_FQDN} # TLS listener for ${NEXTCLOUD_FQDN}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem; ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem; ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
@ -474,7 +495,6 @@ EOL
# TLS listener for ${GITEA_FQDN} # TLS listener for ${GITEA_FQDN}
server { server {
listen 443 ssl http2; listen 443 ssl http2;
listen [::]:443 ssl http2;
ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem; ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem;
ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem; ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem;
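Since this script assembles nginx.conf from many heredocs, a quick sanity check on the generated file is cheap insurance. A full "nginx -t" needs the TLS material and resolvable upstream names, so this hedged sketch only greps the output (both strings come from the heredocs above):

grep -c "server {" "$NGINX_CONF_PATH"    # number of server blocks emitted
grep -c "listen 443" "$NGINX_CONF_PATH"  # number of TLS listeners emitted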

View File

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
#https://github.com/fiatjaf/expensive-relay #https://github.com/fiatjaf/expensive-relay
@ -23,9 +23,9 @@ EOL
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
@ -88,9 +88,9 @@ EOL
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
# for each language specified in the site_definition, we spawn a separate ghost container # for each language specified in the site_definition, we spawn a separate ghost container
# at https://www.domain.com/$LANGUAGE_CODE # at https://www.domain.com/$LANGUAGE_CODE

View File

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
set -eu set -ex
cd "$(dirname "$0")" cd "$(dirname "$0")"
docker pull "$NOSTR_RELAY_IMAGE" docker pull "$NOSTR_RELAY_IMAGE"
@ -10,9 +10,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has. # source the site path so we know what features it has.
source "$RESPOSITORY_PATH/reset_env.sh" source ../../../defaults.sh
source "$SITE_PATH/site_definition" source "$SITE_PATH/site_definition"
source "$RESPOSITORY_PATH/domain_env.sh" source ../../domain_env.sh
if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr" REMOTE_NOSTR_PATH="$REMOTE_HOME/nostr"

View File

@ -1,65 +1,138 @@
#!/bin/bash #!/bin/bash
set -eu set -exu
cd "$(dirname "$0")" cd "$(dirname "$0")"
# see https://www.sovereign-stack.org/management/
. ./defaults.sh
# the DISK variable here tells us which disk (partition) the admin wants to use for
# lxd resources. By default, we provision a loop device backed by the root filesystem. Admins
# can override this by editing the DISK variable below.
DISK="rpool/lxd"
#DISK="/dev/sda1"
export DISK="$DISK"
# let's check to ensure the management machine is on the baseline Ubuntu 22.04 LTS # let's check to ensure the management machine is on the baseline Ubuntu 22.04 LTS
if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine." echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
exit 1 exit 1
fi fi
if [ ! -f /usr/share/keyrings/docker-archive-keyring.gpg ]; then # install snap
cat ./certs/docker.gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
fi
sudo apt-get update
# TODO REVIEW management machine software requirements
# to a host on SERVERS LAN so that it can operate
# TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
# virt-manager allows us to run type-1 vms desktop version. We use remote viewer to get a GUI for the VM
sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
apt-transport-https ca-certificates lsb-release docker-ce-cli \
python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses \
libcanberra-gtk-module virt-manager pass
# for trezor installation
pip3 install setuptools wheel
pip3 install trezor_agent
if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
fi
# TODO initialize pass here; need to first initialize Trezor-T certificates.
# install lxd as a snap if it's not installed. We only really use the client part of this package
# on the management machine.
if ! snap list | grep -q lxd; then if ! snap list | grep -q lxd; then
sudo snap install lxd --candidate sudo snap install lxd
sleep 3
# run lxd init with the preseed below.
#
cat <<EOF | lxd init --preseed
config: {}
networks:
- config:
ipv4.address: auto
ipv4.dhcp: true
ipv4.nat: true
ipv6.address: none
description: "Default network bridge for ss-mgmt outbound network access."
name: lxdbr0
type: "bridge"
project: default
storage_pools:
- config:
source: ${DISK}
description: ""
name: sovereign-stack
driver: zfs
profiles:
- config: {}
description: ""
devices:
enp5s0:
name: enp5s0
network: lxdbr0
type: nic
root:
path: /
pool: sovereign-stack
type: disk
name: default
projects: []
cluster: null
EOF
# initialize the daemon for auto use. Most of the time on the management machine,
# we only use the LXC client -- not the daemon. HOWEVER, there are circumstances where
# you might want to run the management machine in an LXD-based VM, so we init the lxd daemon
# after having installed it so it'll be available for use.
# see https://www.sovereign-stack.org/management/
sudo lxd init --auto --storage-pool=default --storage-create-loop=50 --storage-backend=zfs
fi fi
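A hedged sanity check after the snap install and init; these are standard lxc client subcommands, and the bridge and pool names are taken from the preseed above (adjust if your preseed differs):

lxc network show lxdbr0    # bridge created by the preseed
lxc storage list           # should list the zfs-backed pool
lxc profile show default   # root disk and nic devices wired to the pool/bridge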
# pull the vm image down if it's not there.
if ! lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc image copy "images:$BASE_LXC_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
fi
if ! lxc list --format csv | grep -q ss-mgmt; then
lxc init "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB
# mount the pre-verified sovereign stack git repo into the new vm
lxc config device add ss-mgmt sscode disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
fi
if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
lxc start ss-mgmt
sleep 15
fi
. ./management/wait_for_lxc_ip.sh
# TODO wait for cloud-init to finish (but in the VM)
# while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
# sleep 1
# done
# now run the mgmt provisioning script.
SSH_PUBKEY_PATH="$HOME/.ssh/id_rsa.pub"
if [ -f "$SSH_PUBKEY_PATH" ]; then
lxc file push "$SSH_PUBKEY_PATH" ss-mgmt/home/ubuntu/.ssh/authorized_keys
else
echo "ERROR: You need to generate an SSH key."
exit 1
fi
lxc file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
lxc file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
lxc file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack
lxc exec ss-mgmt apt-get update
lxc exec ss-mgmt -- apt-get install -y openssh-server
lxc file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
lxc exec ss-mgmt -- sudo systemctl restart sshd
# make the Sovereign Stack commands available to the user via ~/.bashrc # make the Sovereign Stack commands available to the user via ~/.bashrc
# we use ~/.bashrc # we use ~/.bashrc
ADDED_COMMAND=false ADDED_COMMAND=false
for SS_COMMAND in deploy cluster; do if ! < "$HOME/.bashrc" grep -q "ss-manage"; then
if ! < "$HOME/.bashrc" grep -q "ss-$SS_COMMAND"; then echo "alias ss-manage='$(pwd)/manage.sh \$@'" >> "$HOME/.bashrc"
echo "alias ss-${SS_COMMAND}='$(pwd)/${SS_COMMAND}.sh \$@'" >> "$HOME/.bashrc"
ADDED_COMMAND=true ADDED_COMMAND=true
fi fi
done
wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1
# Let's remove any entry in our known_hosts, then add it back.
# we are using IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh
lxc restart ss-mgmt
if [ "$ADDED_COMMAND" = true ]; then if [ "$ADDED_COMMAND" = true ]; then
echo "WARNING! You need to run 'source ~/.bashrc' before continuing." echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
fi fi
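Taken together with manage.sh below, the intended flow after a successful install looks roughly like this (hedged sketch; the alias name comes from the .bashrc line added above):

./install.sh          # builds and provisions the ss-mgmt VM
source ~/.bashrc      # pick up the new ss-manage alias
ss-manage             # starts ss-mgmt if needed and drops you into an SSH session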

27
manage.sh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
if ! lxc remote get-default | grep -q local; then
lxc remote switch local
fi
# if the mgmt machine doesn't exist, then warn the user to perform ./install.sh
if ! lxc list --format csv | grep -q "ss-mgmt"; then
echo "ERROR: the management machine VM does not exist. You probably need to run './install.sh'."
echo "INFO: check out https://www.sovereign-stack.org/tag/code-lifecycle-management/ for more information."
fi
# if the machine does exist, let's make sure it's RUNNING.
if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
echo "INFO: The management machine was in a STOPPED state. Starting the environment. Please wait."
lxc start ss-mgmt
sleep 30
fi
. ./management/wait_for_lxc_ip.sh
wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1
ssh ubuntu@"$IP_V4_ADDRESS"

11
management/bash_profile Normal file
View File

@ -0,0 +1,11 @@
#!/bin/bash
alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/deploy.sh $@'
alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
alias ss-destroy='/home/ubuntu/sovereign-stack/deployment/destroy.sh $@'
alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt'
alias ll='ls -lah'
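A hedged example of how these aliases are meant to be used once you are inside the ss-mgmt shell; the descriptions are taken from the scripts they point at elsewhere in this diff:

ss-help        # prints deployment/help.txt
ss-deploy      # runs deployment/deploy.sh; see ss-help for the current flags
ss-migrate     # runs deployment/migrate.sh (backup, teardown, redeploy, restore)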

117
management/bashrc Normal file
View File

@ -0,0 +1,117 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi

4
management/motd Normal file
View File

@ -0,0 +1,4 @@
#!/bin/bash
echo "Welcome to the management environment. Run 'ss-help' to get started."

60
management/provision.sh Executable file
View File

@ -0,0 +1,60 @@
#!/bin/bash
set -ex
cd "$(dirname "$0")"
# NOTE! This script MUST be executed as root.
sudo apt-get update
sudo apt-get install -y gnupg ca-certificates curl lsb-release
mkdir -p /etc/apt/keyrings
# add the docker gpg key to keyring for docker-ce-cli
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
cat /home/ubuntu/sovereign-stack/certs/docker.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 2>&1
fi
# TODO REVIEW mgmt software requirements
sudo apt-get update
sudo apt-get install -y wait-for-it dnsutils rsync sshfs apt-transport-https docker-ce-cli \
libcanberra-gtk-module snapd nano git
sleep 1
#apt install python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses for trezor stuff
# for trezor installation
#pip3 install setuptools wheel
#pip3 install trezor_agent
# ensure the trezor-t udev rules are in place.
# if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
# sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
# fi
# install snap
if ! snap list | grep -q lxd; then
sudo snap install lxd
sleep 6
# We just do an auto initialization. All we are using is the LXD client inside the management environment.
sudo lxd init --auto
fi
echo "Your management machine has been provisioned!"
# run a lxd command so we don't see a warning upon first invocation
lxc list > /dev/null 2>&1
# add groups for docker and lxd
sudo addgroup docker
sudo usermod -aG docker ubuntu
sudo usermod -aG lxd ubuntu
# if an SSH pubkey does not exist, we create one.
if [ ! -f /home/ubuntu/.ssh/id_rsa.pub ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f /home/ubuntu/.ssh/id_rsa -t ecdsa -b 521 -N ""
fi
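A hedged set of post-provision checks; run them after logging back in so the new group memberships take effect (all commands are standard):

groups             # should now include docker and lxd
docker --version   # client only; the daemon it talks to lives on the remote hosts
lxc list           # should return an empty table without warnings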

116
management/sshd_config Normal file
View File

@ -0,0 +1,116 @@
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games
# The strategy used for options in the default sshd_config shipped with
# OpenSSH is to specify options with their default value where
# possible, but leave them commented. Uncommented options override the
# default value.
Include /etc/ssh/sshd_config.d/*.conf
Port 22
#AddressFamily any
ListenAddress 0.0.0.0
#ListenAddress ::
#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key
# Ciphers and keying
#RekeyLimit default none
# Logging
#SyslogFacility AUTH
#LogLevel INFO
# Authentication:
#LoginGraceTime 2m
#PermitRootLogin prohibit-password
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10
#PubkeyAuthentication yes
# Expect .ssh/authorized_keys2 to be disregarded by default in future.
#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2
#AuthorizedPrincipalsFile none
#AuthorizedKeysCommand none
#AuthorizedKeysCommandUser nobody
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#HostbasedAuthentication no
# Change to yes if you don't trust ~/.ssh/known_hosts for
# HostbasedAuthentication
#IgnoreUserKnownHosts no
# Don't read the user's ~/.rhosts and ~/.shosts files
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
#PasswordAuthentication yes
#PermitEmptyPasswords no
# Change to yes to enable challenge-response passwords (beware issues with
# some PAM modules and threads)
KbdInteractiveAuthentication no
# Kerberos options
#KerberosAuthentication no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes
#KerberosGetAFSToken no
# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes
#GSSAPIStrictAcceptorCheck yes
#GSSAPIKeyExchange no
# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the KbdInteractiveAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via KbdInteractiveAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and KbdInteractiveAuthentication to 'no'.
UsePAM yes
#AllowAgentForwarding yes
#AllowTcpForwarding yes
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
#X11UseLocalhost yes
#PermitTTY yes
#PrintMotd no
#PrintLastLog yes
#TCPKeepAlive yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
#ClientAliveCountMax 3
#UseDNS no
#PidFile /run/sshd.pid
#MaxStartups 10:30:100
#PermitTunnel no
#ChrootDirectory none
#VersionAddendum none
# no default banner path
#Banner none
# Allow client to pass locale environment variables
AcceptEnv LANG LC_*
# override default of no subsystems
Subsystem sftp /usr/lib/openssh/sftp-server
PrintMotd yes

27
management/wait_for_lxc_ip.sh Executable file
View File

@ -0,0 +1,27 @@
#!/bin/bash
set -e
IP_V4_ADDRESS=
while true; do
# wait for the ss-mgmt VM to report its enp5s0 interface in 'lxc list'
if lxc list ss-mgmt | grep -q enp5s0; then
break;
else
sleep 1
fi
done
while true; do
IP_V4_ADDRESS=$(lxc list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
if [ -n "$IP_V4_ADDRESS" ]; then
# the VM has an IP address; stop waiting.
break;
else
sleep 1
printf '.'
fi
done
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
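Because the script only exports IP_V4_ADDRESS, it is meant to be sourced rather than executed directly; the callers shown above (install.sh and manage.sh) follow exactly this pattern:

. ./management/wait_for_lxc_ip.sh
wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1
ssh "ubuntu@$IP_V4_ADDRESS"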

View File

@ -1,102 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
CURRENT_CLUSTER="$(lxc remote get-default)"
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
source ./defaults.sh
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
exit 1
fi
source "$CLUSTER_DEFINITION"
# source the project definition.
# Now let's load the project definition.
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"
# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves them to the management machine
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.
bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
RESPONSE=
read -r -p "Are you sure you want to continue the migration? ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
lxc delete -f "$LXD_NAME"
lxc profile delete "$LXD_NAME"
done
# delete the base image so it can be re-created.
if lxc list | grep -q sovereign-stack-base; then
lxc delete -f sovereign-stack-base
fi
# these only get initialized upon creation, so we MUST delete here so they get recreated.
if lxc profile list | grep -q sovereign-stack; then
lxc profile delete sovereign-stack
fi
if lxc image list | grep -q sovereign-stack-base; then
lxc image rm sovereign-stack-base
fi
if lxc image list | grep -q ubuntu-base; then
lxc image rm ubuntu-base
fi
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

View File

@ -1,21 +0,0 @@
#!/bin/bash
set -eu
export DOMAIN_NAME=
export DUPLICITY_BACKUP_PASSPHRASE=
export BTCPAY_HOSTNAME_IN_CERT=
export DEPLOY_GHOST=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD=
export GHOST_MYSQL_ROOT_PASSWORD=
export NEXTCLOUD_MYSQL_PASSWORD=
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
export GITEA_MYSQL_PASSWORD=
export GITEA_MYSQL_ROOT_PASSWORD=
export LANGUAGE_CODE="en"
SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source "$SCRIPT_DIR/defaults.sh"

View File

@ -1,17 +0,0 @@
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync sshfs snapd lxd-client
RUN mkdir /sovereign-stack
COPY ./deployment /sovereign-stack
WORKDIR /sovereign-stack
RUN mkdir /domain
VOLUME /domain
ENV SITE_PATH=/domain
COPY ./entrypoint.sh /entrypoint.sh
RUN chmod 0744 /entrypoint.sh
CMD /entrypoint.sh

View File

@ -1,8 +0,0 @@
#!/bin/bash
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: DOMAIN_NAME not defined.".
exit 1
fi
/sovereign-stack/deploy.sh --domain="$DOMAIN_NAME"

View File

@ -1,32 +0,0 @@
version: "3.8"
services:
# a hidden service that routes to the nginx container at http://onionurl.onion server block
tor-onion:
image: tor:latest
networks:
- tor-net
volumes:
- ${REMOTE_HOME}/tor:/var/lib/tor
- tor-logs:/var/log/tor
configs:
- source: tor-config
target: /etc/tor/torrc
mode: 0644
deploy:
mode: replicated
replicas: 1
restart_policy:
condition: on-failure
volumes:
tor-data:
tor-logs:
networks:
tor-net:
attachable: true
configs:
tor-config:
file: ${TOR_CONFIG_PATH}

39
uninstall.sh Executable file
View File

@ -0,0 +1,39 @@
#!/bin/bash
set -exu
# this script undoes install.sh
. ./defaults.sh
if lxc list --format csv | grep -q ss-mgmt; then
if lxc list --format csv | grep ss-mgmt | grep -q "RUNNING"; then
lxc stop ss-mgmt
fi
lxc config device remove ss-mgmt sscode
lxc delete ss-mgmt
fi
# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
# fi
# if lxc storage list --format csv | grep -q sovereign-stack; then
# lxc profile device remove default root
# lxc storage delete sovereign-stack
# fi
# if snap list | grep -q lxd; then
# sudo snap remove lxd
# sleep 2
# fi
# if zfs list | grep -q sovereign-stack; then
# sudo zfs destroy -r sovereign-stack
# fi
# if zfs list | grep -q "sovereign-stack"; then
# sudo zfs destroy -r "rpool/lxd"
# fi

View File

@ -1 +1 @@
v0.0.22 v0.0.23