From 2c0645c951c9f02a64fdad469b6c5db1440834e9 Mon Sep 17 00:00:00 2001 From: Derek Smith Date: Wed, 1 Feb 2023 14:44:05 -0500 Subject: [PATCH] Undo this commit. --- .gitignore | 2 +- .vscode/settings.json | 8 +- NOTES | 1 - defaults.sh | 54 +++--- deployment/btcpayserver/stub_btcpay_setup.sh | 2 +- .../check_dependencies.sh | 0 cluster.sh => deployment/cluster.sh | 113 +++++++----- deployment/cluster_env.sh | 54 ++++++ deployment/create_lxc_base.sh | 71 +++----- deploy.sh => deployment/deploy.sh | 167 +++++++++++------- deployment/deploy_vms.sh | 18 +- deployment/destroy.sh | 38 ++++ domain_env.sh => deployment/domain_env.sh | 2 +- deployment/help.txt | 15 ++ deployment/migrate.sh | 51 ++++++ deployment/reset.sh | 78 ++++++++ show_lxc.sh => deployment/show.sh | 5 +- deployment/stub_lxc_profile.sh | 138 ++++----------- deployment/wait_for_lxc_ip.sh | 39 ++-- deployment/www/.gitignore | 1 + deployment/www/backup_path.sh | 11 +- deployment/www/generate_certs.sh | 8 +- deployment/www/go.sh | 12 +- deployment/www/stop_docker_stacks.sh | 4 +- deployment/www/stub/clams/Dockerfile | 29 --- deployment/www/stub/clams/build.sh | 28 --- deployment/www/stub/ghost_yml.sh | 4 +- deployment/www/stub/gitea_yml.sh | 4 +- deployment/www/stub/nextcloud_yml.sh | 4 +- deployment/www/stub/nginx_config.sh | 58 ++++-- deployment/www/stub/nginx_yml.sh | 12 +- deployment/www/stub/nostr_yml.sh | 6 +- install.sh | 165 ++++++++++++----- manage.sh | 27 +++ 51-trezor.rules => management/51-trezor.rules | 0 management/bash_profile | 11 ++ management/bashrc | 117 ++++++++++++ management/motd | 4 + management/provision.sh | 60 +++++++ management/sshd_config | 116 ++++++++++++ management/wait_for_lxc_ip.sh | 27 +++ migrate.sh | 102 ----------- reset_env.sh | 21 --- staging/Dockerfile | 17 -- staging/entrypoint.sh | 8 - staging/tor.yml | 32 ---- uninstall.sh | 39 ++++ version.txt | 2 +- 48 files changed, 1133 insertions(+), 652 deletions(-) delete mode 100644 NOTES rename check_dependencies.sh => 
deployment/check_dependencies.sh (100%) rename cluster.sh => deployment/cluster.sh (70%) create mode 100755 deployment/cluster_env.sh rename deploy.sh => deployment/deploy.sh (74%) create mode 100755 deployment/destroy.sh rename domain_env.sh => deployment/domain_env.sh (95%) create mode 100644 deployment/help.txt create mode 100755 deployment/migrate.sh create mode 100755 deployment/reset.sh rename show_lxc.sh => deployment/show.sh (60%) create mode 100644 deployment/www/.gitignore delete mode 100644 deployment/www/stub/clams/Dockerfile delete mode 100755 deployment/www/stub/clams/build.sh create mode 100755 manage.sh rename 51-trezor.rules => management/51-trezor.rules (100%) create mode 100644 management/bash_profile create mode 100644 management/bashrc create mode 100644 management/motd create mode 100755 management/provision.sh create mode 100644 management/sshd_config create mode 100755 management/wait_for_lxc_ip.sh delete mode 100755 migrate.sh delete mode 100755 reset_env.sh delete mode 100644 staging/Dockerfile delete mode 100644 staging/entrypoint.sh delete mode 100644 staging/tor.yml create mode 100755 uninstall.sh diff --git a/.gitignore b/.gitignore index ecab0e7..ee9014a 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -./reset.sh \ No newline at end of file +publish.sh \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 6bcd5fc..decab03 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -10,11 +10,13 @@ "shellcheck.enableQuickFix": true, "shellcheck.run": "onType", "shellcheck.executablePath": "shellcheck", - "shellcheck.customArgs": [], + "shellcheck.customArgs": [ + "-x" + ], "shellcheck.ignorePatterns": {}, "shellcheck.exclude": [ - // "SC1090", - // "SC1091", + "SC1090", + "SC1091", "SC2029" ], "terminal.integrated.fontFamily": "monospace", diff --git a/NOTES b/NOTES deleted file mode 100644 index 8b75647..0000000 --- a/NOTES +++ /dev/null @@ -1 +0,0 @@ -Trezor MUST Use the "Crypto" 
firmware with shitcoin support in order for 2FA (WEBAUTHN) to work. Bummer. \ No newline at end of file diff --git a/defaults.sh b/defaults.sh index 927bca5..ac57a80 100755 --- a/defaults.sh +++ b/defaults.sh @@ -1,10 +1,29 @@ #!/bin/bash -set -eu +set -ex + +if lxc remote get-default | grep -q "production"; then + echo "WARNING: You are running a migration procedure on a production system." + echo "" + + # check if there are any uncommited changes. It's dangerous to + # alter production systems when you have commits to make or changes to stash. + if git update-index --refresh | grep -q "needs update"; then + echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue." + exit 1 + fi + + RESPONSE= + read -r -p " Are you sure you want to continue (y) ": RESPONSE + if [ "$RESPONSE" != "y" ]; then + echo "STOPPING." + exit 1 + fi + +fi + -export WWW_SERVER_MAC_ADDRESS= export DEPLOY_WWW_SERVER=false -export DEPLOY_BTCPAY_SERVER=false export DEPLOY_GHOST=false export DEPLOY_NEXTCLOUD=false @@ -16,6 +35,8 @@ export BTCPAY_HOSTNAME_IN_CERT="btcpay" export NEXTCLOUD_HOSTNAME="nextcloud" export GITEA_HOSTNAME="git" export NOSTR_HOSTNAME="relay" +export CLAMS_HOSTNAME="clams" +export CLAMS_GIT_REPO="https://github.com/farscapian/clams-app-docker.git" export SITE_LANGUAGE_CODES="en" export LANGUAGE_CODE="en" @@ -37,7 +58,7 @@ export DUPLICITY_BACKUP_PASSPHRASE= export SSH_HOME="$HOME/.ssh" export PASS_HOME="$HOME/.password-store" -export VM_NAME="sovereign-stack-base" + export BTCPAY_SERVER_CPU_COUNT="4" export BTCPAY_SERVER_MEMORY_MB="4096" @@ -48,23 +69,6 @@ export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io" export NEXTCLOUD_SPACE_GB=10 -# first of all, if there are uncommited changes, we quit. You better stash or commit! -# Remote VPS instances are tagged with your current git HEAD so we know which code revision -# used when provisioning the VPS. 
-#LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/master)" -#export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT" - -# check if there are any uncommited changes. It's dangerous to instantiate VMs using -# code that hasn't been committed. -# if git update-index --refresh | grep -q "needs update"; then -# echo "ERROR: You have uncommited changes! Better stash your work with 'git stash'." -# exit 1 -# fi - -BTC_CHAIN=regtest - -export BTC_CHAIN="$BTC_CHAIN" - DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy" @@ -89,7 +93,6 @@ export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE" export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay" -export SOVEREIGN_STACK_MAC_ADDRESS= export WWW_SERVER_MAC_ADDRESS= export BTCPAYSERVER_MAC_ADDRESS= @@ -97,9 +100,11 @@ export CLUSTERS_DIR="$HOME/ss-clusters" export PROJECTS_DIR="$HOME/ss-projects" export SITES_PATH="$HOME/ss-sites" - # The base VM image. -export BASE_LXC_IMAGE="ubuntu/22.04/cloud" +export LXD_UBUNTU_BASE_VERSION="22.04" +export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}" +export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud" +export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}" # Deploy a registry cache on your management machine. export DEPLOY_MGMT_REGISTRY=false @@ -114,3 +119,4 @@ export REMOTE_CERT_BASE_DIR="$REMOTE_HOME/.certs" # this space is for OS, docker images, etc. DOES NOT INCLUDE USER DATA. 
export ROOT_DISK_SIZE_GB=20 export REGISTRY_URL="https://index.docker.io/v1/" +export PRIMARY_DOMAIN= \ No newline at end of file diff --git a/deployment/btcpayserver/stub_btcpay_setup.sh b/deployment/btcpayserver/stub_btcpay_setup.sh index 498b6bf..0adcefb 100755 --- a/deployment/btcpayserver/stub_btcpay_setup.sh +++ b/deployment/btcpayserver/stub_btcpay_setup.sh @@ -35,7 +35,7 @@ cd btcpayserver-docker export BTCPAY_HOST="${BTCPAY_USER_FQDN}" export BTCPAY_ANNOUNCEABLE_HOST="${DOMAIN_NAME}" -export NBITCOIN_NETWORK="${BTC_CHAIN}" +export NBITCOIN_NETWORK="${BITCOIN_CHAIN}" export LIGHTNING_ALIAS="${PRIMARY_DOMAIN}" export BTCPAYGEN_LIGHTNING="clightning" export BTCPAYGEN_CRYPTO1="btc" diff --git a/check_dependencies.sh b/deployment/check_dependencies.sh similarity index 100% rename from check_dependencies.sh rename to deployment/check_dependencies.sh diff --git a/cluster.sh b/deployment/cluster.sh similarity index 70% rename from cluster.sh rename to deployment/cluster.sh index 2acea79..c7ff9aa 100755 --- a/cluster.sh +++ b/deployment/cluster.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -eu +set -ex cd "$(dirname "$0")" # This script is meant to be executed on the management machine. @@ -8,7 +8,7 @@ cd "$(dirname "$0")" # to use LXD. DATA_PLANE_MACVLAN_INTERFACE= -DISK_TO_USE= +DISK_TO_USE=loop # override the cluster name. CLUSTER_NAME="${1:-}" @@ -18,7 +18,7 @@ if [ -z "$CLUSTER_NAME" ]; then fi #shellcheck disable=SC1091 -source ./defaults.sh +source ../defaults.sh export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME" CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" @@ -30,11 +30,10 @@ if [ ! 
-f "$CLUSTER_DEFINITION" ]; then cat >"$CLUSTER_DEFINITION" </dev/null 2>&1; then - if lxc profile list --format csv | grep -q sovereign-stack; then - lxc profile delete sovereign-stack + if lxc profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then + lxc profile delete "$BASE_IMAGE_VM_NAME" sleep 1 fi - if lxc network list --format csv | grep -q lxdbrSS; then - lxc network delete lxdbrSS + if lxc network list --format csv | grep -q lxdbr0; then + lxc network delete lxdbr0 + sleep 1 + fi + + if lxc network list --format csv | grep -q lxdbr1; then + lxc network delete lxdbr1 sleep 1 fi fi @@ -148,22 +156,13 @@ fi ssh -t "ubuntu@$FQDN" " set -e -# install ufw and allow SSH. -sudo apt update -sudo apt upgrade -y -sudo apt install ufw htop dnsutils nano -y -sudo ufw allow ssh -sudo ufw allow 8443/tcp comment 'allow LXD management' - -# enable the host firewall -if sudo ufw status | grep -q 'Status: inactive'; then - sudo ufw enable -fi +# install tool/dependencies +sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y # install lxd as a snap if it's not installed. if ! snap list | grep -q lxd; then - sudo snap install lxd --candidate - sleep 4 + sudo snap install lxd + sleep 10 fi " @@ -173,27 +172,41 @@ if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then DATA_PLANE_MACVLAN_INTERFACE="$(ssh -t ubuntu@"$FQDN" ip route | grep default | cut -d " " -f 5)" fi -# stub out the lxd init file for the remote SSH endpoint. -CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/lxdinit_profile.yml" -cat >"$CLUSTER_MASTER_LXD_INIT" < /dev/null && pwd )" -export RESPOSITORY_PATH="$RESPOSITORY_PATH" - ./check_dependencies.sh DOMAIN_NAME= @@ -108,7 +105,7 @@ if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then fi # set up our default paths. 
-source ./defaults.sh +source ../defaults.sh export DOMAIN_NAME="$DOMAIN_NAME" export REGISTRY_DOCKER_IMAGE="registry:2" @@ -126,26 +123,23 @@ export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH" export RESTART_FRONT_END="$RESTART_FRONT_END" -# ensure our cluster path is created. -mkdir -p "$CLUSTER_PATH" - -# if an authorized_keys file does not exist, we'll stub one out with the current user. -# add additional id_rsa.pub entries manually for more administrative logins. -if [ ! -f "$CLUSTER_PATH/authorized_keys" ]; then - cat "$SSH_HOME/id_rsa.pub" >> "$CLUSTER_PATH/authorized_keys" - echo "INFO: Sovereign Stack just stubbed out '$CLUSTER_PATH/authorized_keys'. Go update it." - echo " Add ssh pubkeys for your various management machines, if any." - echo " By default we added your main ssh pubkey: '$SSH_HOME/id_rsa.pub'." - exit 1 +# todo convert this to Trezor-T +SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub" +export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH" +if [ ! -f "$SSH_PUBKEY_PATH" ]; then + # generate a new SSH key for the base vm image. + ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N "" fi +# ensure our cluster path is created. +mkdir -p "$CLUSTER_PATH" CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" export CLUSTER_DEFINITION="$CLUSTER_DEFINITION" ######################################### if [ ! -f "$CLUSTER_DEFINITION" ]; then - echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'." + echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster'." 
exit 1 fi @@ -158,7 +152,6 @@ function new_pass { function instantiate_vms { - export BTC_CHAIN="$BTC_CHAIN" export UPDATE_BTCPAY="$UPDATE_BTCPAY" export RECONFIGURE_BTCPAY_SERVER="$RECONFIGURE_BTCPAY_SERVER" @@ -173,7 +166,7 @@ function instantiate_vms { export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ./domain_env.sh # VALIDATE THE INPUT from the ENVFILE if [ -z "$DOMAIN_NAME" ]; then @@ -181,12 +174,15 @@ function instantiate_vms { exit 1 fi + # # switch to the default project + # if ! lxc project list --format csv | grep -a "default (current)"; then + # lxc project switch default + # fi - # first let's get the DISK_TO_USE and DATA_PLANE_MACVLAN_INTERFACE from the ss-config - # which is set up during LXD cluster creation ss-cluster. + # Goal is to get the macvlan interface. LXD_SS_CONFIG_LINE= - if lxc network list --format csv | grep lxdbrSS | grep -q ss-config; then - LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbrSS | grep ss-config)" + if lxc network list --format csv | grep lxdbr0 | grep -q ss-config; then + LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbr0 | grep ss-config)" fi if [ -z "$LXD_SS_CONFIG_LINE" ]; then @@ -196,23 +192,26 @@ function instantiate_vms { CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')" DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)" - DISK_TO_USE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f3)" - export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE" - export DISK_TO_USE="$DISK_TO_USE" - ./deployment/create_lxc_base.sh + + # # switch to the default project to ensure the base image is created. + # if ! lxc project list --format csv | grep -a "default (current)"; then + # lxc project switch default + # fi + + # create the lxd base image. + ./create_lxc_base.sh + + # # now switch to the current chain project. + # if ! 
lxc project list --format csv | grep -a "$BITCOIN_CHAIN"; then + # lxc project switch "$BITCOIN_CHAIN" + # fi export MAC_ADDRESS_TO_PROVISION= export VPS_HOSTNAME="$VPS_HOSTNAME" export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME" - # ensure the admin has set the MAC address for the base image. - if [ -z "$SOVEREIGN_STACK_MAC_ADDRESS" ]; then - echo "ERROR: SOVEREIGN_STACK_MAC_ADDRESS is undefined. Check your project definition." - exit 1 - fi - DDNS_HOST= if [ "$VIRTUAL_MACHINE" = www ]; then @@ -226,17 +225,19 @@ function instantiate_vms { DDNS_HOST="$WWW_HOSTNAME" ROOT_DISK_SIZE_GB="$((ROOT_DISK_SIZE_GB + NEXTCLOUD_SPACE_GB))" elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then + + DDNS_HOST="$BTCPAY_HOSTNAME" VPS_HOSTNAME="$BTCPAY_HOSTNAME" MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS" - if [ "$BTC_CHAIN" = mainnet ]; then + if [ "$BITCOIN_CHAIN" = mainnet ]; then ROOT_DISK_SIZE_GB=150 - elif [ "$BTC_CHAIN" = testnet ]; then + elif [ "$BITCOIN_CHAIN" = testnet ]; then ROOT_DISK_SIZE_GB=70 fi - elif [ "$VIRTUAL_MACHINE" = "sovereign-stack" ]; then - DDNS_HOST="sovereign-stack-base" + elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then + DDNS_HOST="$BASE_IMAGE_VM_NAME" ROOT_DISK_SIZE_GB=8 else echo "ERROR: VIRTUAL_MACHINE not within allowable bounds." @@ -249,17 +250,13 @@ function instantiate_vms { export VIRTUAL_MACHINE="$VIRTUAL_MACHINE" export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN" export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION" - ./deployment/deploy_vms.sh - # if the local docker client isn't logged in, do so; - # this helps prevent docker pull errors since they throttle. - # if [ ! 
-f "$HOME/.docker/config.json" ]; then - # echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin - # fi + ./deploy_vms.sh # this tells our local docker client to target the remote endpoint via SSH export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN" + # enable docker swarm mode so we can support docker stacks. if docker info | grep -q "Swarm: inactive"; then docker swarm init --advertise-addr enp6s0 @@ -293,7 +290,7 @@ export SITE_LANGUAGE_CODES="en" export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)" export DEPLOY_GHOST=true export DEPLOY_NEXTCLOUD=false -export NOSTR_ACCOUNT_PUBKEY="NOSTR_IDENTITY_PUBKEY_GOES_HERE" +export NOSTR_ACCOUNT_PUBKEY= export DEPLOY_GITEA=false export GHOST_MYSQL_PASSWORD="$(new_pass)" export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)" @@ -305,7 +302,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)" EOL chmod 0744 "$SITE_DEFINITION_PATH" - echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!" + echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it!" exit 1 fi @@ -313,28 +310,26 @@ EOL } -CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')" -PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME" +PROJECT_PATH="$PROJECTS_DIR/$BITCOIN_CHAIN" mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects" export PROJECT_PATH="$PROJECT_PATH" # create a symlink from ./clusterpath/projects/project -if [ ! -d "$CLUSTER_PATH/projects/$PROJECT_NAME" ]; then - ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$PROJECT_NAME" +if [ ! -d "$CLUSTER_PATH/projects/$BITCOIN_CHAIN" ]; then + ln -s "$PROJECT_PATH" "$CLUSTER_PATH/projects/$BITCOIN_CHAIN" fi -# check if we need to provision a new lxc project. -if [ "$PROJECT_NAME" != "$CURRENT_PROJECT" ]; then - if ! lxc project list | grep -q "$PROJECT_NAME"; then - echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!" 
- lxc project create "$PROJECT_NAME" - fi - - echo "INFO: switch to lxd project '$PROJECT_NAME'." - lxc project switch "$PROJECT_NAME" - +# create the lxc project as specified by BITCOIN_CHAIN +if ! lxc project list | grep -q "$BITCOIN_CHAIN"; then + echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!" + lxc project create "$BITCOIN_CHAIN" fi +# # check if we need to provision a new lxc project. +# if [ "$BITCOIN_CHAIN" != "$CURRENT_PROJECT" ]; then +# echo "INFO: switch to lxd project '$BITCOIN_CHAIN'." +# lxc project switch "$BITCOIN_CHAIN" +# fi # check to see if the enf file exists. exist if not. PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition" @@ -346,11 +341,10 @@ if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then # see https://www.sovereign-stack.org/project-definition for more info. -export WWW_SERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED" -export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED" -export BTC_CHAIN="regtest|testnet|mainnet" -export PRIMARY_DOMAIN="domain0.tld" -export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld" +export WWW_SERVER_MAC_ADDRESS= +export BTCPAYSERVER_MAC_ADDRESS= +export PRIMARY_DOMAIN= +#export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld" export BTCPAY_SERVER_CPU_COUNT="4" export BTCPAY_SERVER_MEMORY_MB="4096" export WWW_SERVER_CPU_COUNT="6" @@ -359,8 +353,8 @@ export WWW_SERVER_MEMORY_MB="4096" EOL chmod 0744 "$PROJECT_DEFINITION_PATH" - echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it yo!" - echo "INFO: Learn more at https://www.sovereign-stack.org/project-definitions/" + echo "INFO: we stubbed a new project_defition for you at '$PROJECT_DEFINITION_PATH'. Go update it!" + echo "INFO: Learn more at https://www.sovereign-stack.org/projects/" exit 1 fi @@ -368,6 +362,22 @@ fi # source project defition. source "$PROJECT_DEFINITION_PATH" +if [ -z "$PRIMARY_DOMAIN" ]; then + echo "ERROR: The PRIMARY_DOMAIN is not specified. 
Check your project_definition." + exit 1 +fi + +if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then + echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project_definition." + exit 1 +fi + + +if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then + echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project_definition." + exit 1 +fi + # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list. DOMAIN_LIST="${PRIMARY_DOMAIN}" if [ -n "$OTHER_SITES_LIST" ]; then @@ -403,11 +413,32 @@ done # now let's run the www and btcpay-specific provisioning scripts. if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then - bash -c "./deployment/www/go.sh" + bash -c "./www/go.sh" + ssh ubuntu@"$PRIMARY_WWW_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead fi +# +LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)" +export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT" + export DOMAIN_NAME="$PRIMARY_DOMAIN" export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then - bash -c "./deployment/btcpayserver/go.sh" + bash -c "./btcpayserver/go.sh" + ssh ubuntu@"$BTCPAY_FQDN" echo "$LATEST_GIT_COMMIT" > /home/ubuntu/.ss-githead fi + +# deploy clams wallet. +LOCAL_CLAMS_PATH="$(pwd)/www/clams" +if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then + if [ ! -d "$LOCAL_CLAMS_PATH" ]; then + git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_PATH" + else + cd "$LOCAL_CLAMS_PATH" + git pull + cd - + fi +fi + + + diff --git a/deployment/deploy_vms.sh b/deployment/deploy_vms.sh index 2488222..e663108 100755 --- a/deployment/deploy_vms.sh +++ b/deployment/deploy_vms.sh @@ -1,15 +1,8 @@ #!/bin/bash -set -eu +set -ex cd "$(dirname "$0")" -# let's make sure we have an ssh keypair. We just use $SSH_HOME/id_rsa -# TODO convert this to SSH private key held on Trezor. THus trezor-T required for -# login operations. This should be configurable of course. -if [ ! 
-f "$SSH_HOME/id_rsa" ]; then - # generate a new SSH key for the base vm image. - ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N "" -fi ## This is a weird if clause since we need to LEFT-ALIGN the statement below. SSH_STRING="Host ${FQDN}" @@ -40,8 +33,9 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then ./stub_lxc_profile.sh "$LXD_VM_NAME" + lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$(date +%Y-%m)" "$LXD_VM_NAME" # now let's create a new VM to work with. - lxc init --profile="$LXD_VM_NAME" "$VM_NAME" "$LXD_VM_NAME" --vm + #lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm # let's PIN the HW address for now so we don't exhaust IP # and so we can set DNS internally. @@ -50,8 +44,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then lxc start "$LXD_VM_NAME" - ./wait_for_lxc_ip.sh "$LXD_VM_NAME" - + bash -c "./wait_for_lxc_ip.sh --lxc-name=$LXD_VM_NAME" fi # scan the remote machine and install it's identity in our SSH known_hosts file. @@ -68,3 +61,6 @@ if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then fi fi + + +ssh "$PRIMARY_WWW_FQDN" -- echo "" \ No newline at end of file diff --git a/deployment/destroy.sh b/deployment/destroy.sh new file mode 100755 index 0000000..02ba408 --- /dev/null +++ b/deployment/destroy.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" +# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first. + +RESPONSE= +read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE +if [ "$RESPONSE" != "y" ]; then + echo "STOPPING." + exit 0 +fi + +. ../defaults.sh +. ./cluster_env.sh + +for VM in www btcpayserver; do + LXD_NAME="$VM-${DOMAIN_NAME//./-}" + + if lxc list | grep -q "$LXD_NAME"; then + lxc delete -f "$LXD_NAME" + + # remove the ssh known endpoint else we get warnings. 
+ ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME" + fi + + if lxc profile list | grep -q "$LXD_NAME"; then + lxc profile delete "$LXD_NAME" + fi +done + + +# delete the base image so it can be created. +if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then + lxc delete -f "$BASE_IMAGE_VM_NAME" + # remove the ssh known endpoint else we get warnings. + ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME" +fi diff --git a/domain_env.sh b/deployment/domain_env.sh similarity index 95% rename from domain_env.sh rename to deployment/domain_env.sh index 963a2df..1917a3b 100755 --- a/domain_env.sh +++ b/deployment/domain_env.sh @@ -8,11 +8,11 @@ export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME" export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME" export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME" export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME" +export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME" export ADMIN_ACCOUNT_USERNAME="info" export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME" export REMOTE_NEXTCLOUD_PATH="$REMOTE_HOME/nextcloud" export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea" -export BTC_CHAIN="$BTC_CHAIN" export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES" diff --git a/deployment/help.txt b/deployment/help.txt new file mode 100644 index 0000000..2b9e2fc --- /dev/null +++ b/deployment/help.txt @@ -0,0 +1,15 @@ + +Sovereign Stack Help. + +You are in the Sovereign Stack management environment. From here, you can issue several commands: + + ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack. + ss-deploy - Creates an deployment to your active LXD remote (lxc remote get-default). + ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTUVE of user data). + ss-migrate - migrates an existing deployment to the newest version of Sovereign Stack. + ss-show - show the lxd resources associated with the current remote. 
+ +For more infomation about all these topics, consult the Sovereign Stack website. Relevant posts include: + + - https://www.sovereign-stack.org/commands + diff --git a/deployment/migrate.sh b/deployment/migrate.sh new file mode 100755 index 0000000..6a254b2 --- /dev/null +++ b/deployment/migrate.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -exu +cd "$(dirname "$0")" + +USER_SAYS_YES=false + +for i in "$@"; do + case $i in + -y) + USER_SAYS_YES=true + shift + ;; + *) + echo "Unexpected option: $1" + ;; + esac +done + +. ../defaults.sh + +. ./cluster_env.sh + +# Check to see if any of the VMs actually don't exist. +# (we only migrate instantiated vms) +for VM in www btcpayserver; do + LXD_NAME="$VM-${DOMAIN_NAME//./-}" + + # if the VM doesn't exist, the we emit an error message and hard quit. + if ! lxc list --format csv | grep -q "$LXD_NAME"; then + echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again." + exit 1 + fi +done + +BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz" +echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH" + +# first we run ss-deploy --stop +# this grabs a backup of all data (backups are on by default) and saves them to the management machine +# the --stop flag ensures that services do NOT come back online. +# by default, we grab a backup. + +# run deploy which backups up everything, but doesnt restart any services. +bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" + +# call the destroy script. If user proceed, then user data is DESTROYED! +USER_SAYS_YES="$USER_SAYS_YES" ./destroy.sh + +# Then we can run a restore operation and specify the backup archive at the CLI. 
+bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" diff --git a/deployment/reset.sh b/deployment/reset.sh new file mode 100755 index 0000000..79a01f3 --- /dev/null +++ b/deployment/reset.sh @@ -0,0 +1,78 @@ +#!/bin/bash + + +set -ex +cd "$(dirname "$0")" + +source ../defaults.sh + +echo "Need to uncomment" +exit 1 +# ./destroy.sh + +# # these only get initialzed upon creation, so we MUST delete here so they get recreated. +# if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then +# lxc profile delete "$BASE_IMAGE_VM_NAME" +# fi + +# if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then +# lxc image rm "$BASE_IMAGE_VM_NAME" +# fi + +# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then +# lxc image rm "$UBUNTU_BASE_IMAGE_NAME" +# fi + +# CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')" +# if ! lxc info | grep -q "project: default"; then +# lxc project switch default +# lxc project delete "$CURRENT_PROJECT" +# fi + +# if lxc profile show default | grep -q "root:"; then +# lxc profile device remove default root +# fi + +# if lxc profile show default| grep -q "eth0:"; then +# lxc profile device remove default eth0 +# fi + +# if lxc network list --format csv | grep -q lxdbr0; then +# lxc network delete lxdbr0 +# fi + +# if lxc network list --format csv | grep -q lxdbr1; then +# lxc network delete lxdbr1 +# fi + +# if lxc storage list --format csv | grep -q ss-base; then +# lxc storage delete ss-base +# fi + +# CURRENT_REMOTE="$(lxc remote get-default)" +# if ! 
lxc remote get-default | grep -q "local"; then +# lxc remote switch local +# lxc remote remove "$CURRENT_REMOTE" +# fi + + + + + +# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then +# lxc image delete "$UBUNTU_BASE_IMAGE_NAME" +# fi + + +# if snap list | grep -q lxd; then +# sudo snap remove lxd +# sleep 2 +# fi + +# if zfs list | grep -q sovereign-stack; then +# sudo zfs destroy -r sovereign-stack +# fi + +# if zfs list | grep -q "sovereign-stack"; then +# sudo zfs destroy -r "rpool/lxd" +# fi diff --git a/show_lxc.sh b/deployment/show.sh similarity index 60% rename from show_lxc.sh rename to deployment/show.sh index fcc6d5d..4dcc125 100755 --- a/show_lxc.sh +++ b/deployment/show.sh @@ -3,5 +3,8 @@ lxc list lxc network list lxc profile list -lxc storage list lxc image list +lxc storage list +lxc storage info ss-base +lxc project list +lxc remote list \ No newline at end of file diff --git a/deployment/stub_lxc_profile.sh b/deployment/stub_lxc_profile.sh index 9bbfaa0..912d9b9 100755 --- a/deployment/stub_lxc_profile.sh +++ b/deployment/stub_lxc_profile.sh @@ -1,11 +1,12 @@ #!/bin/bash -set -eu +set -exu +cd "$(dirname "$0")" -LXD_HOSTNAME="$1" +LXD_HOSTNAME="${1:-}" # generate the custom cloud-init file. Cloud init installs and configures sshd -SSH_AUTHORIZED_KEY=$(<"$SSH_HOME/id_rsa.pub") +SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH") eval "$(ssh-agent -s)" ssh-add "$SSH_HOME/id_rsa" export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY" @@ -36,8 +37,13 @@ EOF fi -# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image. -if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then +# first of all, if there are uncommited changes, we quit. You better stash or commit! +# Remote VPS instances are tagged with your current git HEAD so we know which code revision +# used when provisioning the VPS. 
+LATEST_GIT_COMMIT="$(cat ../.git/refs/heads/master)" +export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT" + +if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then # this is for the base image only... cat >> "$YAML_PATH" <> /home/ubuntu/.bash_profile - - echo "alias bitcoin-cli=\"bitcoin-cli.sh \$@\"" >> /home/ubuntu/.bash_profile - - echo "alias lightning-cli=\"bitcoin-lightning-cli.sh \$@\"" >> /home/ubuntu/.bash_profile - - sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose - - sudo chmod +x /usr/local/bin/docker-compose - sudo apt-get install -y openssh-server - EOF else @@ -194,7 +132,7 @@ else package_upgrade: false package_reboot_if_required: false - preserve_hostname: false + preserve_hostname: true fqdn: ${FQDN} user.network-config: | @@ -235,7 +173,7 @@ description: Default LXD profile for ${FILENAME} devices: root: path: / - pool: sovereign-stack + pool: ss-base type: disk config: source: cloud-init:config @@ -243,20 +181,19 @@ devices: EOF # Stub out the network piece for the base image. -if [ "$LXD_HOSTNAME" = sovereign-stack ] ; then +if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ] ; then -# If we are deploying the www, we attach the vm to the underlay via macvlan. +# cat >> "$YAML_PATH" <> "$YAML_PATH" <> "$YAML_PATH" <> "$SSH_HOME/known_hosts" - -ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu diff --git a/deployment/www/.gitignore b/deployment/www/.gitignore new file mode 100644 index 0000000..3b3e03f --- /dev/null +++ b/deployment/www/.gitignore @@ -0,0 +1 @@ +clams \ No newline at end of file diff --git a/deployment/www/backup_path.sh b/deployment/www/backup_path.sh index 936cba0..793f23e 100755 --- a/deployment/www/backup_path.sh +++ b/deployment/www/backup_path.sh @@ -3,17 +3,11 @@ set -eu cd "$(dirname "$0")" -# TODO: We are using extra space on the remote VPS at the moment for the duplicity backup files. 
-# we could eliminate that and simply save duplicity backups to the management machine running the script -# this could be done by using a local path and mounting it on the remote VPS. -# maybe something like https://superuser.com/questions/616182/how-to-mount-local-directory-to-remote-like-sshfs - -# step 1: run duplicity on the remote system to backup all files to the remote system. -# --allow-source-mismatch +# this script backups up a source path to a destination folder on the remote VM +# then pulls that data down to the maanagement environment # if the source files to backup don't exist on the remote host, we return. if ! ssh "$PRIMARY_WWW_FQDN" "[ -d $REMOTE_SOURCE_BACKUP_PATH ]"; then - echo "INFO: The path to backup does not exist. There's nothing to backup! That's ok, execution will continue." exit 0 fi @@ -33,4 +27,3 @@ rsync -av "$SSHFS_PATH/" "$LOCAL_BACKUP_PATH/" # step 4: unmount the SSHFS filesystem and cleanup. umount "$SSHFS_PATH" rm -rf "$SSHFS_PATH" - diff --git a/deployment/www/generate_certs.sh b/deployment/www/generate_certs.sh index 63fbb13..06e770d 100755 --- a/deployment/www/generate_certs.sh +++ b/deployment/www/generate_certs.sh @@ -1,7 +1,6 @@ #!/bin/bash -set -e - +set -ex # let's do a refresh of the certificates. Let's Encrypt will not run if it's not time. docker pull certbot/certbot:latest @@ -12,9 +11,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../domain_env.sh # with the lxd side, we are trying to expose ALL OUR services from one IP address, which terminates # at a cachehing reverse proxy that runs nginx. @@ -23,6 +22,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do # this is minimum required; www and btcpay. 
DOMAIN_STRING="-d $DOMAIN_NAME -d $WWW_FQDN -d $BTCPAY_USER_FQDN" + if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi diff --git a/deployment/www/go.sh b/deployment/www/go.sh index 6e34f5d..7f35657 100755 --- a/deployment/www/go.sh +++ b/deployment/www/go.sh @@ -14,10 +14,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" - + source ../domain_env.sh ### Let's check to ensure all the requiredsettings are set. if [ "$DEPLOY_GHOST" = true ]; then @@ -65,8 +64,6 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do exit 1 fi - TOR_CONFIG_PATH= - done ./stop_docker_stacks.sh @@ -108,9 +105,9 @@ if [ "$RESTART_FRONT_END" = true ]; then export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../domain_env.sh # these variable are used by both backup/restore scripts. export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER" @@ -141,6 +138,7 @@ fi ./stub/gitea_yml.sh ./stub/nostr_yml.sh + # # start a browser session; point it to port 80 to ensure HTTPS redirect. # # WWW_FQDN is in our certificate, so we resolve to that. 
# wait-for-it -t 320 "$WWW_FQDN:80" diff --git a/deployment/www/stop_docker_stacks.sh b/deployment/www/stop_docker_stacks.sh index a20add6..4827200 100755 --- a/deployment/www/stop_docker_stacks.sh +++ b/deployment/www/stop_docker_stacks.sh @@ -9,9 +9,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../domain_env.sh ### Stop all services. for APP in ghost nextcloud gitea nostr; do diff --git a/deployment/www/stub/clams/Dockerfile b/deployment/www/stub/clams/Dockerfile deleted file mode 100644 index 40d2d44..0000000 --- a/deployment/www/stub/clams/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -FROM node:latest - -RUN apt-get update && apt-get install tzdata -y -ENV TZ="America/New_York" - -#RUN npm install -g npm@9.3.0 - -# Clone the repository -RUN git clone https://github.com/clams-tech/browser-app.git /usr/src/clams -WORKDIR /usr/src/clams - -# checkout specific tag -RUN git -c advice.detachedHead=false checkout tags/1.2.0 - -# couldn't do a yarn build without updating this. -# RUN npx -y update-browserslist-db@latest - - -# install dependencies -RUN yarn - -EXPOSE 4173 - -RUN mkdir /output -VOLUME /output - -RUN yarn build - -ENTRYPOINT [ "cp", "-a", "/usr/src/clams/.svelte-kit/output/.", "/output/" ] \ No newline at end of file diff --git a/deployment/www/stub/clams/build.sh b/deployment/www/stub/clams/build.sh deleted file mode 100755 index 96d4f89..0000000 --- a/deployment/www/stub/clams/build.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# The purpose of this script is to use a Docker container to get and build the Clams -# server-side pieces and output them to a specified directory. These files are then -# ready build to be served by a TLS-enabled reverse proxy. 
It goes -# Client Browser -> wss (WebSocket over TLS) -> ProxyServer -> TCP to btcpayserver:9735 - -set -ex -cd "$(dirname "$0")" - -export CLAMS_OUTPUT_DIR="$REMOTE_HOME/clams" - -ssh "$PRIMARY_WWW_FQDN" sudo rm -rf "$CLAMS_OUTPUT_DIR" -ssh "$PRIMARY_WWW_FQDN" mkdir -p "$CLAMS_OUTPUT_DIR" - -if docker ps | grep -q clams; then - docker kill clams -fi - -if docker ps -a | grep -q clams; then - docker system prune -f -fi - -docker build -t clams:latest . - -docker run -it --name clams -v "$CLAMS_OUTPUT_DIR":/output clams:latest - -ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$CLAMS_OUTPUT_DIR" diff --git a/deployment/www/stub/ghost_yml.sh b/deployment/www/stub/ghost_yml.sh index 913a7b5..1ca52c9 100755 --- a/deployment/www/stub/ghost_yml.sh +++ b/deployment/www/stub/ghost_yml.sh @@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../../domain_env.sh # for each language specified in the site_definition, we spawn a separate ghost container # at https://www.domain.com/$LANGUAGE_CODE diff --git a/deployment/www/stub/gitea_yml.sh b/deployment/www/stub/gitea_yml.sh index bd4e807..4c8d84b 100755 --- a/deployment/www/stub/gitea_yml.sh +++ b/deployment/www/stub/gitea_yml.sh @@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. 
- source "$RESPOSITORY_PATH/reset_env.sh" + source ../../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../../domain_env.sh if [ "$DEPLOY_GITEA" = true ]; then GITEA_PATH="$REMOTE_GITEA_PATH/$DOMAIN_NAME/${LANGUAGE_CODE}" diff --git a/deployment/www/stub/nextcloud_yml.sh b/deployment/www/stub/nextcloud_yml.sh index 44d1ce6..a140666 100755 --- a/deployment/www/stub/nextcloud_yml.sh +++ b/deployment/www/stub/nextcloud_yml.sh @@ -8,9 +8,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export SITE_PATH="$SITES_PATH/$DOMAIN_NAME" # source the site path so we know what features it has. - source "$RESPOSITORY_PATH/reset_env.sh" + source ../../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" + source ../../domain_env.sh # ensure remote directories exist if [ "$DEPLOY_NEXTCLOUD" = true ]; then diff --git a/deployment/www/stub/nginx_config.sh b/deployment/www/stub/nginx_config.sh index ff99073..2e83f73 100755 --- a/deployment/www/stub/nginx_config.sh +++ b/deployment/www/stub/nginx_config.sh @@ -1,9 +1,8 @@ #!/bin/bash -set -eu +set -ex cd "$(dirname "$0")" - # here's the NGINX config. We support ghost and nextcloud. NGINX_CONF_PATH="$PROJECT_PATH/nginx.conf" @@ -12,7 +11,6 @@ echo "" > "$NGINX_CONF_PATH" # iterate over all our domains and create the nginx config file. iteration=0 -echo "DOMAIN_LIST: $DOMAIN_LIST" for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export DOMAIN_NAME="$DOMAIN_NAME" @@ -20,10 +18,11 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}" # source the site path so we know what features it has. 
- source "$RESPOSITORY_PATH/reset_env.sh" + echo "BEFORE" + source ../../../defaults.sh source "$SITE_PATH/site_definition" - source "$RESPOSITORY_PATH/domain_env.sh" - + source ../../domain_env.sh + echo "after" if [ $iteration = 0 ]; then cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do STACK_NAME="$DOMAIN_IDENTIFIER-$LANGUAGE_CODE" @@ -289,7 +312,6 @@ EOL # Main HTTPS listener for https://${WWW_FQDN} server { listen 443 ssl http2; - listen [::]:443 ssl http2; ssl_certificate $CONTAINER_TLS_PATH/fullchain.pem; ssl_certificate_key $CONTAINER_TLS_PATH/privkey.pem; @@ -328,7 +350,7 @@ EOL cat >>"$NGINX_CONF_PATH" <>"$NGINX_CONF_PATH" < /dev/null -fi - -sudo apt-get update - -# TODO REVIEW management machine software requirements -# to a host on SERVERS LAN so that it can operate -# TODO document which dependencies are required by what software, e.g., trezor, docker, etc. -# virt-manager allows us to run type-1 vms desktop version. We use remote viewer to get a GUI for the VM -sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \ - apt-transport-https ca-certificates lsb-release docker-ce-cli \ - python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses \ - libcanberra-gtk-module virt-manager pass - - -# for trezor installation -pip3 install setuptools wheel -pip3 install trezor_agent - -if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then - sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules -fi - -# TODO initialize pass here; need to first initialize Trezor-T certificates. - - -# install lxd as a snap if it's not installed. We only really use the client part of this package -# on the management machine. +# install snap if ! snap list | grep -q lxd; then - sudo snap install lxd --candidate + sudo snap install lxd + sleep 3 + + # run lxd init on the remote server./dev/nvme1n1 + # + cat <> "$HOME/.bashrc" - ADDED_COMMAND=true - fi -done +if ! 
< "$HOME/.bashrc" grep -q "ss-manage"; then + echo "alias ss-manage='$(pwd)/manage.sh \$@'" >> "$HOME/.bashrc" + ADDED_COMMAND=true +fi + +wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1 + +# Let's remove any entry in our known_hosts, then add it back. +# we are using IP address here so we don't have to rely on external DNS +# configuration for the base image preparataion. +ssh-keygen -R "$IP_V4_ADDRESS" + +ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts" + +ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu + +ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh + +lxc restart ss-mgmt if [ "$ADDED_COMMAND" = true ]; then - echo "WARNING! You need to run 'source ~/.bashrc' before continuing." + echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment." fi diff --git a/manage.sh b/manage.sh new file mode 100755 index 0000000..0c5cb2b --- /dev/null +++ b/manage.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -eu +cd "$(dirname "$0")" + +if ! lxc remote get-default | grep -q local; then + lxc remote switch local +fi + +# if the mgmt machine doesn't exist, then warn the user to perform ./install.sh +if ! lxc list --format csv | grep -q "ss-mgmt"; then + echo "ERROR: the management machine VM does not exist. You probably need to run './install.sh'." + echo "INFO: check out https://www.sovereign-stack.org/tag/code-lifecycle-management/ for more information." +fi + +# if the machine does exist, let's make sure it's RUNNING. +if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then + echo "INFO: The management machine was in a STOPPED state. Starting the environment. Please wait." + lxc start ss-mgmt + sleep 30 +fi + +. 
./management/wait_for_lxc_ip.sh + +wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1 + +ssh ubuntu@"$IP_V4_ADDRESS" diff --git a/51-trezor.rules b/management/51-trezor.rules similarity index 100% rename from 51-trezor.rules rename to management/51-trezor.rules diff --git a/management/bash_profile b/management/bash_profile new file mode 100644 index 0000000..3ecf9bd --- /dev/null +++ b/management/bash_profile @@ -0,0 +1,11 @@ +#!/bin/bash + +alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/deploy.sh $@' +alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@' +alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@' +alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@' +alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@' +alias ss-destroy='/home/ubuntu/sovereign-stack/deployment/destroy.sh $@' +alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt' + +alias ll='ls -lah' diff --git a/management/bashrc b/management/bashrc new file mode 100644 index 0000000..afdeafb --- /dev/null +++ b/management/bashrc @@ -0,0 +1,117 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. +# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) +# for examples + +# If not running interactively, don't do anything +case $- in + *i*) ;; + *) return;; +esac + +# don't put duplicate lines or lines starting with space in the history. +# See bash(1) for more options +HISTCONTROL=ignoreboth + +# append to the history file, don't overwrite it +shopt -s histappend + +# for setting history length see HISTSIZE and HISTFILESIZE in bash(1) +HISTSIZE=1000 +HISTFILESIZE=2000 + +# check the window size after each command and, if necessary, +# update the values of LINES and COLUMNS. +shopt -s checkwinsize + +# If set, the pattern "**" used in a pathname expansion context will +# match all files and zero or more directories and subdirectories. 
+#shopt -s globstar + +# make less more friendly for non-text input files, see lesspipe(1) +[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" + +# set variable identifying the chroot you work in (used in the prompt below) +if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then + debian_chroot=$(cat /etc/debian_chroot) +fi + +# set a fancy prompt (non-color, unless we know we "want" color) +case "$TERM" in + xterm-color|*-256color) color_prompt=yes;; +esac + +# uncomment for a colored prompt, if the terminal has the capability; turned +# off by default to not distract the user: the focus in a terminal window +# should be on the output of commands, not on the prompt +#force_color_prompt=yes + +if [ -n "$force_color_prompt" ]; then + if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then + # We have color support; assume it's compliant with Ecma-48 + # (ISO/IEC-6429). (Lack of such support is extremely rare, and such + # a case would tend to support setf rather than setaf.) 
+ color_prompt=yes + else + color_prompt= + fi +fi + +if [ "$color_prompt" = yes ]; then + PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' +else + PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' +fi +unset color_prompt force_color_prompt + +# If this is an xterm set the title to user@host:dir +case "$TERM" in +xterm*|rxvt*) + PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" + ;; +*) + ;; +esac + +# enable color support of ls and also add handy aliases +if [ -x /usr/bin/dircolors ]; then + test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)" + alias ls='ls --color=auto' + #alias dir='dir --color=auto' + #alias vdir='vdir --color=auto' + + alias grep='grep --color=auto' + alias fgrep='fgrep --color=auto' + alias egrep='egrep --color=auto' +fi + +# colored GCC warnings and errors +#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' + +# some more ls aliases +alias ll='ls -alF' +alias la='ls -A' +alias l='ls -CF' + +# Add an "alert" alias for long running commands. Use like so: +# sleep 10; alert +alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"' + +# Alias definitions. +# You may want to put all your additions into a separate file like +# ~/.bash_aliases, instead of adding them here directly. +# See /usr/share/doc/bash-doc/examples in the bash-doc package. + +if [ -f ~/.bash_aliases ]; then + . ~/.bash_aliases +fi + +# enable programmable completion features (you don't need to enable +# this, if it's already enabled in /etc/bash.bashrc and /etc/profile +# sources /etc/bash.bashrc). +if ! shopt -oq posix; then + if [ -f /usr/share/bash-completion/bash_completion ]; then + . /usr/share/bash-completion/bash_completion + elif [ -f /etc/bash_completion ]; then + . 
/etc/bash_completion + fi +fi diff --git a/management/motd b/management/motd new file mode 100644 index 0000000..9da0546 --- /dev/null +++ b/management/motd @@ -0,0 +1,4 @@ +#!/bin/bash + +echo "Welcome to the management environment. Run 'ss-help' to get started." + diff --git a/management/provision.sh b/management/provision.sh new file mode 100755 index 0000000..061ee42 --- /dev/null +++ b/management/provision.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set -ex +cd "$(dirname "$0")" + +# NOTE! This script MUST be executed as root. +sudo apt-get update +sudo apt-get install -y gnupg ca-certificates curl lsb-release + +mkdir -p /etc/apt/keyrings + +# add the docker gpg key to keyring for docker-ce-cli +if [ ! -f /etc/apt/keyrings/docker.gpg ]; then + cat /home/ubuntu/sovereign-stack/certs/docker.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 2>&1 +fi + +# TODO REVIEW mgmt software requirements +sudo apt-get update +sudo apt-get install -y wait-for-it dnsutils rsync sshfs apt-transport-https docker-ce-cli \ + libcanberra-gtk-module snapd nano git + +sleep 1 + +#apt install python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses for trezor stuff +# for trezor installation +#pip3 install setuptools wheel +#pip3 install trezor_agent + +# ensure the trezor-t udev rules are in place. +# if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then +# sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules +# fi + +# install snap +if ! snap list | grep -q lxd; then + sudo snap install lxd + sleep 6 + + # We just do an auto initialization. All we are using is the LXD client inside the management environment. + sudo lxd init --auto +fi + +echo "Your management machine has been provisioned!" 
+ +# run a lxd command so we don't we a warning upon first invocation +lxc list > /dev/null 2>&1 + + +# add groups for docker and lxd +sudo addgroup docker + +sudo usermod -aG docker ubuntu +sudo usermod -aG lxd ubuntu + +# if an SSH pubkey does not exist, we create one. +if [ ! -f /home/ubuntu/.ssh/id_rsa.pub ]; then + # generate a new SSH key for the base vm image. + ssh-keygen -f /home/ubuntu/.ssh/id_rsa -t ecdsa -b 521 -N "" +fi diff --git a/management/sshd_config b/management/sshd_config new file mode 100644 index 0000000..fb165c8 --- /dev/null +++ b/management/sshd_config @@ -0,0 +1,116 @@ +# This is the sshd server system-wide configuration file. See +# sshd_config(5) for more information. + +# This sshd was compiled with PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options override the +# default value. + +Include /etc/ssh/sshd_config.d/*.conf + +Port 22 +#AddressFamily any +ListenAddress 0.0.0.0 +#ListenAddress :: + +#HostKey /etc/ssh/ssh_host_rsa_key +#HostKey /etc/ssh/ssh_host_ecdsa_key +#HostKey /etc/ssh/ssh_host_ed25519_key + +# Ciphers and keying +#RekeyLimit default none + +# Logging +#SyslogFacility AUTH +#LogLevel INFO + +# Authentication: + +#LoginGraceTime 2m +#PermitRootLogin prohibit-password +#StrictModes yes +#MaxAuthTries 6 +#MaxSessions 10 + +#PubkeyAuthentication yes + +# Expect .ssh/authorized_keys2 to be disregarded by default in future. 
+#AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2 + +#AuthorizedPrincipalsFile none + +#AuthorizedKeysCommand none +#AuthorizedKeysCommandUser nobody + +# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts +#HostbasedAuthentication no +# Change to yes if you don't trust ~/.ssh/known_hosts for +# HostbasedAuthentication +#IgnoreUserKnownHosts no +# Don't read the user's ~/.rhosts and ~/.shosts files +#IgnoreRhosts yes + +# To disable tunneled clear text passwords, change to no here! +#PasswordAuthentication yes +#PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +KbdInteractiveAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes +#KerberosGetAFSToken no + +# GSSAPI options +#GSSAPIAuthentication no +#GSSAPICleanupCredentials yes +#GSSAPIStrictAcceptorCheck yes +#GSSAPIKeyExchange no + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the KbdInteractiveAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via KbdInteractiveAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and KbdInteractiveAuthentication to 'no'. 
+UsePAM yes + +#AllowAgentForwarding yes +#AllowTcpForwarding yes +#GatewayPorts no +X11Forwarding yes +#X11DisplayOffset 10 +#X11UseLocalhost yes +#PermitTTY yes +#PrintMotd no +#PrintLastLog yes +#TCPKeepAlive yes +#PermitUserEnvironment no +#Compression delayed +#ClientAliveInterval 0 +#ClientAliveCountMax 3 +#UseDNS no +#PidFile /run/sshd.pid +#MaxStartups 10:30:100 +#PermitTunnel no +#ChrootDirectory none +#VersionAddendum none + +# no default banner path +#Banner none + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +# override default of no subsystems +Subsystem sftp /usr/lib/openssh/sftp-server + +PrintMotd yes \ No newline at end of file diff --git a/management/wait_for_lxc_ip.sh b/management/wait_for_lxc_ip.sh new file mode 100755 index 0000000..c09804a --- /dev/null +++ b/management/wait_for_lxc_ip.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -e + +IP_V4_ADDRESS= +while true; do + # wait for + if lxc list ss-mgmt | grep -q enp5s0; then + break; + else + sleep 1 + fi +done + +while true; do + IP_V4_ADDRESS=$(lxc list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') + if [ -n "$IP_V4_ADDRESS" ]; then + # give the machine extra time to spin up. + break; + else + sleep 1 + printf '.' + fi +done + + +export IP_V4_ADDRESS="$IP_V4_ADDRESS" diff --git a/migrate.sh b/migrate.sh deleted file mode 100755 index d0cb958..0000000 --- a/migrate.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash - -set -eu -cd "$(dirname "$0")" - -CURRENT_CLUSTER="$(lxc remote get-default)" - -if echo "$CURRENT_CLUSTER" | grep -q "production"; then - echo "WARNING: You are running a migration procedure on a production system." - echo "" - - - RESPONSE= - read -r -p " Are you sure you want to continue (y) ": RESPONSE - if [ "$RESPONSE" != "y" ]; then - echo "STOPPING." 
- exit 1 - fi - -fi - -source ./defaults.sh - -export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER" -CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition" -export CLUSTER_DEFINITION="$CLUSTER_DEFINITION" - -# ensure the cluster definition exists. -if [ ! -f "$CLUSTER_DEFINITION" ]; then - echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'." - exit 1 -fi - -source "$CLUSTER_DEFINITION" - -# source project defition. -# Now let's load the project definition. -PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME" -PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition" -source "$PROJECT_DEFINITION_PATH" - -export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition" -source "$PRIMARY_SITE_DEFINITION_PATH" - -# Check to see if any of the VMs actually don't exist. -# (we only migrate instantiated vms) -for VM in www btcpayserver; do - LXD_NAME="$VM-${DOMAIN_NAME//./-}" - - # if the VM doesn't exist, the we emit an error message and hard quit. - if ! lxc list --format csv | grep -q "$LXD_NAME"; then - echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again." - exit 1 - fi -done - -BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz" -echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH" - -# first we run ss-deploy --stop -# this grabs a backup of all data (backups are on by default) and saves them to the management machine -# the --stop flag ensures that services do NOT come back online. -# by default, we grab a backup. - -bash -c "./deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" - -RESPONSE= -read -r -p "Are you sure you want to continue the migration? ": RESPONSE -if [ "$RESPONSE" != "y" ]; then - echo "STOPPING." 
- exit 0 -fi - - -for VM in www btcpayserver; do - LXD_NAME="$VM-${DOMAIN_NAME//./-}" - lxc delete -f "$LXD_NAME" - - lxc profile delete "$LXD_NAME" -done - - -# delete the base image so it can be created. -if lxc list | grep -q sovereign-stack-base; then - lxc delete -f sovereign-stack-base -fi - -# these only get initialzed upon creation, so we MUST delete here so they get recreated. -if lxc profile list | grep -q sovereign-stack; then - lxc profile delete sovereign-stack -fi - -if lxc image list | grep -q sovereign-stack-base; then - lxc image rm sovereign-stack-base -fi - -if lxc image list | grep -q ubuntu-base; then - lxc image rm ubuntu-base -fi - -# Then we can run a restore operation and specify the backup archive at the CLI. -bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH" diff --git a/reset_env.sh b/reset_env.sh deleted file mode 100755 index 1478467..0000000 --- a/reset_env.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -eu - -export DOMAIN_NAME= -export DUPLICITY_BACKUP_PASSPHRASE= -export BTCPAY_HOSTNAME_IN_CERT= -export DEPLOY_GHOST=true -export DEPLOY_NEXTCLOUD=false -export NOSTR_ACCOUNT_PUBKEY= -export DEPLOY_GITEA=false -export GHOST_MYSQL_PASSWORD= -export GHOST_MYSQL_ROOT_PASSWORD= -export NEXTCLOUD_MYSQL_PASSWORD= -export NEXTCLOUD_MYSQL_ROOT_PASSWORD= -export GITEA_MYSQL_PASSWORD= -export GITEA_MYSQL_ROOT_PASSWORD= -export LANGUAGE_CODE="en" - -SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -source "$SCRIPT_DIR/defaults.sh" diff --git a/staging/Dockerfile b/staging/Dockerfile deleted file mode 100644 index aca6b75..0000000 --- a/staging/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM ubuntu:22.04 - -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y wait-for-it dnsutils rsync sshfs snapd lxd-client - -RUN mkdir /sovereign-stack -COPY ./deployment /sovereign-stack -WORKDIR /sovereign-stack - -RUN mkdir /domain -VOLUME 
/domain -ENV SITE_PATH=/domain - -COPY ./entrypoint.sh /entrypoint.sh -RUN chmod 0744 /entrypoint.sh - -CMD /entrypoint.sh diff --git a/staging/entrypoint.sh b/staging/entrypoint.sh deleted file mode 100644 index 9805216..0000000 --- a/staging/entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -if [ -z "$DOMAIN_NAME" ]; then - echo "ERROR: DOMAIN_NAME not defined.". - exit 1 -fi - -/sovereign-stack/deploy.sh --domain="$DOMAIN_NAME" \ No newline at end of file diff --git a/staging/tor.yml b/staging/tor.yml deleted file mode 100644 index d9854e1..0000000 --- a/staging/tor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "3.8" -services: - - # a hidden service that routes to the nginx container at http://onionurl.onion server block - tor-onion: - image: tor:latest - networks: - - tor-net - volumes: - - ${REMOTE_HOME}/tor:/var/lib/tor - - tor-logs:/var/log/tor - configs: - - source: tor-config - target: /etc/tor/torrc - mode: 0644 - deploy: - mode: replicated - replicas: 1 - restart_policy: - condition: on-failure - -volumes: - tor-data: - tor-logs: - -networks: - tor-net: - attachable: true - -configs: - tor-config: - file: ${TOR_CONFIG_PATH} diff --git a/uninstall.sh b/uninstall.sh new file mode 100755 index 0000000..b571fe6 --- /dev/null +++ b/uninstall.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +set -exu + +# this script undoes install.sh + +. ./defaults.sh + +if lxc list --format csv | grep -q ss-mgmt; then + + if ! 
lxc list --format csv | grep ss-mgmt | grep -qv "RUNNING"; then
+        lxc stop ss-mgmt
+    fi
+
+    lxc config device remove ss-mgmt sscode
+    lxc delete ss-mgmt
+fi
+
+# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+#     lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
+# fi
+
+# if lxc storage list --format csv | grep -q sovereign-stack; then
+#     lxc profile device remove default root
+#     lxc storage delete sovereign-stack
+# fi
+
+# if snap list | grep -q lxd; then
+#     sudo snap remove lxd
+#     sleep 2
+# fi
+
+# if zfs list | grep -q sovereign-stack; then
+#     sudo zfs destroy -r sovereign-stack
+# fi
+
+# if zfs list | grep -q "sovereign-stack"; then
+#     sudo zfs destroy -r "rpool/lxd"
+# fi
diff --git a/version.txt b/version.txt
index cce2b1f..70be42f 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v0.0.22
\ No newline at end of file
+v0.0.23
\ No newline at end of file