
Compare commits

...

17 Commits

24 changed files with 1132 additions and 192 deletions

.gitignore vendored Normal file

@ -0,0 +1 @@
archive

defaults.sh

@ -1,129 +0,0 @@
#!/bin/bash
set -e
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=false
export DEPLOY_NOSTR=false
export DEPLOY_NEXTCLOUD=false
export DEPLOY_GITEA=false
export WWW_HOSTNAME="www"
export BTCPAY_HOSTNAME="btcpayserver"
export BTCPAY_HOSTNAME_IN_CERT="btcpay"
export NEXTCLOUD_HOSTNAME="nextcloud"
export GITEA_HOSTNAME="git"
export NOSTR_HOSTNAME="relay"
export CLAMS_HOSTNAME="clams"
export SITE_LANGUAGE_CODES="en"
export LANGUAGE_CODE="en"
export NOSTR_ACCOUNT_PUBKEY=
# this is where the html is sourced from.
export SITE_HTML_PATH=
export BTCPAY_ADDITIONAL_HOSTNAMES=
export GHOST_MYSQL_PASSWORD=
export GHOST_MYSQL_ROOT_PASSWORD=
export NEXTCLOUD_MYSQL_PASSWORD=
export GITEA_MYSQL_PASSWORD=
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
export GITEA_MYSQL_ROOT_PASSWORD=
export DUPLICITY_BACKUP_PASSPHRASE=
#opt-add-fireflyiii;opt-add-zammad
export SSH_HOME="$HOME/.ssh"
export PASS_HOME="$HOME/.password-store"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="4"
export WWW_SERVER_MEMORY_MB="4096"
export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
export NEXTCLOUD_SPACE_GB=10
DEFAULT_DB_IMAGE="mariadb:10.11.2-jammy"
# run the docker stack.
export GHOST_IMAGE="ghost:5.38.0"
# TODO switch to mysql. May require intricate export work for existing sites.
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
#https://forum.ghost.org/t/how-to-migrate-from-mariadb-10-to-mysql-8/29575
export GHOST_DB_IMAGE="mysql:8.0.32"
export NGINX_IMAGE="nginx:1.23.3"
# version of backup is 24.0.3
export NEXTCLOUD_IMAGE="nextcloud:25.0.4"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
# TODO PIN the gitea version number.
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
export SS_ROOT_PATH="$HOME/ss"
export REMOTES_PATH="$SS_ROOT_PATH/remotes"
export PROJECTS_PATH="$SS_ROOT_PATH/projects"
export SITES_PATH="$SS_ROOT_PATH/sites"
# mount into ss-mgmt/home/ubuntu/snap/lxd/common/config
export LXD_CONFIG_PATH="$SS_ROOT_PATH/lxd"
# The base VM image.
export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
WEEK_NUMBER=$(date +%U)
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
export OTHER_SITES_LIST=
export BTCPAY_ALT_NAMES=
export BITCOIN_CHAIN=regtest
export REMOTE_HOME="/home/ubuntu"
export REMOTE_DATA_PATH="$REMOTE_HOME/ss-data"
export REMOTE_DATA_PATH_LETSENCRYPT="$REMOTE_DATA_PATH/letsencrypt"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups"
export BTCPAY_SERVER_APPPATH="$REMOTE_DATA_PATH/btcpayserver-docker"
# this space is for OS, docker images, etc
# values here are fine for regtest generally. Later scripts adjust
# these values based on testnet/mainnet
export WWW_SSDATA_DISK_SIZE_GB=20
export WWW_BACKUP_DISK_SIZE_GB=50
export WWW_DOCKER_DISK_SIZE_GB=30
export BTCPAYSERVER_SSDATA_DISK_SIZE_GB=20
export BTCPAYSERVER_BACKUP_DISK_SIZE_GB=5
export BTCPAYSERVER_DOCKER_DISK_SIZE_GB=30
export REGISTRY_URL="https://index.docker.io/v1"
# this is the git commit of the project/ sub git repo.
# used in the migration script to switch into past for backup
# then back to present (TARGET_PROJECT_GIT_COMMIT) for restore.
export TARGET_PROJECT_GIT_COMMIT=e1754181484007b79ac10aa2f0dd1cd5035b5763
#
export TESTNET_BLOCK_HASH=00000000d8277ba1ca66b40b3e3476629e6f0f97c5b8cfaeabfe402e55db223a
export MAINNET_BLOCK_HASH=000000000000000000047941e3a6102e8896a4ae66b962599568eb25abd6b405
export SS_CACHE_PATH="$SS_ROOT_PATH/cache"
export SS_JAMMY_PATH="$SS_CACHE_PATH/$UBUNTU_BASE_IMAGE_NAME"

deployment/base.sh Executable file

@ -0,0 +1,9 @@
#!/bin/bash
# The base VM image.
export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
WEEK_NUMBER=$(date +%U)
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
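For orientation, the names above expand roughly as follows (a sketch, not part of the commit; the week number 21 is hypothetical, and since "jammy" contains no dots the dot-to-dash substitution is a no-op):

#!/bin/bash
. ./base.sh
# assuming 'date +%U' printed 21 when base.sh was sourced:
echo "$BASE_IMAGE_VM_NAME"      # ss-base-jammy
echo "$UBUNTU_BASE_IMAGE_NAME"  # ss-ubuntu-jammy
echo "$DOCKER_BASE_IMAGE_NAME"  # ss-docker-jammy-21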

deployment/create_lxc_base.sh Executable file

@ -0,0 +1,92 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
. ./base.sh
bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# copy the image down from canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project=default
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME" --project=default
# for CHAIN in mainnet testnet; do
# for DATA in blocks chainstate; do
# lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
# done
# done
lxc start "$BASE_IMAGE_VM_NAME" --project=default
sleep 15
while lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- wait-for-it -t 100 127.0.0.1:22
# # If we have any chainstate or blocks in our SSME, let's push them to the
# # remote host as a zfs volume that way deployments can share a common history
# # of chainstate/blocks.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# # if the storage snapshot doesn't yet exist, create it.
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
# if [ -d "$DATA_PATH" ]; then
# COMPLETE_FILE_PATH="$DATA_PATH/complete"
# if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
# lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
# lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
# lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
# else
# echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
# fi
# fi
# fi
# done
# done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME" --project=default
lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project=default
fi
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
echo "INFO: Success creating the base image. Deleting artifacts from the build process."
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
# # now let's get a snapshot of each of the blocks/chainstate directories.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
# lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
# fi
# done
# done
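A hedged spot-check after a run, reusing the same csv listing the script itself greps (this is not part of the commit):

#!/bin/bash
. ./base.sh
# confirm the published base image is visible on the current remote.
if lxc image list --format csv --columns l | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
    echo "OK: image '$DOCKER_BASE_IMAGE_NAME' exists."
fi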

deployment/deploy_vm.sh Executable file

@ -0,0 +1,111 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
. ./base.sh
## This is a weird if clause since we need to LEFT-ALIGN the statement below.
SSH_STRING="Host ${FQDN}"
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
########## BEGIN
cat >> "$SSH_HOME/config" <<-EOF
${SSH_STRING}
HostName ${FQDN}
User ubuntu
EOF
###
fi
# if the machine doesn't exist, we create it.
if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# create a base image if needed and instantiate a VM.
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
exit 1
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
VM_ID=
BACKUP_DISK_SIZE_GB=
SSDATA_DISK_SIZE_GB=
DOCKER_DISK_SIZE_GB=
if [ "$VIRTUAL_MACHINE" = www ]; then
VM_ID="w"
BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
VM_ID="b"
BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
fi
DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
fi
# TODO ensure we are only GROWING the volume--never shrinking
lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
fi
lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
# now let's create a new VM to work with.
#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
lxc init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
# attach the docker block device.
lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
# if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# # attach any volumes
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# MOUNT_PATH="/$CHAIN-$DATA"
# lxc config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
# done
# done
# fi
lxc start "$LXD_VM_NAME"
sleep 10
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
fi
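The grow-only TODOs above could be satisfied with a guard along these lines (an assumption, not part of the commit; it reads the current size back with 'lxc storage volume get' and compares gigabytes before resizing):

#!/bin/bash
# refuse to shrink a volume; only ever grow it.
CURRENT_SIZE="$(lxc storage volume get ss-base "$DOCKER_VOLUME_NAME" size)"   # e.g. "30GB"
CURRENT_GB="${CURRENT_SIZE%GB}"
if [ -n "$CURRENT_GB" ] && [ "$DOCKER_DISK_SIZE_GB" -lt "$CURRENT_GB" ]; then
    echo "ERROR: refusing to shrink '$DOCKER_VOLUME_NAME' from ${CURRENT_GB}GB to ${DOCKER_DISK_SIZE_GB}GB."
    exit 1
fi
lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"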

deployment/deployment_defaults.sh

@ -0,0 +1,50 @@
#!/bin/bash
set -eu
# file paths
export SSH_HOME="$HOME/.ssh"
export PASS_HOME="$HOME/.password-store" #TODO
export SS_ROOT_PATH="$HOME/ss"
export REMOTES_PATH="$SS_ROOT_PATH/remotes"
export PROJECTS_PATH="$SS_ROOT_PATH/projects"
export SITES_PATH="$SS_ROOT_PATH/sites"
export LXD_CONFIG_PATH="$SS_ROOT_PATH/lxd"
export SS_CACHE_PATH="$SS_ROOT_PATH/cache"
export REMOTE_HOME="/home/ubuntu"
export REMOTE_DATA_PATH="$REMOTE_HOME/ss-data"
export REMOTE_DATA_PATH_LETSENCRYPT="$REMOTE_DATA_PATH/letsencrypt"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups"
export BTCPAY_SERVER_APPPATH="$REMOTE_DATA_PATH/btcpayserver-docker"
export BITCOIN_CHAIN=regtest
# this space is for OS, docker images, etc
# values here are fine for regtest generally. Later scripts adjust
# these values based on testnet/mainnet
export WWW_SSDATA_DISK_SIZE_GB=20
export WWW_BACKUP_DISK_SIZE_GB=50
export WWW_DOCKER_DISK_SIZE_GB=30
export BTCPAYSERVER_SSDATA_DISK_SIZE_GB=20
export BTCPAYSERVER_BACKUP_DISK_SIZE_GB=20
export BTCPAYSERVER_DOCKER_DISK_SIZE_GB=30
export WWW_HOSTNAME="www"
export BTCPAY_HOSTNAME="btcpayserver"
export BTCPAY_HOSTNAME_IN_CERT="btcpay"
export NEXTCLOUD_HOSTNAME="nextcloud"
export GITEA_HOSTNAME="git"
export NOSTR_HOSTNAME="relay"
export CLAMS_HOSTNAME="clams"
export REGISTRY_URL="https://index.docker.io/v1"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="4"
export WWW_SERVER_MEMORY_MB="4096"
export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"

deployment/down.sh

@ -2,7 +2,7 @@
# https://www.sovereign-stack.org/ss-down/
-set -exu
+set -eu
cd "$(dirname "$0")"
if lxc remote get-default -q | grep -q "local"; then
@ -26,13 +26,12 @@ for i in "$@"; do
esac
done
-. ../defaults.sh
+. ./deployment_defaults.sh
. ./remote_env.sh
. ./project_env.sh
# let's bring down services on the remote deployment if necessary.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
@ -45,16 +44,16 @@ for VIRTUAL_MACHINE in www btcpayserver; do
LXD_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
if lxc list | grep -q "$LXD_NAME"; then
-bash -c "./project/deploy.sh --stop --skip-$SKIP"
+bash -c "./up.sh --stop --skip-$SKIP"
lxc stop "$LXD_NAME"
lxc delete "$LXD_NAME"
-# remove the ssh known endpoint else we get warnings.
-ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi
+# remove the ssh known endpoint else we get warnings.
+ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VIRTUAL_MACHINE.$PRIMARY_DOMAIN" | exit
if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
fi
@ -87,9 +86,4 @@ if lxc network list -q | grep -q ss-ovn; then
lxc network delete ss-ovn
fi
-# delete the base image so it can be created.
-if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
-lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
-# remove the ssh known endpoint else we get warnings.
-ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
-fi
+# TODO make a snapshot on all the zfs storage volumes.

deployment/project_env.sh

@ -24,7 +24,7 @@ fi
source "$PROJECT_DEFINITION_PATH"
-export PRIMARY_DOMAIN="$PRIMARY_DOMAIN"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"
if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then

deployment/remote.sh

@ -18,8 +18,9 @@ if [ -z "$REMOTE_NAME" ]; then
exit 1
fi
-#shellcheck disable=SC1091
-source ../defaults.sh
+. ./deployment_defaults.sh
+. ./base.sh
export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
@ -221,7 +222,7 @@ if wait-for-it -t 20 "$FQDN:8443"; then
lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
lxc remote switch "$REMOTE_NAME"
-echo "INFO: You have create a new remote named '$REMOTE_NAME'. Your lxc client is now target it."
+echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your LXC client has been switched to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
exit 1
@ -250,5 +251,7 @@ if ! lxc storage list --format csv | grep -q ss-base; then
# done
else
-echo "WARNING! The host '$FQDN' appears to have Sovereign Stack workloads already provisioned. Proceed with care."
+echo "WARNING! The host '$FQDN' appears to have Sovereign Stack workloads already provisioned."
+echo "INFO: Here are your current Deployments."
+lxc project list -q
fi

deployment/remote_env.sh

@ -25,6 +25,8 @@ if echo "$CURRENT_REMOTE" | grep -q "production"; then
fi
+. ./deployment_defaults.sh
export REMOTE_PATH="$REMOTES_PATH/$CURRENT_REMOTE"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
export REMOTE_DEFINITION="$REMOTE_DEFINITION"


@ -1,9 +1,9 @@
#!/bin/bash
-set -eu
+set -e
cd "$(dirname "$0")"
-. ../defaults.sh
+. ./deployment_defaults.sh
. ./remote_env.sh

deployment/stub_lxc_profile.sh Executable file

@ -0,0 +1,291 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
VIRTUAL_MACHINE=base
LXD_HOSTNAME=
SSDATA_VOLUME_NAME=
BACKUP_VOLUME_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxd-hostname=*)
LXD_HOSTNAME="${i#*=}"
shift
;;
--vm=*)
VIRTUAL_MACHINE="${i#*=}"
shift
;;
--ss-volume-name=*)
SSDATA_VOLUME_NAME="${i#*=}"
shift
;;
--backup-volume-name=*)
BACKUP_VOLUME_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# generate the custom cloud-init file. Cloud init installs and configures sshd
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
mkdir -p "$PROJECT_PATH/cloud-init"
YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat > "$YAML_PATH" <<EOF
config:
EOF
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: 4
limits.memory: 4096MB
EOF
fi
if [ "$VIRTUAL_MACHINE" = www ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${WWW_SERVER_CPU_COUNT}"
limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
EOF
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
EOF
fi
. ./target.sh
# if VIRTUAL_MACHINE=base, then we are building the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
# this is for the base image only...
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
package_update: true
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: false
fqdn: ${BASE_IMAGE_VM_NAME}
packages:
- curl
- ssh-askpass
- apt-transport-https
- ca-certificates
- gnupg-agent
- software-properties-common
- lsb-release
- net-tools
- htop
- rsync
- duplicity
- sshfs
- fswatch
- jq
- git
- nano
- wait-for-it
- dnsutils
- wget
groups:
- docker
users:
- name: ubuntu
groups: docker
shell: /bin/bash
lock_passwd: false
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
EOF
if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
cat >> "$YAML_PATH" <<EOF
write_files:
- path: /etc/docker/daemon.json
permissions: 0644
owner: root
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
],
"labels": [
"PROJECT_COMMIT=${PROJECT_GIT_COMMIT}"
]
}
EOF
fi
fi
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
EOF
fi
if [ "$VIRTUAL_MACHINE" != base ]; then
# all other machines that are not the base image
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
package_update: false
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: true
fqdn: ${FQDN}
resize_rootfs: false
disk_setup:
/dev/sdb:
table_type: 'gpt'
layout: true
overwrite: false
fs_setup:
- label: docker-data
filesystem: 'ext4'
device: '/dev/sdb1'
overwrite: false
mounts:
- [ sdb, /var/lib/docker ]
mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
EOF
fi
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
user.network-config: |
version: 2
ethernets:
enp5s0:
dhcp4: true
dhcp4-overrides:
route-metric: 50
match:
macaddress: ${MAC_ADDRESS_TO_PROVISION}
set-name: enp5s0
enp6s0:
dhcp4: true
EOF
fi
# All profiles get a root disk and cloud-init config.
cat >> "$YAML_PATH" <<EOF
description: Default LXD profile for ${FILENAME}
devices:
root:
path: /
pool: ss-base
type: disk
config:
source: cloud-init:config
type: disk
EOF
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
ss-data:
path: ${REMOTE_DATA_PATH}
pool: ss-base
source: ${SSDATA_VOLUME_NAME}
type: disk
ss-backup:
path: ${REMOTE_BACKUP_PATH}
pool: ss-base
source: ${BACKUP_VOLUME_NAME}
type: disk
EOF
fi
# Stub out the network piece for the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic
enp6s0:
name: enp6s0
network: ss-ovn
type: nic
name: ${PRIMARY_DOMAIN}
EOF
fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if [ "$VIRTUAL_MACHINE" = base ]; then
if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME" --project default
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
else
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
fi
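A typical non-base invocation, mirroring the call in deploy_vm.sh (the hostname and volume names below are hypothetical examples of the "$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID" naming pattern):

#!/bin/bash
./stub_lxc_profile.sh \
    --vm=www \
    --lxd-hostname=www-domain0-tld \
    --ss-volume-name=domain0-ws \
    --backup-volume-name=domain0-wb
# then inspect what LXD actually stored:
lxc profile show www-domain0-tld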

deployment/target.sh Executable file

@ -0,0 +1,3 @@
#!/bin/bash
export TARGET_PROJECT_GIT_COMMIT=42a1604146bce97dd363f3f6e44afb7e19f1ce0f
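target.sh only pins the commit of the project sub-repo; the commented-out block in the install script (visible later in this diff) consumes it roughly like this sketch:

#!/bin/bash
. ./target.sh
cd "$(pwd)/deployment/project"
git -c advice.detachedHead=false pull origin main
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -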

deployment/up.sh Executable file

@ -0,0 +1,432 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
. ./target.sh
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs lxc; do
if ! command -v "$cmd" >/dev/null 2>&1; then
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
exit 1
fi
done
# do a spot check; if we are on production, warn.
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running command against a production system!"
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p "Are you sure you want to continue? (y): " RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
OTHER_SITES_LIST=
PRIMARY_DOMAIN=
RUN_CERT_RENEWAL=true
SKIP_BASE_IMAGE_CREATION=false
SKIP_WWW=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=false
BACKUP_APPS=false
BACKUP_BTCPAY=false
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
SKIP_BTCPAY=false
UPDATE_BTCPAY=false
REMOTE_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false
RESTART_FRONT_END=true
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-certs)
RESTORE_CERTS=true
shift
;;
--restore-www)
RESTORE_WWW=true
RESTORE_CERTS=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
shift
;;
--backup-www)
BACKUP_CERTS=true
BACKUP_APPS=true
shift
;;
--backup-btcpayserver)
BACKUP_BTCPAY=true
shift
;;
--stop)
STOP_SERVICES=true
RESTART_FRONT_END=false
shift
;;
--backup-archive-path=*)
BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
shift
;;
--update-btcpay)
UPDATE_BTCPAY=true
shift
;;
--skip-www)
SKIP_WWW=true
shift
;;
--skip-btcpayserver)
SKIP_BTCPAY=true
shift
;;
--skip-base-image)
SKIP_BASE_IMAGE_CREATION=true
shift
;;
--no-cert-renew)
RUN_CERT_RENEWAL=false
shift
;;
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: Use the '--backup-archive-path=/path/to/btcpay/archive.tar.gz' option when restoring btcpay server."
exit 1
fi
if [ "$RESTORE_BTCPAY" = true ] && [ ! -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: The backup archive path you specified DOES NOT exist!"
exit 1
fi
. ./remote_env.sh
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS"
export BACKUP_APPS="$BACKUP_APPS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export REMOTE_NAME="$REMOTE_NAME"
export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
export RESTORE_CERTS="$RESTORE_CERTS"
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
# ensure our remote path is created.
mkdir -p "$REMOTE_PATH"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
if [ ! -f "$REMOTE_DEFINITION" ]; then
echo "ERROR: The remote definition could not be found. You may need to re-run 'ss-remote'."
exit 1
fi
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
source "$REMOTE_DEFINITION"
export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
export DEPLOYMENT_STRING="$DEPLOYMENT_STRING"
# this is our password generation mechanism. We rely on GPG for secure password generation.
function new_pass {
gpg --gen-random --armor 1 25
}
function stub_site_definition {
mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites"
# create a symlink from the PROJECT_PATH/sites/DOMAIN_NAME to the ss-sites/domain name
DOMAIN_SYMLINK_PATH="$PROJECT_PATH/sites/$DOMAIN_NAME"
if [ ! -L "$DOMAIN_SYMLINK_PATH" ]; then
ln -r -s "$SITE_PATH" "$DOMAIN_SYMLINK_PATH"
fi
if [ ! -f "$SITE_PATH/site.conf" ]; then
# check to see if the env file exists. exit if not.
SITE_DEFINITION_PATH="$SITE_PATH/site.conf"
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
# stub out a site.conf with new passwords.
cat >"$SITE_DEFINITION_PATH" <<EOL
# https://www.sovereign-stack.org/ss-up/#siteconf
DOMAIN_NAME="${DOMAIN_NAME}"
# BTCPAY_ALT_NAMES="tip,store,pay,send"
SITE_LANGUAGE_CODES="en"
DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
DEPLOY_GHOST=true
DEPLOY_CLAMS=false
DEPLOY_NEXTCLOUD=false
DEPLOY_NOSTR=false
NOSTR_ACCOUNT_PUBKEY=
DEPLOY_GITEA=false
GHOST_MYSQL_PASSWORD="$(new_pass)"
GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
GITEA_MYSQL_PASSWORD="$(new_pass)"
GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site.conf for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1
fi
fi
}
PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
# create a symlink from ./remotepath/projects/project
PROJECT_SYMLINK="$REMOTE_PATH/projects/$PROJECT_NAME"
if [ ! -L "$PROJECT_SYMLINK" ]; then
ln -r -s "$PROJECT_PATH" "$PROJECT_SYMLINK"
fi
# check to see if the env file exists. exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# stub out a project.conf
cat >"$PROJECT_DEFINITION_PATH" <<EOL
# see https://www.sovereign-stack.org/ss-up/#projectconf for more info.
PRIMARY_DOMAIN="domain0.tld"
# OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
WWW_SERVER_MAC_ADDRESS=
# WWW_SSDATA_DISK_SIZE_GB=100
# WWW_SERVER_CPU_COUNT="6"
# WWW_SERVER_MEMORY_MB="4096"
BTCPAYSERVER_MAC_ADDRESS=
# BTCPAY_SERVER_CPU_COUNT="4"
# BTCPAY_SERVER_MEMORY_MB="4096"
EOL
chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project.conf for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/ss-up/"
exit 1
fi
. ./project_env.sh
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project.conf."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
fi
export DOMAIN_LIST="$DOMAIN_LIST"
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
# let's provision our primary domain first.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export PRIMARY_DOMAIN="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
stub_site_definition
# bring the VMs up under the primary domain name.
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
# iterate over all our server endpoints and provision them if needed.
# www
VPS_HOSTNAME=
. ./base.sh
if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
# create the lxd base image.
if [ "$SKIP_BASE_IMAGE_CREATION" = false ]; then
./create_lxc_base.sh
fi
fi
for VIRTUAL_MACHINE in www btcpayserver; do
if [ "$VIRTUAL_MACHINE" = btcpayserver ] && [ "$SKIP_BTCPAY" = true ]; then
continue
fi
if [ "$VIRTUAL_MACHINE" = www ] && [ "$SKIP_WWW" = true ]; then
continue
fi
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
FQDN=
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: DOMAIN_NAME not specified in your site.conf."
exit 1
fi
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project=default | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
echo "ERROR: the MACVLAN interface has not been specified. You may need to run 'ss-remote' again."
exit 1
fi
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
# Now let's switch to the new project to ensure new resources are created under the project scope.
if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
lxc project switch "$PROJECT_NAME"
fi
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
fi
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$SKIP_WWW" = true ]; then
echo "INFO: Skipping WWW due to command line argument."
continue
fi
FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
export FQDN="$BASE_IMAGE_VM_NAME"
else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
exit 1
fi
export FQDN="$FQDN"
export LXD_VM_NAME="${FQDN//./-}"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
export PROJECT_PATH="$PROJECT_PATH"
./deploy_vm.sh
if [ "$VIRTUAL_MACHINE" = www ]; then
# this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks.
if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0
fi
fi
done
# let's stub out the rest of our site definitions, if any.
for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# stub out the site definition if it doesn't exist.
stub_site_definition
done
# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ]; then
./project/www/go.sh
else
echo "INFO: Skipping www VM."
fi
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ]; then
./project/btcpayserver/go.sh
else
echo "INFO: Skipping the btcpayserver VM."
fi
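The new_pass helper above asks GPG's CSPRNG for 25 random bytes at quality level 1 and ASCII-armors (base64-encodes) them. A rough non-GPG equivalent, offered only as a hedged comparison:

#!/bin/bash
# approximately what 'gpg --gen-random --armor 1 25' yields: 25 random bytes, base64-encoded.
head -c 25 /dev/urandom | base64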

deployment/migrate.sh

@ -3,6 +3,20 @@
set -eu
cd "$(dirname "$0")"
+. ./target.sh
+# # As part of the install script, we pull down any other sovereign-stack git repos
+# PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
+# PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
+# if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
+# git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
+# else
+# cd "$PROJECTS_SCRIPTS_PATH" || exit 1
+# git -c advice.detachedHead=false pull origin main
+# git checkout "$TARGET_PROJECT_GIT_COMMIT"
+# cd - || exit 1
+# fi
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
@ -22,7 +36,7 @@ if [ "$RESPONSE" != "y" ]; then
exit 0
fi
-. ../defaults.sh
+. ./deployment_defaults.sh
. ./remote_env.sh
@ -35,7 +49,7 @@ for VM in www btcpayserver; do
# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
-echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
+echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-up again."
exit 1
fi
done
@ -43,17 +57,12 @@ done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
-# first we run ss-deploy --stop
-# this grabs a backup of all data (backups are on by default) and saves them to the management machine
-# the --stop flag ensures that services do NOT come back online.
-# by default, we grab a backup.
# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
# source the site path so we know what features it has.
-source ../defaults.sh
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
@ -68,7 +77,7 @@ git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
# run deploy, which backs up everything but doesn't restart any services.
-bash -c "./project/deploy.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH --backup-www --backup-btcpayserver"
+bash -c "./up.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH --backup-www --backup-btcpayserver --skip-base-image"
# call the down script (by default it is non-destructive of user data).
./down.sh
@ -89,4 +98,4 @@ cd -
# need to do any restorations (or backups for that matter, though we still grab one);
# we simply mount the existing data. That's the more common case where the user is simply upgrading the system in-place.
-./project/deploy.sh
+./up.sh

deployment/wait_for_lxc_ip.sh Executable file

@ -0,0 +1,49 @@
#!/bin/bash
set -e
LXC_INSTANCE_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxd-name=*)
LXC_INSTANCE_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# if the invoker did not set the instance name, throw an error.
if [ -z "$LXC_INSTANCE_NAME" ]; then
echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
exit 1
fi
if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
exit 1
fi
IP_V4_ADDRESS=
while true; do
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
if [ -n "$IP_V4_ADDRESS" ]; then
# give the machine extra time to spin up.
wait-for-it -t 300 "$IP_V4_ADDRESS:22"
break
else
sleep 1
printf '.'
fi
done
# wait for cloud-init to complete before returning.
while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
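Typical usage, matching the call in deploy_vm.sh (the instance name is illustrative; LXD VM names here are the FQDN with dots replaced by dashes):

./wait_for_lxc_ip.sh --lxd-name=www-domain0-tld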

install.sh

@ -11,8 +11,6 @@ if [ "$(hostname)" = ss-mgmt ]; then
exit 1
fi
-. ./defaults.sh
# the DISK variable here tells us which disk (partition) the admin wants to use for
# lxd resources. By default, we provision the disk under / as a loop device. Admin
# can override with CLI modifications.
@ -61,24 +59,43 @@ EOF
fi
+. ./deployment/deployment_defaults.sh
+. ./deployment/base.sh
# we need to get the base image. Import it if it's cached, else download it, then cache it.
if ! lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if the image is cached locally, import it from disk, otherwise download it from ubuntu
-IMAGE_IDENTIFIER=$(find "$SS_JAMMY_PATH" | grep ".qcow2" | head -n1 | cut -d "." -f1)
-METADATA_FILE="$SS_JAMMY_PATH/meta-$IMAGE_IDENTIFIER.tar.xz"
-IMAGE_FILE="$SS_JAMMY_PATH/$IMAGE_IDENTIFIER.qcow2"
-if [ -d "$SS_JAMMY_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
+IMAGE_PATH="$HOME/ss/cache/ss-ubuntu-jammy"
+IMAGE_IDENTIFIER=$(find "$IMAGE_PATH" | grep ".qcow2" | head -n1 | cut -d "." -f1)
+METADATA_FILE="$IMAGE_PATH/meta-$IMAGE_IDENTIFIER.tar.xz"
+IMAGE_FILE="$IMAGE_PATH/$IMAGE_IDENTIFIER.qcow2"
+if [ -d "$IMAGE_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
lxc image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
else
lxc image copy "images:$BASE_LXC_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
-mkdir -p "$SS_JAMMY_PATH"
-lxc image export "$UBUNTU_BASE_IMAGE_NAME" "$SS_JAMMY_PATH" --vm
+mkdir -p "$IMAGE_PATH"
+lxc image export "$UBUNTU_BASE_IMAGE_NAME" "$IMAGE_PATH" --vm
fi
fi
# if the ss-mgmt doesn't exist, create it.
-SSH_PUBKEY_PATH="$HOME/.ssh/id_rsa.pub"
+SSH_PATH="$HOME/.ssh"
+SSH_PRIVKEY_PATH="$SSH_PATH/id_rsa"
+SSH_PUBKEY_PATH="$SSH_PRIVKEY_PATH.pub"
+if [ ! -f "$SSH_PRIVKEY_PATH" ]; then
+ssh-keygen -f "$SSH_PRIVKEY_PATH" -t rsa -b 4096
+fi
+chmod 700 "$HOME/.ssh"
+chmod 600 "$HOME/.ssh/config"
+# add SSH_PUBKEY_PATH to authorized_keys
+grep -qxF "$(cat $SSH_PUBKEY_PATH)" "$SSH_PATH/authorized_keys" || cat "$SSH_PUBKEY_PATH" >> "$SSH_PATH/authorized_keys"
FROM_BUILT_IMAGE=false
if ! lxc list --format csv | grep -q ss-mgmt; then
@ -98,6 +115,9 @@ if ! lxc config device show ss-mgmt | grep -q ss-code; then
fi
# create the ~/ss path and mount it into the vm.
+source ./deployment/deployment_defaults.sh
+source ./deployment/base.sh
mkdir -p "$SS_ROOT_PATH"
if ! lxc config device show ss-mgmt | grep -q ss-root; then
@ -154,11 +174,6 @@ fi
# wait for the vm to have an IP address
. ./management/wait_for_lxc_ip.sh
-# wait for the VM to complete its default cloud-init.
-while lxc exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
-sleep 1
-done
# do some other preparations for user experience
lxc file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
lxc file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
@ -178,14 +193,12 @@ if ! < "$HOME/.bashrc" grep -q "ss-manage"; then
ADDED_COMMAND=true
fi
-wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1
# Let's remove any entry in our known_hosts, then add it back.
# we are using IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
-ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
+ssh-keyscan -H "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
@ -196,6 +209,7 @@ if [ "$FROM_BUILT_IMAGE" = false ]; then
lxc stop ss-mgmt
if ! lxc image list | grep -q "ss-mgmt"; then
+echo "Publishing image. Please wait, this may take a while..."
lxc publish ss-mgmt --alias=ss-mgmt
fi
@ -206,7 +220,7 @@ if [ "$ADDED_COMMAND" = true ]; then
echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
fi
-. ./defaults.sh
+. ./deployment/target.sh
# As part of the install script, we pull down any other sovereign-stack git repos
PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"


@ -28,14 +28,12 @@ fi
# if the machine does exist, let's make sure it's RUNNING.
if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
-echo "INFO: The management machine was in a STOPPED state. Starting the environment. Please wait."
+echo "INFO: The SSME was in a STOPPED state. Starting the environment. Please wait."
lxc start ss-mgmt
sleep 30
fi
. ./management/wait_for_lxc_ip.sh
-wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1
# let's ensure ~/.ssh/ssh_config is using the correct IP address for ss-mgmt.
ssh ubuntu@"$IP_V4_ADDRESS"

management/bash_aliases Executable file → Normal file

@ -1,6 +1,6 @@
#!/bin/bash
-alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
+alias ss-up='/home/ubuntu/sovereign-stack/deployment/up.sh $@'
alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'

management/bash_profile Executable file → Normal file


@ -44,19 +44,9 @@ fi
# run a lxd command so we don't get a warning upon first invocation
lxc list > /dev/null 2>&1
# add groups for docker and lxd
if ! groups ubuntu | grep -q docker; then
sudo addgroup docker
sudo usermod -aG docker ubuntu
sudo usermod -aG lxd ubuntu
fi
-# if an SSH pubkey does not exist, we create one.
-if [ ! -f /home/ubuntu/.ssh/id_rsa.pub ]; then
-# generate a new SSH key for the base vm image.
-ssh-keygen -f /home/ubuntu/.ssh/id_rsa -t ecdsa -b 521 -N ""
-fi
+echo "Your management machine has been provisioned!"

management/wait_for_lxc_ip.sh

@ -25,3 +25,8 @@ done
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
+# wait for the VM to complete its default cloud-init.
+while lxc exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+sleep 1
+done

uninstall.sh

@ -24,7 +24,20 @@ if ! command -v lxc >/dev/null 2>&1; then
exit 1
fi
-. ./defaults.sh
+if ! lxc remote get-default | grep -q "local"; then
+echo "ERROR: You MUST be on the local remote when uninstalling the SSME."
+echo "INFO: You can use 'lxc remote switch local' to do this."
+exit 1
+fi
+if ! lxc project list | grep -q "default (current)"; then
+echo "ERROR: You MUST be on the default project when uninstalling the SSME."
+echo "INFO: You can use 'lxc project switch default' to do this."
+exit 1
+fi
if lxc list --format csv | grep -q "ss-mgmt"; then
@ -61,6 +74,8 @@ if [ "$PURGE_LXD" = true ]; then
lxc network delete lxdbr0
fi
+# this file contains the BASE_IMAGE_NAME
+. ./deployment/base.sh
if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
fi

update.sh Executable file

@ -0,0 +1 @@
#!/bin/bash