Refactoring

Derek Smith 2023-04-07 10:20:15 -04:00
parent e175418148
commit d536b85d51
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
15 changed files with 89 additions and 955 deletions


@@ -1,98 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if the image is cached locally, import it from disk; otherwise download it from Ubuntu
IMAGE_IDENTIFIER=$(find "$SS_JAMMY_PATH" | grep ".qcow2" | head -n1 | cut -d "." -f1)
METADATA_FILE="$SS_JAMMY_PATH/meta-$IMAGE_IDENTIFIER.tar.xz"
IMAGE_FILE="$SS_JAMMY_PATH/$IMAGE_IDENTIFIER.qcow2"
if [ -d "$SS_JAMMY_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
lxc image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
else
# copy the image down from Canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
fi
# If the LXC VM already exists, we delete it (so we can start fresh)
if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project=default
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME" --project=default
# for CHAIN in mainnet testnet; do
# for DATA in blocks chainstate; do
# lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
# done
# done
lxc start "$BASE_IMAGE_VM_NAME" --project=default
sleep 15
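# cloud-init writes /var/lib/cloud/instance/boot-finished once first-boot
# provisioning completes, so we poll for that marker file below.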
while lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- wait-for-it -t 100 127.0.0.1:22
# # If we have any chainstate or blocks in our SSME, let's push them to the
# # remote host as a zfs volume that way deployments can share a common history
# # of chainstate/blocks.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# # if the storage snapshot doesn't yet exist, create it.
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
# if [ -d "$DATA_PATH" ]; then
# COMPLETE_FILE_PATH="$DATA_PATH/complete"
# if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
# lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
# lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
# lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
# else
# echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
# fi
# fi
# fi
# done
# done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME" --project=default
lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project=default
fi
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
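# note: the "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" argument publishes the
# named snapshot taken above, not the VM's current state.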
echo "INFO: Success creating the base image. Deleting artifacts from the build process."
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
# # now let's get a snapshot of each of the blocks/chainstate directories.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
# lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
# fi
# done
# done

deploy.sh

@@ -1,441 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "$0")"
LATEST_GIT_COMMIT="$(cat ./.git/refs/heads/main)"
export LATEST_GIT_COMMIT="$LATEST_GIT_COMMIT"
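# note: this reads the loose ref file directly, which assumes the main ref is
# not packed; "git rev-parse main" would be the more robust equivalent.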
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs lxc; do
if ! command -v "$cmd" >/dev/null 2>&1; then
echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
exit 1
fi
done
# do a spot check; if we are on production, warn.
if lxc remote get-default | grep -q "production"; then
echo "WARNING: You are running command against a production system!"
echo ""
# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
RESPONSE=
read -r -p " Are you sure you want to continue (y) ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 1
fi
fi
PRIMARY_DOMAIN=
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=false
BACKUP_BTCPAY=false
BACKUP_APPS=false
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
SKIP_BTCPAY=false
UPDATE_BTCPAY=false
REMOTE_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false
RESTART_FRONT_END=true
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-certs)
RESTORE_CERTS=true
shift
;;
--restore-www)
RESTORE_WWW=true
RESTORE_CERTS=true
shift
;;
--restore-btcpay)
RESTORE_BTCPAY=true
shift
;;
--backup-www)
BACKUP_CERTS=true
BACKUP_APPS=true
shift
;;
--backup-btcpayserver)
BACKUP_BTCPAY=true
shift
;;
--stop)
STOP_SERVICES=true
RESTART_FRONT_END=false
shift
;;
--backup-archive-path=*)
BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
shift
;;
--update-btcpay)
UPDATE_BTCPAY=true
shift
;;
--skip-www)
SKIP_WWW=true
shift
;;
--skip-btcpayserver)
SKIP_BTCPAY=true
shift
;;
--no-cert-renew)
RUN_CERT_RENEWAL=false
shift
;;
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: Use the '--backup-archive-path=/path/to/btcpay/archive.tar.gz' option when restoring btcpay server."
exit 1
fi
if [ "$RESTORE_BTCPAY" = true ] && [ ! -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: The backup archive path you specified DOES NOT exist!"
exit 1
fi
# set up our default paths.
source ../../defaults.sh
. ../remote_env.sh
export REGISTRY_DOCKER_IMAGE="registry:2"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS"
export BACKUP_APPS="$BACKUP_APPS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export REMOTE_NAME="$REMOTE_NAME"
export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
export RESTORE_CERTS="$RESTORE_CERTS"
# TODO: convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
if [ ! -f "$SSH_PUBKEY_PATH" ]; then
# generate a new SSH key for the base vm image.
ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
fi
# ensure our remote path is created.
mkdir -p "$REMOTE_PATH"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
if [ ! -f "$REMOTE_DEFINITION" ]; then
echo "ERROR: The remote definition could not be found. You may need to re-run 'ss-remote'."
exit 1
fi
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
source "$REMOTE_DEFINITION"
export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
export DEPLOYMENT_STRING="$DEPLOYMENT_STRING"
# this is our password generation mechanism; we rely on GPG for secure random generation.
function new_pass {
gpg --gen-random --armor 1 25
}
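# e.g. new_pass emits 25 bytes of GPG-sourced randomness as base64 text,
# something like "kkjWOcOO1mAq0l5qoYWBCBSO" (illustrative output only).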
function stub_site_definition {
mkdir -p "$SITE_PATH" "$PROJECT_PATH/sites"
# create a symlink from $PROJECT_PATH/sites/$DOMAIN_NAME to the site path in ss-sites.
DOMAIN_SYMLINK_PATH="$PROJECT_PATH/sites/$DOMAIN_NAME"
if [ ! -L "$DOMAIN_SYMLINK_PATH" ]; then
ln -r -s "$SITE_PATH" "$DOMAIN_SYMLINK_PATH"
fi
if [ ! -f "$SITE_PATH/site.conf" ]; then
# check to see if the site.conf file exists; stub one out and exit if not.
SITE_DEFINITION_PATH="$SITE_PATH/site.conf"
if [ ! -f "$SITE_DEFINITION_PATH" ]; then
# stub out a site.conf with new passwords.
cat >"$SITE_DEFINITION_PATH" <<EOL
# https://www.sovereign-stack.org/ss-deploy/#siteconf
DOMAIN_NAME="${DOMAIN_NAME}"
# BTCPAY_ALT_NAMES="tip,store,pay,send"
SITE_LANGUAGE_CODES="en"
DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
DEPLOY_GHOST=true
DEPLOY_CLAMS=false
DEPLOY_NEXTCLOUD=false
DEPLOY_NOSTR=false
NOSTR_ACCOUNT_PUBKEY=
DEPLOY_GITEA=false
GHOST_MYSQL_PASSWORD="$(new_pass)"
GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
GITEA_MYSQL_PASSWORD="$(new_pass)"
GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
chmod 0744 "$SITE_DEFINITION_PATH"
echo "INFO: we stubbed a new site.conf for you at '$SITE_DEFINITION_PATH'. Go update it!"
exit 1
fi
fi
}
PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
# create a symlink at $REMOTE_PATH/projects/$PROJECT_NAME pointing to the project path.
PROJECT_SYMLINK="$REMOTE_PATH/projects/$PROJECT_NAME"
if [ ! -L "$PROJECT_SYMLINK" ]; then
ln -r -s "$PROJECT_PATH" "$PROJECT_SYMLINK"
fi
# check to see if the project.conf file exists; stub one out and exit if not.
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
# stub out a project.conf
cat >"$PROJECT_DEFINITION_PATH" <<EOL
# see https://www.sovereign-stack.org/ss-deploy/#projectconf for more info.
PRIMARY_DOMAIN="domain0.tld"
# OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
WWW_SERVER_MAC_ADDRESS=
# WWW_SSDATA_DISK_SIZE_GB=100
# WWW_SERVER_CPU_COUNT="6"
# WWW_SERVER_MEMORY_MB="4096"
BTCPAYSERVER_MAC_ADDRESS=
# BTCPAY_SERVER_CPU_COUNT="4"
# BTCPAY_SERVER_MEMORY_MB="4096"
EOL
chmod 0744 "$PROJECT_DEFINITION_PATH"
echo "INFO: we stubbed a new project.conf for you at '$PROJECT_DEFINITION_PATH'. Go update it!"
echo "INFO: Learn more at https://www.sovereign-stack.org/ss-deploy/"
exit 1
fi
. ../project_env.sh
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your project.conf."
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "ERROR: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
if [ -z "$BTCPAYSERVER_MAC_ADDRESS" ]; then
echo "ERROR: the BTCPAYSERVER_MAC_ADDRESS is not specified. Check your project.conf."
exit 1
fi
# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
fi
export DOMAIN_LIST="$DOMAIN_LIST"
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
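# e.g. DOMAIN_LIST="domain0.tld,domain1.tld,domain2.tld" contains two commas,
# so DOMAIN_COUNT=3.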
# let's provision our primary domain first.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export PRIMARY_DOMAIN="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
stub_site_definition
# bring the VMs up under the primary domain name.
export UPDATE_BTCPAY="$UPDATE_BTCPAY"
# iterate over all our server endpoints and provision them if needed.
# www
VPS_HOSTNAME=
if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
# create the lxd base image.
./create_lxc_base.sh
fi
for VIRTUAL_MACHINE in www btcpayserver; do
if [ "$VIRTUAL_MACHINE" = btcpayserver ] && [ "$SKIP_BTCPAY" = true ]; then
continue
fi
if [ "$VIRTUAL_MACHINE" = www ] && [ "$SKIP_WWW" = true ]; then
continue
fi
export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
FQDN=
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
source "$SITE_PATH/site.conf"
source ./domain_env.sh
# VALIDATE THE INPUT from the ENVFILE
if [ -z "$DOMAIN_NAME" ]; then
echo "ERROR: DOMAIN_NAME not specified in your site.conf."
exit 1
fi
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project=default | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
echo "ERROR: the MACVLAN interface has not been specified. You may need to run 'ss-remote' again."
exit 1
fi
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
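# e.g. if the matching csv row carries a quoted description like
# "ss-config,enp3s0" (hypothetical values), then CONFIG_ITEMS="ss-config,enp3s0"
# and the macvlan parent interface resolves to "enp3s0".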
# Now let's switch to the new project to ensure new resources are created under the project scope.
if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
lxc project switch "$PROJECT_NAME"
fi
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
fi
export MAC_ADDRESS_TO_PROVISION=
export VPS_HOSTNAME="$VPS_HOSTNAME"
export FQDN="$VPS_HOSTNAME.$DOMAIN_NAME"
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$SKIP_WWW" = true ]; then
echo "INFO: Skipping WWW due to command line argument."
continue
fi
FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$WWW_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$WWW_SERVER_MAC_ADDRESS"
elif [ "$VIRTUAL_MACHINE" = btcpayserver ] || [ "$SKIP_BTCPAY" = true ]; then
FQDN="$BTCPAY_HOSTNAME.$DOMAIN_NAME"
VPS_HOSTNAME="$BTCPAY_HOSTNAME"
MAC_ADDRESS_TO_PROVISION="$BTCPAYSERVER_MAC_ADDRESS"
elif [ "$VIRTUAL_MACHINE" = "$BASE_IMAGE_VM_NAME" ]; then
export FQDN="$BASE_IMAGE_VM_NAME"
else
echo "ERROR: VIRTUAL_MACHINE not within allowable bounds."
exit 1
fi
export FQDN="$FQDN"
export LXD_VM_NAME="${FQDN//./-}"
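# e.g. FQDN="www.domain0.tld" yields LXD_VM_NAME="www-domain0-tld", since LXD
# instance names cannot contain dots.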
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
export PROJECT_PATH="$PROJECT_PATH"
./deploy_vm.sh
if [ "$VIRTUAL_MACHINE" = www ]; then
# this tells our local docker client to target the remote endpoint via SSH
export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
# enable docker swarm mode so we can support docker stacks.
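# ("docker info" reports "Swarm: inactive" until swarm mode is enabled; enp6s0
# is the VM's ss-ovn-attached interface per the LXD profile.)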
if docker info | grep -q "Swarm: inactive"; then
docker swarm init --advertise-addr enp6s0
fi
fi
done
# let's stub out the rest of our site definitions, if any.
for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
export DOMAIN_NAME="$DOMAIN_NAME"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# stub out the site definition if it doesn't exist.
stub_site_definition
done
# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ]; then
./www/go.sh
ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
else
echo "INFO: Skipping www VM."
fi
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ]; then
./btcpayserver/go.sh
ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
else
echo "INFO: Skipping the btcpayserver VM."
fi


@@ -1,117 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
## This if clause looks odd because the heredoc below must be LEFT-ALIGNED; <<- only strips leading tabs.
SSH_STRING="Host ${FQDN}"
if ! grep -q "$SSH_STRING" "$SSH_HOME/config"; then
########## BEGIN
cat >> "$SSH_HOME/config" <<-EOF
${SSH_STRING}
HostName ${FQDN}
User ubuntu
EOF
###
fi
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$FQDN"
# if the machine doesn't exist, we create it.
if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# create a base image if needed and instantiate a VM.
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
exit 1
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
VM_ID=
BACKUP_DISK_SIZE_GB=
SSDATA_DISK_SIZE_GB=
DOCKER_DISK_SIZE_GB=
if [ "$VIRTUAL_MACHINE" = www ]; then
VM_ID="w"
BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
VM_ID="b"
BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
fi
DOCKER_VOLUME_NAME="${PRIMARY_DOMAIN_IDENTIFIER}-${VM_ID}d"
if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
fi
# TODO ensure we are only GROWING the volume--never shrinking
lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
SSDATA_VOLUME_NAME="${PRIMARY_DOMAIN_IDENTIFIER}-${VM_ID}s"
if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
BACKUP_VOLUME_NAME="${PRIMARY_DOMAIN_IDENTIFIER}-${VM_ID}b"
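# naming scheme: with a hypothetical PRIMARY_DOMAIN_IDENTIFIER of "domain0" and
# VM_ID="w", the volumes are domain0-wd (docker), domain0-ws (ss-data), and
# domain0-wb (backup).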
if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
fi
lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
# now let's create a new VM to work with.
#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
lxc init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
# let's PIN the HW address for now so we don't exhaust the IP pool
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
# attach the docker block device.
lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
# if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# # attach any volumes
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# MOUNT_PATH="/$CHAIN-$DATA"
# lxc config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
# done
# done
# fi
lxc start "$LXD_VM_NAME"
sleep 10
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"
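# (-H hashes the hostname in the known_hosts entry; -t ecdsa requests only the
# host's ECDSA key)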
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# send an updated ~/.bashrc so we have quicker access to cli tools
scp ./btcpayserver/bashrc.txt "ubuntu@$FQDN:$REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chown ubuntu:ubuntu $REMOTE_HOME/.bashrc"
ssh "$BTCPAY_FQDN" "chmod 0664 $REMOTE_HOME/.bashrc"
fi
fi


@@ -11,6 +11,8 @@ export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"
export CLAMS_FQDN="$CLAMS_HOSTNAME.$DOMAIN_NAME"
export ADMIN_ACCOUNT_USERNAME="info"
export CERTIFICATE_EMAIL_ADDRESS="$ADMIN_ACCOUNT_USERNAME@$DOMAIN_NAME"
export REMOTE_GHOST_PATH="$REMOTE_DATA_PATH/ghost"
export REMOTE_NEXTCLOUD_PATH="$REMOTE_DATA_PATH/nextcloud"
export REMOTE_GITEA_PATH="$REMOTE_DATA_PATH/gitea"

project_defaults.sh (new file)

@@ -0,0 +1,73 @@
#!/bin/bash
set -e
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=false
export DEPLOY_NOSTR=false
export DEPLOY_NEXTCLOUD=false
export DEPLOY_GITEA=false
export SITE_LANGUAGE_CODES="en"
export LANGUAGE_CODE="en"
export NOSTR_ACCOUNT_PUBKEY=
# this is where the html is sourced from.
export SITE_HTML_PATH=
export BTCPAY_ADDITIONAL_HOSTNAMES=
export GHOST_MYSQL_PASSWORD=
export GHOST_MYSQL_ROOT_PASSWORD=
export NEXTCLOUD_MYSQL_PASSWORD=
export GITEA_MYSQL_PASSWORD=
export NEXTCLOUD_MYSQL_ROOT_PASSWORD=
export GITEA_MYSQL_ROOT_PASSWORD=
export DUPLICITY_BACKUP_PASSPHRASE=
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="4"
export WWW_SERVER_MEMORY_MB="4096"
export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
DEFAULT_DB_IMAGE="mariadb:10.11.2-jammy"
# image pins for the docker stack.
export GHOST_IMAGE="ghost:5.38.0"
# TODO switch to mysql. May require intricate export work for existing sites.
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
#https://forum.ghost.org/t/how-to-migrate-from-mariadb-10-to-mysql-8/29575
export GHOST_DB_IMAGE="mysql:8.0.32"
export NGINX_IMAGE="nginx:1.23.3"
# the Nextcloud version in our existing backups is 24.0.3
export NEXTCLOUD_IMAGE="nextcloud:25.0.4"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
# TODO PIN the gitea version number.
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
export OTHER_SITES_LIST=
export BTCPAY_ALT_NAMES=
export REMOTE_HOME="/home/ubuntu"
export REMOTE_DATA_PATH="$REMOTE_HOME/ss-data"
export REMOTE_DATA_PATH_LETSENCRYPT="$REMOTE_DATA_PATH/letsencrypt"
export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups"
export BTCPAY_SERVER_APPPATH="$REMOTE_DATA_PATH/btcpayserver-docker"


@@ -1,286 +0,0 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
VIRTUAL_MACHINE=base
LXD_HOSTNAME=
SSDATA_VOLUME_NAME=
BACKUP_VOLUME_NAME=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--lxd-hostname=*)
LXD_HOSTNAME="${i#*=}"
shift
;;
--vm=*)
VIRTUAL_MACHINE="${i#*=}"
shift
;;
--ss-volume-name=*)
SSDATA_VOLUME_NAME="${i#*=}"
shift
;;
--backup-volume-name=*)
BACKUP_VOLUME_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
# generate the custom cloud-init file. Cloud-init installs and configures sshd.
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
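# $(<file) is bash's builtin file read, equivalent to $(cat file) without
# spawning an external process.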
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
mkdir -p "$PROJECT_PATH/cloud-init"
YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat > "$YAML_PATH" <<EOF
config:
EOF
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: 4
limits.memory: 4096MB
EOF
fi
if [ "$VIRTUAL_MACHINE" = www ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${WWW_SERVER_CPU_COUNT}"
limits.memory: "${WWW_SERVER_MEMORY_MB}MB"
EOF
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"
EOF
fi
# if VIRTUAL_MACHINE=base, then we are building the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
# this is for the base image only...
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
package_update: true
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: false
fqdn: ${BASE_IMAGE_VM_NAME}
packages:
- curl
- ssh-askpass
- apt-transport-https
- ca-certificates
- gnupg-agent
- software-properties-common
- lsb-release
- net-tools
- htop
- rsync
- duplicity
- sshfs
- fswatch
- jq
- git
- nano
- wait-for-it
- dnsutils
- wget
groups:
- docker
users:
- name: ubuntu
groups: docker
shell: /bin/bash
lock_passwd: false
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
EOF
if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
cat >> "$YAML_PATH" <<EOF
write_files:
- path: /etc/docker/daemon.json
permissions: 0644
owner: root
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
]
}
EOF
fi
fi
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
EOF
fi
if [ "$VIRTUAL_MACHINE" != base ]; then
# all other machines that are not the base image
cat >> "$YAML_PATH" <<EOF
user.vendor-data: |
#cloud-config
apt_mirror: http://us.archive.ubuntu.com/ubuntu/
package_update: false
package_upgrade: false
package_reboot_if_required: false
preserve_hostname: true
fqdn: ${FQDN}
resize_rootfs: false
disk_setup:
/dev/sdb:
table_type: 'gpt'
layout: true
overwrite: false
fs_setup:
- label: docker-data
filesystem: 'ext4'
device: '/dev/sdb1'
overwrite: false
mounts:
- [ sdb, /var/lib/docker ]
mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
EOF
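# mount_default_fields above supplies the unspecified fstab columns for each
# "mounts" entry: device, mount point, fs type, options, dump, and pass.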
fi
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
user.network-config: |
version: 2
ethernets:
enp5s0:
dhcp4: true
dhcp4-overrides:
route-metric: 50
match:
macaddress: ${MAC_ADDRESS_TO_PROVISION}
set-name: enp5s0
enp6s0:
dhcp4: true
EOF
fi
# All profiles get a root disk and cloud-init config.
cat >> "$YAML_PATH" <<EOF
description: Default LXD profile for ${FILENAME}
devices:
root:
path: /
pool: ss-base
type: disk
config:
source: cloud-init:config
type: disk
EOF
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
ss-data:
path: ${REMOTE_DATA_PATH}
pool: ss-base
source: ${SSDATA_VOLUME_NAME}
type: disk
ss-backup:
path: ${REMOTE_BACKUP_PATH}
pool: ss-base
source: ${BACKUP_VOLUME_NAME}
type: disk
EOF
fi
# Stub out the network piece for the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
enp6s0:
name: enp6s0
network: lxdbr0
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic
enp6s0:
name: enp6s0
network: ss-ovn
type: nic
name: ${PRIMARY_DOMAIN}
EOF
fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if [ "$VIRTUAL_MACHINE" = base ]; then
if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME" --project default
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
else
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
fi


@@ -11,7 +11,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh


@@ -1,12 +1,13 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
# redirect all docker commands to the remote host.
DOCKER_HOST="ssh://ubuntu@$WWW_FQDN"
export DOCKER_HOST="$DOCKER_HOST"
# Create the nginx config file which covers all domains.
bash -c "./stub/nginx_config.sh"
@@ -16,7 +17,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh
@@ -109,7 +110,7 @@ if [ "$RESTART_FRONT_END" = true ]; then
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh


@@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
# bring down ghost instances.
@@ -9,7 +9,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../defaults.sh
source ../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../domain_env.sh


@@ -9,7 +9,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh


@@ -8,7 +8,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh


@@ -8,7 +8,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh


@@ -18,7 +18,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export CONTAINER_TLS_PATH="/etc/letsencrypt/${DOMAIN_NAME}/live/${DOMAIN_NAME}"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh


@@ -23,7 +23,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh
@@ -95,7 +95,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh


@@ -10,7 +10,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
# source the site path so we know what features it has.
source ../../../../defaults.sh
source ../../project_defaults.sh
source "$SITE_PATH/site.conf"
source ../../domain_env.sh