1
1
Fork 1

Compare commits

...

33 Commits

Author SHA1 Message Date
Derek Smith 89009e52e5
Updat elnplayserver control. 2023-11-19 14:05:11 -05:00
Derek Smith 65400e1dbd
reset updates. 2023-11-19 14:05:08 -05:00
Derek Smith 5689f97c4d
lxd update 2023-11-19 14:05:03 -05:00
Derek Smith 61a06feab6
Remove deployment string, add order deets. 2023-11-19 14:04:46 -05:00
Derek Smith 1691464249
Nitpicks. 2023-11-19 14:04:44 -05:00
Derek Smith 0be4a5a2bd
Nitpicks. 2023-11-19 14:04:42 -05:00
Derek Smith 1c99942bb2
Refactor disk siziing exports. 2023-11-19 14:04:41 -05:00
Derek Smith 3e2f6c9ca5
Remove BITCOIN_CHAIN from project names. 2023-11-19 14:04:39 -05:00
Derek Smith e7a0e9c2b0
Improve down.sh 2023-11-19 14:04:37 -05:00
Derek Smith 778ca21dc7
Delete stop.sh 2023-11-19 14:04:35 -05:00
Derek Smith b5dc9d59b2
Refactor MAC address envs. 2023-11-19 14:04:33 -05:00
Derek Smith e03a2526ee
Delete target.sh 2023-11-19 14:04:31 -05:00
Derek Smith 8019839389
Refactor system ENVs 2023-11-19 14:04:29 -05:00
Derek Smith 5c41c7a609
Disable deletion of BASE_IMAGE_VM_NAME VM. 2023-11-19 14:04:27 -05:00
Derek Smith 59466556f7
add LNPLAY_BASE_IMAGE_NAME 2023-11-19 14:04:25 -05:00
Derek Smith 2e060ed5ae
Silence command output. 2023-11-19 14:04:23 -05:00
Derek Smith 893f8d4f61
Add timeout on wait_for_lxc_ip 2023-11-19 14:04:21 -05:00
Derek Smith b18899c25c
Scope to default. 2023-11-19 14:04:18 -05:00
Derek Smith 9e5a2da31d
Ensure default project. 2023-11-19 14:04:12 -05:00
Derek Smith 565c18fbf4
Add controls over base image creation. 2023-11-19 14:03:51 -05:00
Derek Smith f55a353d40
Update project. 2023-11-19 14:02:44 -05:00
Derek Smith 05ad16707c
Update project. 2023-11-19 14:02:42 -05:00
Derek Smith effc232ace
Use '-q' with lxc init commands. 2023-11-19 14:02:40 -05:00
Derek Smith c4b96e73b0
Ensure initial image copy goes to default project. 2023-11-19 14:02:37 -05:00
Derek Smith 923134dc2c
Enable debuggin on create_lxc_base.sh 2023-11-19 14:02:35 -05:00
Derek Smith 2cc1227c16
Enable debugging. 2023-11-19 14:02:33 -05:00
Derek Smith 3e726a578f
Update project/ remove warnings. 2023-11-19 14:02:31 -05:00
Derek Smith aff55e953d
Update project head. 2023-11-19 14:02:29 -05:00
Derek Smith a88a0ebb69
Update project head. 2023-11-19 14:02:27 -05:00
Derek Smith 4c0f486097
SKIP proj provis if is DEPLOYMENT_STRING is null 2023-11-19 14:02:24 -05:00
Derek Smith 796c36e40e
Update project head. 2023-11-19 14:01:44 -05:00
Derek Smith 82fd312766
Bump LXD version. 2023-11-19 14:00:54 -05:00
Derek Smith 2fd61026ef
Increase timer. 2023-11-19 14:00:34 -05:00
18 changed files with 402 additions and 390 deletions

View File

@ -7,3 +7,5 @@ export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
WEEK_NUMBER=$(date +%U)
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
export LNPLAY_BASE_IMAGE_NAME="ss-lnplay"
export LNPLAY_BASE_IMAGE_VM_NAME="$LNPLAY_BASE_IMAGE_NAME"

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
. ./base.sh
@ -8,28 +8,39 @@ cd "$(dirname "$0")"
bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
fi
# let's download our base image.
if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
if ! lxc image list --format csv --columns l --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# copy the image down from canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
lxc image copy -q "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update --target-project default
fi
# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list --format csv -q | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
if lxc list --format csv -q --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# if there's no snapshot, we dispense with the old image and try again.
if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force
if ! lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc delete "$BASE_IMAGE_VM_NAME" --force --project default
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
fi
else
# the base image is ubuntu:22.04.
lxc init --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project=default
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME" --project=default
if ! lxc list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
# the base image is ubuntu:22.04.
# this command needs a pseudo-terminal for some reason, thus the script
script -q -c "lxc init -q --profile=$BASE_IMAGE_VM_NAME $UBUNTU_BASE_IMAGE_NAME $BASE_IMAGE_VM_NAME --vm --project default" /dev/null
sleep 1
fi
# TODO we might have to do a sleep loop timer here in case there are states before STOPPED.
# just a hunch
if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME" --project default
lxc start "$BASE_IMAGE_VM_NAME" --project default
sleep 15
fi
# for CHAIN in mainnet testnet; do
# for DATA in blocks chainstate; do
@ -37,56 +48,59 @@ else
# done
# done
lxc start "$BASE_IMAGE_VM_NAME" --project=default
if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: RUNNING"; then
sleep 15
sleep 25
while lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" --project=default -- wait-for-it -t 100 127.0.0.1:22
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" --project default -- wait-for-it -t 100 127.0.0.1:22
# # If we have any chainstate or blocks in our SSME, let's push them to the
# # remote host as a zfs volume that way deployments can share a common history
# # of chainstate/blocks.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# # if the storage snapshot doesn't yet exist, create it.
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
# if [ -d "$DATA_PATH" ]; then
# COMPLETE_FILE_PATH="$DATA_PATH/complete"
# if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
# lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
# lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
# lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
# else
# echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
# fi
# fi
# fi
# done
# done
# # If we have any chainstate or blocks in our SSME, let's push them to the
# # remote host as a zfs volume that way deployments can share a common history
# # of chainstate/blocks.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# # if the storage snapshot doesn't yet exist, create it.
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
# if [ -d "$DATA_PATH" ]; then
# COMPLETE_FILE_PATH="$DATA_PATH/complete"
# if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
# lxc file push --recursive --project default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
# lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
# lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
# else
# echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
# fi
# fi
# fi
# done
# done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME" --project=default
lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project=default
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME" --project default > /dev/null
fi
lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project default > /dev/null
fi
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait." > /dev/null
lxc publish -q --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none > /dev/null
echo "INFO: Success creating the base image. Deleting artifacts from the build process."
lxc delete -f "$BASE_IMAGE_VM_NAME" --project=default
# echo "INFO: Success creating the base image. Deleting artifacts from the build process." > /dev/null
# lxc delete -q -f "$BASE_IMAGE_VM_NAME" --project default > /dev/null
# # now let's get a snapshot of each of the blocks/chainstate directories.
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
# echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
# lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
# lxc storage volume snapshot ss-base --project default "$CHAIN-$DATA"
# fi
# done
# done

3
deployment/defaults.env Normal file
View File

@ -0,0 +1,3 @@
WWW_SERVER_MAC_ADDRESS=
BTCPAY_SERVER_MAC_ADDRESS=
LNPLAY_SERVER_MAC_ADDRESS=

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
. ./base.sh
@ -31,81 +31,96 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
VM_ID=
BACKUP_DISK_SIZE_GB=
SSDATA_DISK_SIZE_GB=
DOCKER_DISK_SIZE_GB=
if [ "$VIRTUAL_MACHINE" = www ]; then
if [ "$SKIP_WWW" = true ]; then
if [ -z "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
exit 0
fi
VM_ID="w"
BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
fi
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
if [ "$SKIP_BTCPAYSERVER" = true ]; then
if [ -z "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
exit 0
fi
VM_ID="b"
BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
fi
if [ "$VIRTUAL_MACHINE" = lnplayserver ]; then
if [ "$SKIP_LNPLAY_SERVER" = true ]; then
if [ -z "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
exit 0
fi
VM_ID="c"
BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
fi
DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
# with lnplay server, we wrap everything up into an image.
# everything else gets ZFS storage volumes.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
EXISTING_STORAGE_VOLUMES=$(lxc storage volume list ss-base -q --format csv)
if ! echo "$EXISTING_STORAGE_VOLUMES" | grep -q docker; then
lxc storage volume create ss-base docker --type=block >> /dev/null
lxc storage volume set ss-base docker size="${DOCKER_DISK_SIZE_GB}GB"
fi
if ! echo "$EXISTING_STORAGE_VOLUMES" | grep -q backup; then
lxc storage volume create ss-base backup --type=filesystem >> /dev/null
lxc storage volume set ss-base backup size="${BACKUP_DISK_SIZE_GB}GB"
fi
if ! lxc storage volume list ss-base --format csv -q --project default | grep -q ss-data; then
lxc storage volume create ss-base ss-data --type=filesystem >> /dev/null
lxc storage volume set ss-base ss-data size="${SSDATA_DISK_SIZE_GB}GB"
fi
fi
# TODO ensure we are only GROWING the volume--never shrinking
lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME"
SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
# we need to do this in a pseudo-TTY since it doesn't execute within a docker container
mkdir -p /tmp/ss
# lnplayserver uses a different base image, but that's ok.
BASE_IMAGE_NAME="$DOCKER_BASE_IMAGE_NAME"
if [ "$VIRTUAL_MACHINE" = lnplayserver ] && lxc image list -q --format csv | grep -q "$LNPLAY_BASE_IMAGE_NAME,"; then
BASE_IMAGE_NAME="$LNPLAY_BASE_IMAGE_NAME"
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
fi
lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
# now let's create a new VM to work with.
#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
lxc init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
script -q -f /tmp/ss/typescript -c "lxc init -q $BASE_IMAGE_NAME $LXD_VM_NAME --vm --profile=$LXD_VM_NAME" >> /dev/null
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
# attach the docker block device.
lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
# record the expiration date of the VM in the user data.
if [ -n "$VM_EXPIRATION_DATE" ]; then
lxc config set "$LXD_VM_NAME" user.expiration_date "$VM_EXPIRATION_DATE"
fi
# record the order id in the VM user data.
if [ -n "$ORDER_ID" ]; then
lxc config set "$LXD_VM_NAME" user.order_id "$ORDER_ID"
fi
# lnplayserver doesnt have any ZFS volumes; everything is built into the image.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
# attach the docker block device.
lxc storage volume attach ss-base docker "$LXD_VM_NAME"
# attach the ss-data volume.
lxc storage volume attach ss-base ss-data "$LXD_VM_NAME" ss-data "$REMOTE_DATA_PATH"
fi
# fi
# if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# # attach any volumes
# for CHAIN in testnet mainnet; do
@ -117,14 +132,21 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# fi
lxc start "$LXD_VM_NAME"
sleep 10
sleep 15
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
SSH_PUBKEY=$(cat "$SSH_PUBKEY_PATH")
# we push the management environment's ssh public key to the ubuntu user via the lxc management plane.
# this is needed in case the management plane's SSH key changes since the base image was created.
lxc file push "$SSH_PUBKEY_PATH" "$LXD_VM_NAME/$REMOTE_HOME/.ssh/authorized_keys" >> /dev/null
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
ssh "ubuntu@$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "ubuntu@$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
fi
fi

View File

@ -10,56 +10,40 @@ if lxc remote get-default -q | grep -q "local"; then
exit 1
fi
KEEP_DOCKER_VOLUME=true
PURGE_STORAGE_VOLUMES=false
OTHER_SITES_LIST=
SKIP_BTCPAYSERVER=false
SKIP_WWW=false
SKIP_LNPLAY_SERVER=false
BACKUP_WWW_APPS=true
NON_INTERACTIVE_MODE=false
LNPLAY_ENV_FILE_PATH=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--purge)
KEEP_DOCKER_VOLUME=false
--purge-storage-volumes=*)
PURGE_STORAGE_VOLUMES="${i#*=}"
shift
;;
--skip-btcpayserver)
SKIP_BTCPAYSERVER=true
--non-interactive)
NON_INTERACTIVE_MODE=true
shift
;;
--skip-wwwserver)
SKIP_WWW=true
shift
;;
--skip-lnplayserver)
SKIP_LNPLAY_SERVER=true
--lnplay-env-path=*)
LNPLAY_ENV_FILE_PATH="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
SERVERS=
if [ "$SKIP_BTCPAYSERVER" = false ]; then
SERVERS="btcpayserver"
fi
if [ "$SKIP_WWW" = false ]; then
SERVERS="www $SERVERS"
fi
if [ "$SKIP_LNPLAY_SERVER" = false ]; then
SERVERS="lnplayserver $SERVERS"
fi
export NON_INTERACTIVE_MODE="$NON_INTERACTIVE_MODE"
. ./deployment_defaults.sh
. ./remote_env.sh
. ./defaults.env
. ./project_env.sh
# let's bring down services on the remote deployment if necessary.
@ -67,65 +51,101 @@ export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
source ./domain_list.sh
for VIRTUAL_MACHINE in $SERVERS; do
function deleteVM {
IP_V4_ADDRESS=
LXD_VM_NAME="$1"
FORCE_DELETE_VM="$3"
if lxc list | grep -q "$1"; then
if [ "$FORCE_DELETE_VM" = false ]; then
if [ -n "$WWW_SERVER_MAC_ADDRESS" ]; then
DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN" ./project/www/stop_docker_stacks.sh
fi
LXD_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
if [ -n "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
if wait-for-it -t 5 "$BTCPAY_SERVER_FQDN":22; then
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
echo "ERROR: the remote BTCPAY Server is not available on ssh."
exit 1
fi
fi
if lxc list | grep -q "$LXD_NAME"; then
bash -c "./stop.sh --server=$VIRTUAL_MACHINE"
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
bash -c "./project/lnplay/down.sh --non-interactive=true --env-file=$LNPLAY_ENV_FILE_PATH"
fi
if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
APP_LIST="letsencrypt ghost nextcloud gitea nostr"
echo "INFO: Backing up WWW apps."
for APP in $APP_LIST; do
bash -c "$(pwd)/project/www/backup_www.sh --app=$APP"
done
# if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
# APP_LIST="letsencrypt ghost nextcloud gitea nostr"
# echo "INFO: Backing up WWW apps."
# for APP in $APP_LIST; do
# bash -c "$(pwd)/project/www/backup_www.sh --app=$APP"
# done
# fi
IP_V4_ADDRESS="$(lxc list "$LXD_VM_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
if lxc list -q --format csv | grep "$1" | grep -q RUNNING; then
lxc stop -f "$LXD_VM_NAME"
fi
lxc delete -f "$LXD_VM_NAME"
else
lxc delete -f "$LXD_VM_NAME"
fi
lxc stop "$LXD_NAME"
lxc delete "$LXD_NAME"
fi
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VIRTUAL_MACHINE.$PRIMARY_DOMAIN" | exit
ssh-keygen -R "$SSH_HOME/known_hosts" -R "$2.$3" >> /dev/null
if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
if [ -n "$IP_V4_ADDRESS" ]; then
ssh-keygen -R "$SSH_HOME/known_hosts" -R "$IP_V4_ADDRESS" >> /dev/null
fi
if [ "$KEEP_DOCKER_VOLUME" = false ]; then
# destroy the docker volume
VM_ID=w
if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
VM_ID="b"
elif [ "$VIRTUAL_MACHINE" = lnplayserver ]; then
VM_ID="c"
fi
# d for docker; b for backup; s for ss-data
for DATA in d b s; do
VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""$DATA"
if lxc storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
RESPONSE=
read -r -p "Are you sure you want to delete the '$VOLUME_NAME' volume intended for '$LXD_NAME'?": RESPONSE
if [ "$RESPONSE" = "y" ]; then
lxc storage volume delete ss-base "$VOLUME_NAME"
fi
fi
done
else
# we maintain the volumes
# TODO make a snapshot on all the zfs storage volumes.
echo "TODO: create snapshot of ZFS volumes and pull them to mgmt machine."
if lxc profile list | grep -q "$1"; then
lxc profile delete "$1" >> /dev/null
fi
done
if lxc network list -q | grep -q ss-ovn; then
lxc network delete ss-ovn
}
LXD_NAME=
if [ -n "$WWW_SERVER_MAC_ADDRESS" ]; then
LXD_NAME="${WWW_HOSTNAME//./-}-${PRIMARY_DOMAIN//./-}"
deleteVM "$LXD_NAME" "$WWW_HOSTNAME" "$PRIMARY_DOMAIN" false
fi
if [ -n "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
LXD_NAME="${BTCPAY_HOSTNAME//./-}-${PRIMARY_DOMAIN//./-}"
deleteVM "$LXD_NAME" "$BTCPAY_HOSTNAME" "$PRIMARY_DOMAIN" false
fi
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
LXD_NAME="${LNPLAY_SERVER_HOSTNAME//./-}-${PRIMARY_DOMAIN//./-}"
deleteVM "$LXD_NAME" "$LNPLAY_SERVER_HOSTNAME" "$PRIMARY_DOMAIN" true
fi
if [ "$PURGE_STORAGE_VOLUMES" = true ]; then
EXISTING_STORAGE_VOLUMES=$(lxc storage volume list ss-base -q --format csv)
if echo "$EXISTING_STORAGE_VOLUMES" | grep -q docker; then
lxc storage volume delete ss-base docker >> /dev/null
fi
if echo "$EXISTING_STORAGE_VOLUMES" | grep -q ss-data; then
lxc storage volume delete ss-base ss-data >> /dev/null
fi
if echo "$EXISTING_STORAGE_VOLUMES" | grep -q backup; then
lxc storage volume delete ss-base backup >> /dev/null
fi
if lxc network list -q | grep -q ss-ovn; then
lxc network delete ss-ovn
fi
fi

@ -1 +1 @@
Subproject commit e699ed6fdd5d71dfa62e7477857bf4d5608cb698
Subproject commit dd446a753e5baf53fd3a20178c67af5418a91ca4

View File

@ -2,19 +2,16 @@
set -eu
. ./defaults.env
PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_NAME="$PROJECT_NAME"
if [ "$PROJECT_NAME" = default ]; then
echo "ERROR: You are on the default project. Use 'lxc project list' and 'lxc project switch <project>'."
exit 1
fi
BITCOIN_CHAIN=$(echo "$PROJECT_NAME" | cut -d'-' -f2)
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
@ -24,14 +21,12 @@ fi
source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"
if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then
echo "ERROR: the site definition does not exist."
exit 1
fi
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition at '$PRIMARY_SITE_DEFINITION_PATH'."
exit 1
@ -52,4 +47,8 @@ export BTCPAYSERVER_DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
export LNPLAY_SERVER_SSDATA_DISK_SIZE_GB="$LNPLAY_SERVER_SSDATA_DISK_SIZE_GB"
export LNPLAY_SERVER_BACKUP_DISK_SIZE_GB="$LNPLAY_SERVER_BACKUP_DISK_SIZE_GB"
export LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB="$LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB"
export LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB="$LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB"
export WWW_SERVER_MAC_ADDRESS="$WWW_SERVER_MAC_ADDRESS"
export BTCPAY_SERVER_MAC_ADDRESS="$BTCPAY_SERVER_MAC_ADDRESS"
export LNPLAY_SERVER_MAC_ADDRESS="$LNPLAY_SERVER_MAC_ADDRESS"

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eu
cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
@ -33,7 +33,7 @@ if [ ! -f "$REMOTE_DEFINITION" ]; then
# https://www.sovereign-stack.org/ss-remote
LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
# DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
# REGISTRY_URL=http://registry.domain.tld:5000
EOL
@ -148,12 +148,13 @@ fi
# install dependencies.
ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.17/stable'
ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.18/stable'
sleep 5
fi
# install OVN for the project-specific bridge networks
ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central && sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central"
ssh -t "ubuntu@$FQDN" "sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
# if the user did not specify the interface, we just use whatever is used for the default route.
if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
@ -203,16 +204,6 @@ profiles:
pool: ss-base
type: disk
name: default
cluster:
server_name: ${REMOTE_NAME}
enabled: true
member_config: []
cluster_address: ""
cluster_certificate: ""
server_address: ""
cluster_password: ""
cluster_certificate_path: ""
cluster_token: ""
EOF
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.

View File

@ -38,59 +38,24 @@ if [ ! -f "$REMOTE_DEFINITION" ]; then
exit 1
fi
DEPLOYMENT_STRING=
source "$REMOTE_DEFINITION"
# ensure our projects are provisioned according to DEPLOYMENT_STRING
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
# let's provision the projects if specified in the remote.conf file.
# we assume projects are created EXTERNALLY to sovereign-stack when DEPLOYMENT_STRING is null.
if [ -n "$DEPLOYMENT_STRING" ]; then
# ensure our projects are provisioned according to DEPLOYMENT_STRING
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
# create the lxc project as specified by PROJECT_NAME
if ! lxc project list | grep -q "$PROJECT_NAME"; then
lxc project create "$PROJECT_NAME"
lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true
lxc project switch "$PROJECT_NAME"
fi
# default values are already at regtest mode.
if [ "$BITCOIN_CHAIN" = testnet ]; then
WWW_SSDATA_DISK_SIZE_GB=30
WWW_BACKUP_DISK_SIZE_GB=30
WWW_DOCKER_DISK_SIZE_GB=50
BTCPAYSERVER_SSDATA_DISK_SIZE_GB=30
BTCPAYSERVER_BACKUP_DISK_SIZE_GB=30
BTCPAYSERVER_DOCKER_DISK_SIZE_GB=100
LNPLAY_SERVER_SSDATA_DISK_SIZE_GB=20
LNPLAY_SERVER_BACKUP_DISK_SIZE_GB=20
LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB=20
elif [ "$BITCOIN_CHAIN" = mainnet ]; then
WWW_SSDATA_DISK_SIZE_GB=40
WWW_BACKUP_DISK_SIZE_GB=40
WWW_DOCKER_DISK_SIZE_GB=1000
BTCPAYSERVER_SSDATA_DISK_SIZE_GB=30
BTCPAYSERVER_BACKUP_DISK_SIZE_GB=30
BTCPAYSERVER_DOCKER_DISK_SIZE_GB=500
LNPLAY_SERVER_SSDATA_DISK_SIZE_GB=20
LNPLAY_SERVER_BACKUP_DISK_SIZE_GB=20
LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB=400
fi
export WWW_SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
export WWW_BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
export WWW_DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
export BTCPAYSERVER_SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
export BTCPAYSERVER_BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
export BTCPAYSERVER_DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
done
# create the lxc project as specified by PROJECT_NAME
if ! lxc project list | grep -q "$PROJECT_NAME"; then
lxc project create "$PROJECT_NAME"
lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true
lxc project switch "$PROJECT_NAME"
fi
done
fi

View File

@ -1,9 +1,15 @@
#!/bin/bash
set -e
set -eu
cd "$(dirname "$0")"
RESPONSE=
read -r -p "This is a VERY DANGEROUS COMMAND! Are you sure you want to continue? (y/n)": RESPONSE
if [ "$RESPONSE" != "y" ]; then
exit 1
fi
PURGE_LXD=false
# grab any modifications from the command line.
@ -20,10 +26,10 @@ for i in "$@"; do
esac
done
source ../defaults.sh
./down.sh
. ./base.sh
# these only get initialized upon creation, so we MUST delete here so they get recreated.
if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc profile delete "$BASE_IMAGE_VM_NAME"
@ -46,6 +52,16 @@ fi
if [ "$PURGE_LXD" = true ]; then
# purge all projects
PROJECT_NAMES=$(lxc project list --format csv -q | grep -vw default | cut -d',' -f1)
# Iterate over each project name
for PROJECT in $PROJECT_NAMES; do
if ! echo "$PROJECT" | grep -q default; then
lxc project delete "$PROJECT"
fi
done
if lxc profile show default | grep -q "root:"; then
lxc profile device remove default root
fi

View File

@ -1,66 +0,0 @@
#!/bin/bash
# https://www.sovereign-stack.org/ss-down/
set -eu
cd "$(dirname "$0")"
if lxc remote get-default -q | grep -q "local"; then
echo "ERROR: you are on the local lxc remote. Nothing to take down"
exit 1
fi
SERVER_TO_STOP=
OTHER_SITES_LIST=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--server=*)
SERVER_TO_STOP="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
if [ -z "$SERVER_TO_STOP" ]; then
echo "ERROR: you MUST specify a server to stop with '--server=www' for example."
exit 1
fi
. ./deployment_defaults.sh
. ./remote_env.sh
. ./project_env.sh
# let's bring down services on the remote deployment if necessary.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
source ./domain_list.sh
if [ "$SERVER_TO_STOP" = www ]; then
DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN" ./project/www/stop_docker_stacks.sh
fi
if [ "$SERVER_TO_STOP" = btcpayserver ]; then
if wait-for-it -t 5 "$BTCPAY_SERVER_FQDN":22; then
ssh "$BTCPAY_SERVER_FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
else
echo "ERROR: the remote BTCPAY Server is not available on ssh."
exit 1
fi
fi
if [ "$SERVER_TO_STOP" = lnplayserver ]; then
DOCKER_HOST="ssh://ubuntu@$LNPLAY_SERVER_FQDN" ./project/lnplay/down.sh
fi

View File

@ -5,8 +5,6 @@ cd "$(dirname "$0")"
VIRTUAL_MACHINE=base
LXD_HOSTNAME=
SSDATA_VOLUME_NAME=
BACKUP_VOLUME_NAME=
# grab any modifications from the command line.
for i in "$@"; do
@ -19,14 +17,6 @@ for i in "$@"; do
VIRTUAL_MACHINE="${i#*=}"
shift
;;
--ss-volume-name=*)
SSDATA_VOLUME_NAME="${i#*=}"
shift
;;
--backup-volume-name=*)
BACKUP_VOLUME_NAME="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
@ -36,8 +26,8 @@ done
# generate the custom cloud-init file. Cloud init installs and configures sshd
SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
eval "$(ssh-agent -s)"
ssh-add "$SSH_HOME/id_rsa"
eval "$(ssh-agent -s)" >> /dev/null
ssh-add "$SSH_HOME/id_rsa" >> /dev/null
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
@ -82,8 +72,6 @@ EOF
fi
. ./target.sh
# if VIRTUAL_MACHINE=base, then we doing the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
# this is for the base image only...
@ -144,7 +132,7 @@ EOF
"${REGISTRY_URL}"
],
"labels": [
"PROJECT_COMMIT=${TARGET_PROJECT_GIT_COMMIT}"
"test=test"
]
}
@ -153,14 +141,12 @@ EOF
fi
fi
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo chmod a+r /etc/apt/keyrings/docker.gpg
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
@ -182,6 +168,11 @@ if [ "$VIRTUAL_MACHINE" != base ]; then
preserve_hostname: true
fqdn: ${FQDN}
EOF
fi
if [ "$VIRTUAL_MACHINE" = www ] || [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
resize_rootfs: false
disk_setup:
@ -216,6 +207,7 @@ if [ "$VIRTUAL_MACHINE" != base ]; then
match:
macaddress: ${MAC_ADDRESS_TO_PROVISION}
set-name: enp5s0
EOF
fi
@ -252,21 +244,29 @@ devices:
type: disk
EOF
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
# we add the lnplayserver ss-base manually since ss-base
# resides in the default project for those deployments.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
cat >> "$YAML_PATH" <<EOF
ss-data:
path: ${REMOTE_DATA_PATH}
pool: ss-base
source: ${SSDATA_VOLUME_NAME}
source: ss-data
type: disk
ss-backup:
path: ${REMOTE_BACKUP_PATH}
pool: ss-base
source: ${BACKUP_VOLUME_NAME}
source: ss-backup
type: disk
EOF
fi
fi
fi
# Stub out the network piece for the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
@ -278,7 +278,7 @@ name: ${FILENAME}
EOF
else
# If we are deploying a VM that attaches to the network underlay.
# If we are deploying a VM that attaches to the network underlay.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
@ -286,6 +286,7 @@ else
type: nic
EOF
# on www and btcpayserver, we also attach the VMs to OVN network for inter-VM communication
if [ "$VIRTUAL_MACHINE" = www ] || [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
enp6s0:
@ -303,17 +304,16 @@ fi
if [ "$VIRTUAL_MACHINE" = base ]; then
if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME" --project default
lxc profile create "$LXD_HOSTNAME" --project default >> /dev/null
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
else
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
lxc profile create "$LXD_HOSTNAME"
lxc profile create "$LXD_HOSTNAME" >> /dev/null
fi
# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
fi

View File

@ -1,3 +0,0 @@
#!/bin/bash
export TARGET_PROJECT_GIT_COMMIT=ca069c7decdc74d2719a7f34927bda49159da2ae

View File

@ -1,10 +1,8 @@
#!/bin/bash
set -eu
set -exu
cd "$(dirname "$0")"
. ./target.sh
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs lxc; do
if ! command -v "$cmd" >/dev/null 2>&1; then
@ -50,10 +48,9 @@ RESTORE_BTCPAY=false
UPDATE_BTCPAY=false
REMOTE_NAME="$(lxc remote get-default)"
USER_SAYS_YES=false
WWW_SERVER_MAC_ADDRESS=
BTCPAY_SERVER_MAC_ADDRESS=
LNPLAY_SERVER_MAC_ADDRESS=
LNPLAY_ENV_FILE_PATH=
VM_EXPIRATION_DATE=
ORDER_ID=
# grab any modifications from the command line.
for i in "$@"; do
@ -98,6 +95,18 @@ for i in "$@"; do
SKIP_BASE_IMAGE_CREATION=true
shift
;;
--lnplay-env-path=*)
LNPLAY_ENV_FILE_PATH="${i#*=}"
shift
;;
--vm-expiration-date=*)
VM_EXPIRATION_DATE="${i#*=}"
shift
;;
--order-id=*)
ORDER_ID="${i#*=}"
shift
;;
--no-cert-renew)
RUN_CERT_RENEWAL=false
shift
@ -125,6 +134,45 @@ fi
. ./remote_env.sh
# default values are already set for regtest mode.
if [ "$BITCOIN_CHAIN" = testnet ]; then
WWW_SSDATA_DISK_SIZE_GB=30
WWW_BACKUP_DISK_SIZE_GB=30
WWW_DOCKER_DISK_SIZE_GB=50
BTCPAYSERVER_SSDATA_DISK_SIZE_GB=30
BTCPAYSERVER_BACKUP_DISK_SIZE_GB=30
BTCPAYSERVER_DOCKER_DISK_SIZE_GB=100
LNPLAY_SERVER_SSDATA_DISK_SIZE_GB=20
LNPLAY_SERVER_BACKUP_DISK_SIZE_GB=20
LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB=20
elif [ "$BITCOIN_CHAIN" = mainnet ]; then
WWW_SSDATA_DISK_SIZE_GB=40
WWW_BACKUP_DISK_SIZE_GB=40
WWW_DOCKER_DISK_SIZE_GB=1000
BTCPAYSERVER_SSDATA_DISK_SIZE_GB=30
BTCPAYSERVER_BACKUP_DISK_SIZE_GB=30
BTCPAYSERVER_DOCKER_DISK_SIZE_GB=500
LNPLAY_SERVER_SSDATA_DISK_SIZE_GB=20
LNPLAY_SERVER_BACKUP_DISK_SIZE_GB=20
LNPLAY_SSERVER_DOCKER_DISK_SIZE_GB=400
fi
export WWW_SSDATA_DISK_SIZE_GB="$WWW_SSDATA_DISK_SIZE_GB"
export WWW_BACKUP_DISK_SIZE_GB="$WWW_BACKUP_DISK_SIZE_GB"
export WWW_DOCKER_DISK_SIZE_GB="$WWW_DOCKER_DISK_SIZE_GB"
export BTCPAYSERVER_SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
export BTCPAYSERVER_BACKUP_DISK_SIZE_GB="$BTCPAYSERVER_BACKUP_DISK_SIZE_GB"
export BTCPAYSERVER_DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
export REGISTRY_DOCKER_IMAGE="registry:2"
export BACKUP_CERTS="$BACKUP_CERTS"
export RESTORE_BTCPAY="$RESTORE_BTCPAY"
@ -136,6 +184,7 @@ export REMOTE_PATH="$REMOTES_PATH/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTORE_CERTS="$RESTORE_CERTS"
export LNPLAY_ENV_FILE_PATH="$LNPLAY_ENV_FILE_PATH"
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
@ -153,7 +202,7 @@ fi
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
source "$REMOTE_DEFINITION"
export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
export DEPLOYMENT_STRING="$DEPLOYMENT_STRING"
# this is our password generation mechanism. We rely on GPG for secure password generation.
function new_pass {
@ -215,10 +264,6 @@ EOL
PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
export SKIP_BTCPAYSERVER="$SKIP_BTCPAYSERVER"
export SKIP_WWW="$SKIP_WWW"
export SKIP_LNPLAY_SERVER="$SKIP_LNPLAY_SERVER"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
@ -269,20 +314,6 @@ if [ -z "$PRIMARY_DOMAIN" ]; then
exit 1
fi
if [ -z "$WWW_SERVER_MAC_ADDRESS" ]; then
echo "WARNING: the WWW_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
fi
if [ -z "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
echo "WARNING: the BTCPAY_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
fi
if [ -z "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
echo "WARNING: the LNPLAY_SERVER_MAC_ADDRESS is not specified. Check your project.conf."
fi
source ./domain_list.sh
# let's provision our primary domain first.
@ -301,6 +332,8 @@ export UPDATE_BTCPAY="$UPDATE_BTCPAY"
VPS_HOSTNAME=
. ./base.sh
# first, we create base images if needed.
if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
# create the lxd base image.
if [ "$SKIP_BASE_IMAGE_CREATION" = false ]; then
@ -339,8 +372,8 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
if lxc network list --format csv --project=default | grep lxdbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project=default | grep lxdbr0 | grep ss-config)"
if lxc network list --format csv --project default | grep lxdbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project default | grep lxdbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@ -352,15 +385,17 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
# Now let's switch to the new project to ensure new resources are created under the project scope.
if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
lxc project switch "$PROJECT_NAME"
fi
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
# we only need the ovn network with www and btcpayserver
if [ -n "$WWW_SERVER_MAC_ADDRESS" ] || [ -n "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
fi
fi
export MAC_ADDRESS_TO_PROVISION=
@ -390,11 +425,12 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
fi
export FQDN="$FQDN"
export LXD_VM_NAME="${FQDN//./-}"
LXD_VM_NAME="${FQDN//./-}"
export LXD_VM_NAME="$LXD_VM_NAME"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
export PROJECT_PATH="$PROJECT_PATH"
./deploy_vm.sh
env ORDER_ID="$ORDER_ID" VM_EXPIRATION_DATE="$VM_EXPIRATION_DATE" LXD_VM_NAME="$LXD_VM_NAME" ./deploy_vm.sh
done
@ -430,31 +466,39 @@ fi
# don't run lnplay stuff if user specifies --skip-lnplay
if [ "$SKIP_LNPLAY_SERVER" = false ]; then
# now let's run the www and btcpay-specific provisioning scripts.
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
export DOCKER_HOST="ssh://ubuntu@$LNPLAY_SERVER_FQDN"
# now let's run the lnplay provisioning scripts.
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
# set the active env to our LNPLAY_SERVER_FQDN
cat >./project/lnplay/active_env.txt <<EOL
${LNPLAY_SERVER_FQDN}
EOL
LNPLAY_ENV_FILE=./project/lnplay/environments/"$LNPLAY_SERVER_FQDN"
# only stub out the file if it doesn't exist. otherwise we leave it be.
if [ ! -f "$LNPLAY_ENV_FILE" ]; then
# and we have to set our environment file as well.
cat > "$LNPLAY_ENV_FILE" <<EOL
# only stub out the file if it doesn't exist. otherwise we leave it be.
if [ ! -f "$LNPLAY_ENV_FILE_PATH" ]; then
# here's the default env as far as sovereign stack is concerned (only relevant if left unset by admin).
cat > "$LNPLAY_ENV_FILE_PATH" <<EOL
DOCKER_HOST=ssh://ubuntu@${LNPLAY_SERVER_FQDN}
DOMAIN_NAME=${PRIMARY_DOMAIN}
ENABLE_TLS=true
BTC_CHAIN=${BITCOIN_CHAIN}
CLN_COUNT=200
CHANNEL_SETUP=none
LNPLAY_SERVER_PATH=${SITES_PATH}/${PRIMARY_DOMAIN}/lnplayserver
EOL
fi
bash -c "./project/lnplay/up.sh -y --env-file=$LNPLAY_ENV_FILE_PATH --no-services"
# if we've just finished provisioning the first slot, then we take it down and get snapshots.
if ! lxc image list -q --format csv | grep -q "$LNPLAY_BASE_IMAGE_NAME"; then
# we'll stop it
lxc stop "$LXD_VM_NAME"
lxc snapshot "$LXD_VM_NAME" lnplay-loaded > /dev/null
lxc publish -q --public "$LXD_VM_NAME/lnplay-loaded" --alias="$LNPLAY_BASE_IMAGE_NAME" --compression none > /dev/null
# then start it
lxc start "$LXD_VM_NAME"
sleep 15
fi
bash -c "./project/lnplay/up.sh -y"
# now run the provisioning script, but this time without --no-services
bash -c "./project/lnplay/up.sh -y --env-file=$LNPLAY_ENV_FILE_PATH"
fi
fi

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eu
LXC_INSTANCE_NAME=
@ -30,16 +30,22 @@ if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
fi
IP_V4_ADDRESS=
COUNTER=0
while true; do
if [ "$COUNTER" -gt 50 ]; then
break
fi
IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
export IP_V4_ADDRESS="$IP_V4_ADDRESS"
if [ -n "$IP_V4_ADDRESS" ]; then
# give the machine extra time to spin up.
wait-for-it -t 300 "$IP_V4_ADDRESS:22"
wait-for-it -t 120 "$IP_V4_ADDRESS:22"
break
else
sleep 1
printf '.'
COUNTER=$((COUNTER + 1))
fi
done

View File

@ -47,7 +47,7 @@ export DISK="$DISK"
# install lxd snap and initialize it
if ! snap list | grep -q lxd; then
sudo snap install lxd --channel=5.17/stable
sudo snap install lxd --channel=5.18/stable
sleep 5
# run lxd init
@ -103,7 +103,7 @@ if ! lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
if [ -d "$IMAGE_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
lxc image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
else
lxc image copy "images:$BASE_LXC_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
lxc image copy -q "images:$BASE_LXC_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
mkdir -p "$IMAGE_PATH"
lxc image export "$UBUNTU_BASE_IMAGE_NAME" "$IMAGE_PATH" --vm
fi
@ -130,9 +130,9 @@ if ! lxc list --format csv | grep -q ss-mgmt; then
# TODO check to see if there's an existing ss-mgmt image to spawn from, otherwise do this.
if lxc image list | grep -q ss-mgmt; then
FROM_BUILT_IMAGE=true
lxc init ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
script -q -c "lxc init -q ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default" >> /dev/null
else
lxc init "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
script -q -c "lxc init -q images:$BASE_LXC_IMAGE ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default" >> /dev/null
fi
fi

View File

@ -5,7 +5,6 @@ alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-up='/home/ubuntu/sovereign-stack/deployment/up.sh $@'
alias ss-down='/home/ubuntu/sovereign-stack/deployment/down.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-stop='/home/ubuntu/sovereign-stack/deployment/stop.sh $@'
alias ss-start='/home/ubuntu/sovereign-stack/deployment/start.sh $@'
alias ss-restore='/home/ubuntu/sovereign-stack/deployment/restore.sh $@'

View File

@ -35,7 +35,7 @@ sleep 10
# install snap
if ! snap list | grep -q lxd; then
sudo snap install htop
sudo snap install lxd --channel=5.17/stable
sudo snap install lxd --channel=5.18/stable
sleep 6
# We just do an auto initialization. All we are using is the LXD client inside the management environment.