1
1
Fork 1

Nitpicks.

This commit is contained in:
Derek Smith 2023-10-20 13:54:34 -04:00
parent 0be4a5a2bd
commit 1691464249
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
7 changed files with 111 additions and 75 deletions

View File

@ -33,7 +33,8 @@ else
sleep 1
fi
# TODO we might have to do a sleep loop timer here in case there are states before STOPPED.
# just a hunch
if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME" --project default

View File

@ -63,45 +63,69 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
SSDATA_DISK_SIZE_GB="$BTCPAYSERVER_SSDATA_DISK_SIZE_GB"
DOCKER_DISK_SIZE_GB="$BTCPAYSERVER_DOCKER_DISK_SIZE_GB"
fi
DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
# with lnplay server, we wrap everything up into an image.
# everything else gets ZFS storage volumes.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
EXISTING_STORAGE_VOLUMES=$(lxc storage volume list ss-base -q --format csv)
if ! echo "$EXISTING_STORAGE_VOLUMES" | grep -q docker; then
lxc storage volume create ss-base docker --type=block >> /dev/null
lxc storage volume set ss-base docker size="${DOCKER_DISK_SIZE_GB}GB"
fi
if ! echo "$EXISTING_STORAGE_VOLUMES" | grep -q backup; then
lxc storage volume create ss-base backup --type=filesystem >> /dev/null
lxc storage volume set ss-base backup size="${BACKUP_DISK_SIZE_GB}GB"
fi
if ! lxc storage volume list ss-base --format csv -q --project default | grep -q ss-data; then
lxc storage volume create ss-base ss-data --type=filesystem >> /dev/null
lxc storage volume set ss-base ss-data size="${SSDATA_DISK_SIZE_GB}GB"
fi
fi
# TODO ensure we are only GROWING the volume--never shrinking
lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME"
SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
# we need to do this in a pseudo-TTY since it doesn't execute within a docker container
mkdir -p /tmp/ss
# lnplayserver uses a different base image, but that's ok.
BASE_IMAGE_NAME="$DOCKER_BASE_IMAGE_NAME"
if [ "$VIRTUAL_MACHINE" = lnplayserver ]; then
BASE_IMAGE_NAME="$LNPLAY_BASE_IMAGE_NAME"
fi
# TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
if ! lxc image list -q --format csv | grep -q "$BASE_IMAGE_NAME"; then
echo "ERROR: The base image does not exist: $BASE_IMAGE_NAME"
exit 1
fi
lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
# now let's create a new VM to work with.
#lxc init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
lxc init -q "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
script -q -f /tmp/ss/typescript -c "lxc init -q $BASE_IMAGE_NAME $LXD_VM_NAME --vm --profile=$LXD_VM_NAME" >> /dev/null
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
# attach the docker block device.
lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
# record the expiration date of the VM in the user data.
if [ -n "$VM_EXPIRATION_DATE" ]; then
lxc config set "$LXD_VM_NAME" user.expiration_date "$VM_EXPIRATION_DATE"
fi
# record the order id in the VM user data.
if [ -n "$ORDER_ID" ]; then
lxc config set "$LXD_VM_NAME" user.order_id "$ORDER_ID"
fi
# lnplayserver doesn't have any ZFS volumes; everything is built into the image.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
# attach the docker block device.
lxc storage volume attach ss-base docker "$LXD_VM_NAME"
# attach the ss-data volume.
lxc storage volume attach ss-base ss-data "$LXD_VM_NAME" ss-data "$REMOTE_DATA_PATH"
fi
# fi
# if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# # attach any volumes
# for CHAIN in testnet mainnet; do
@ -113,14 +137,21 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
# fi
lxc start "$LXD_VM_NAME"
sleep 10
sleep 15
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"
ssh "$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
SSH_PUBKEY=$(cat "$SSH_PUBKEY_PATH")
# we push the management environment's ssh public key to the ubuntu user via the lxc management plane.
# this is needed in case the management plane's SSH key changes since the base image was created.
lxc file push "$SSH_PUBKEY_PATH" "$LXD_VM_NAME/$REMOTE_HOME/.ssh/authorized_keys" >> /dev/null
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
ssh "ubuntu@$FQDN" "sudo chown ubuntu:ubuntu $REMOTE_DATA_PATH"
ssh "ubuntu@$FQDN" "sudo chown -R ubuntu:ubuntu $REMOTE_BACKUP_PATH"
fi
fi

View File

@ -21,7 +21,6 @@ fi
source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"
if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eu
cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
@ -203,16 +203,6 @@ profiles:
pool: ss-base
type: disk
name: default
cluster:
server_name: ${REMOTE_NAME}
enabled: true
member_config: []
cluster_address: ""
cluster_certificate: ""
server_address: ""
cluster_password: ""
cluster_certificate_path: ""
cluster_token: ""
EOF
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.

View File

@ -20,10 +20,10 @@ for i in "$@"; do
esac
done
source ../defaults.sh
./down.sh
. ./base.sh
# these only get initialized upon creation, so we MUST delete here so they get recreated.
if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc profile delete "$BASE_IMAGE_VM_NAME"

View File

@ -141,14 +141,12 @@ EOF
fi
fi
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo chmod a+r /etc/apt/keyrings/docker.gpg
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
@ -170,6 +168,11 @@ if [ "$VIRTUAL_MACHINE" != base ]; then
preserve_hostname: true
fqdn: ${FQDN}
EOF
fi
if [ "$VIRTUAL_MACHINE" = www ] || [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
cat >> "$YAML_PATH" <<EOF
resize_rootfs: false
disk_setup:
@ -245,20 +248,25 @@ EOF
if [ "$VIRTUAL_MACHINE" != base ]; then
cat >> "$YAML_PATH" <<EOF
# we add the lnplayserver ss-base manually since ss-base
# resides in the default project for those deployments.
if [ "$VIRTUAL_MACHINE" != lnplayserver ]; then
cat >> "$YAML_PATH" <<EOF
ss-data:
path: ${REMOTE_DATA_PATH}
pool: ss-base
source: ${SSDATA_VOLUME_NAME}
source: ss-data
type: disk
ss-backup:
path: ${REMOTE_BACKUP_PATH}
pool: ss-base
source: ${BACKUP_VOLUME_NAME}
source: ss-backup
type: disk
EOF
fi
fi
fi
# Stub out the network piece for the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
cat >> "$YAML_PATH" <<EOF

View File

@ -252,10 +252,6 @@ EOL
PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
export SKIP_BTCPAYSERVER="$SKIP_BTCPAYSERVER"
export SKIP_WWW="$SKIP_WWW"
export SKIP_LNPLAY_SERVER="$SKIP_LNPLAY_SERVER"
mkdir -p "$PROJECT_PATH" "$REMOTE_PATH/projects"
@ -382,9 +378,12 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
lxc project switch "$PROJECT_NAME"
fi
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
# we only need the ovn network with www and btcpayserver
if [ -n "$WWW_SERVER_MAC_ADDRESS" ] || [ -n "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
fi
fi
export MAC_ADDRESS_TO_PROVISION=
@ -454,31 +453,39 @@ fi
# don't run lnplay stuff if user specifies --skip-lnplay
if [ "$SKIP_LNPLAY_SERVER" = false ]; then
# now let's run the www and btcpay-specific provisioning scripts.
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
export DOCKER_HOST="ssh://ubuntu@$LNPLAY_SERVER_FQDN"
# now let's run the lnplay provisioning scripts.
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
# set the active env to our LNPLAY_SERVER_FQDN
cat >./project/lnplay/active_env.txt <<EOL
${LNPLAY_SERVER_FQDN}
EOL
LNPLAY_ENV_FILE=./project/lnplay/environments/"$LNPLAY_SERVER_FQDN"
# only stub out the file if it doesn't exist. otherwise we leave it be.
if [ ! -f "$LNPLAY_ENV_FILE" ]; then
# and we have to set our environment file as well.
cat > "$LNPLAY_ENV_FILE" <<EOL
# only stub out the file if it doesn't exist. otherwise we leave it be.
if [ ! -f "$LNPLAY_ENV_FILE_PATH" ]; then
# here's the default env as far as sovereign stack is concerned (only relevant if left unset by admin).
cat > "$LNPLAY_ENV_FILE_PATH" <<EOL
DOCKER_HOST=ssh://ubuntu@${LNPLAY_SERVER_FQDN}
DOMAIN_NAME=${PRIMARY_DOMAIN}
ENABLE_TLS=true
BTC_CHAIN=${BITCOIN_CHAIN}
CLN_COUNT=200
CHANNEL_SETUP=none
LNPLAY_SERVER_PATH=${SITES_PATH}/${PRIMARY_DOMAIN}/lnplayserver
EOL
fi
bash -c "./project/lnplay/up.sh -y --env-file=$LNPLAY_ENV_FILE_PATH --no-services"
# if we've just finished provisioning the first slot, then we take it down and get snapshots.
if [[ "$LXD_VM_NAME" == "008slot0"* ]] && ! lxc image list -q --format csv | grep -q "$LNPLAY_BASE_IMAGE_NAME"; then
# we'll stop it
lxc stop "$LXD_VM_NAME"
lxc snapshot "$LXD_VM_NAME" lnplay-loaded > /dev/null
lxc publish -q --public "$LXD_VM_NAME/lnplay-loaded" --alias="$LNPLAY_BASE_IMAGE_NAME" --compression none > /dev/null
# then start it
lxc start "$LXD_VM_NAME"
sleep 15
fi
bash -c "./project/lnplay/up.sh -y"
# now run the provisioning script, but this time without --no-services
bash -c "./project/lnplay/up.sh -y --env-file=$LNPLAY_ENV_FILE_PATH"
fi
fi