Initial Switch to incus.

parent 559d5f11f4
commit a273488646
@@ -3,7 +3,7 @@
 # The base VM image.
 export LXD_UBUNTU_BASE_VERSION="jammy"
 export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
-export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
+export BASE_INCUS_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
 WEEK_NUMBER=$(date +%U)
 export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
 export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
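Note: the `${LXD_UBUNTU_BASE_VERSION//./-}` expansion above replaces every dot with a dash so the derived names stay valid for instances and images. A minimal sketch of that behaviour, using a hypothetical dotted value (not part of this commit):

#!/bin/bash
# Illustrative only: how the name variables expand for a dotted release string.
LXD_UBUNTU_BASE_VERSION="23.10"                                  # hypothetical value; the commit uses "jammy"
BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"     # all dots become dashes
echo "$BASE_IMAGE_VM_NAME"                                       # prints: ss-base-23-10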
deployment/create_base.sh (new executable file, 101 lines)
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+set -exu
+cd "$(dirname "$0")"
+
+. ./base.sh
+
+bash -c "./stub_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
+
+if incus list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
+    incus delete -f "$BASE_IMAGE_VM_NAME" --project default
+fi
+
+# let's download our base image.
+if ! incus image list --format csv --columns l --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # copy the image down from canonical.
+    incus image copy "images:$BASE_INCUS_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update --target-project default
+fi
+
+# If the VM does exist, then we will delete it (so we can start fresh)
+if incus list --format csv -q --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # if there's no snapshot, we dispense with the old image and try again.
+    if ! incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+        incus delete "$BASE_IMAGE_VM_NAME" --force --project default
+        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
+    fi
+else
+
+    if ! incus list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
+        # the base image is ubuntu:22.04.
+        incus init -q --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project default
+    fi
+
+    if incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
+        # TODO move this sovereign-stack-base construction VM to separate dedicated IP
+        incus config set "$BASE_IMAGE_VM_NAME" --project default
+        incus start "$BASE_IMAGE_VM_NAME" --project default
+        sleep 15
+    fi
+
+    # for CHAIN in mainnet testnet; do
+    #     for DATA in blocks chainstate; do
+    #         incus storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
+    #     done
+    # done
+
+    if incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: RUNNING"; then
+
+        while incus exec "$BASE_IMAGE_VM_NAME" --project default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+            sleep 1
+        done
+
+        # ensure the ssh service is listening at localhost
+        incus exec "$BASE_IMAGE_VM_NAME" --project default -- wait-for-it -t 100 127.0.0.1:22
+
+        # # If we have any chaninstate or blocks in our SSME, let's push them to the
+        # # remote host as a zfs volume that way deployments can share a common history
+        # # of chainstate/blocks.
+        # for CHAIN in testnet mainnet; do
+        #     for DATA in blocks chainstate; do
+        #         # if the storage snapshot doesn't yet exist, create it.
+        #         if ! incus storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+        #             DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+        #             if [ -d "$DATA_PATH" ]; then
+        #                 COMPLETE_FILE_PATH="$DATA_PATH/complete"
+        #                 if incus exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
+        #                     incus file push --recursive --project default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
+        #                     incus exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
+        #                     incus exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
+        #                 else
+        #                     echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
+        #                 fi
+        #             fi
+        #         fi
+        #     done
+        # done
+
+        # stop the VM and get a snapshot.
+        incus stop "$BASE_IMAGE_VM_NAME" --project default
+    fi
+
+    incus snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project default
+
+fi
+
+echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
+incus publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
+
+echo "INFO: Success creating the base image. Deleting artifacts from the build process."
+incus delete -f "$BASE_IMAGE_VM_NAME" --project default
+
+# # now let's get a snapshot of each of the blocks/chainstate directories.
+# for CHAIN in testnet mainnet; do
+#     for DATA in blocks chainstate; do
+#         if ! incus storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+#             echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
+#             incus storage volume snapshot ss-base --project default "$CHAIN-$DATA"
+#         fi
+#     done
+# done
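A rough spot-check after the new script completes, assuming the names exported by base.sh are in scope (a hedged sketch, not part of the commit):

#!/bin/bash
# Hypothetical verification after running deployment/create_base.sh.
. ./base.sh
# the published base image should now exist under its weekly docker-base alias...
incus image list --project default | grep "$DOCKER_BASE_IMAGE_NAME"
# ...and the temporary build VM should have been deleted.
if ! incus list -q --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
    echo "INFO: build artifacts cleaned up."
fi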
(deleted file, 101 lines)
@@ -1,101 +0,0 @@
-#!/bin/bash
-
-set -exu
-cd "$(dirname "$0")"
-
-. ./base.sh
-
-bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
-
-if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
-    lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
-fi
-
-# let's download our base image.
-if ! lxc image list --format csv --columns l --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    # copy the image down from canonical.
-    lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update --target-project default
-fi
-
-# If the lxc VM does exist, then we will delete it (so we can start fresh)
-if lxc list --format csv -q --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    # if there's no snapshot, we dispense with the old image and try again.
-    if ! lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-        lxc delete "$BASE_IMAGE_VM_NAME" --force --project default
-        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
-    fi
-else
-
-    if ! lxc list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
-        # the base image is ubuntu:22.04.
-        lxc init -q --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project default || true
-    fi
-
-    if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
-        # TODO move this sovereign-stack-base construction VM to separate dedicated IP
-        lxc config set "$BASE_IMAGE_VM_NAME" --project default
-        lxc start "$BASE_IMAGE_VM_NAME" --project default
-        sleep 15
-    fi
-
-    # for CHAIN in mainnet testnet; do
-    #     for DATA in blocks chainstate; do
-    #         lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
-    #     done
-    # done
-
-    if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: RUNNING"; then
-
-        while lxc exec "$BASE_IMAGE_VM_NAME" --project default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
-            sleep 1
-        done
-
-        # ensure the ssh service is listening at localhost
-        lxc exec "$BASE_IMAGE_VM_NAME" --project default -- wait-for-it -t 100 127.0.0.1:22
-
-        # # If we have any chaninstate or blocks in our SSME, let's push them to the
-        # # remote host as a zfs volume that way deployments can share a common history
-        # # of chainstate/blocks.
-        # for CHAIN in testnet mainnet; do
-        #     for DATA in blocks chainstate; do
-        #         # if the storage snapshot doesn't yet exist, create it.
-        #         if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-        #             DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
-        #             if [ -d "$DATA_PATH" ]; then
-        #                 COMPLETE_FILE_PATH="$DATA_PATH/complete"
-        #                 if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
-        #                     lxc file push --recursive --project default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
-        #                     lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
-        #                     lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
-        #                 else
-        #                     echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
-        #                 fi
-        #             fi
-        #         fi
-        #     done
-        # done
-
-        # stop the VM and get a snapshot.
-        lxc stop "$BASE_IMAGE_VM_NAME" --project default
-    fi
-
-    lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project default
-
-fi
-
-echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
-lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
-
-echo "INFO: Success creating the base image. Deleting artifacts from the build process."
-lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
-
-# # now let's get a snapshot of each of the blocks/chainstate directories.
-# for CHAIN in testnet mainnet; do
-#     for DATA in blocks chainstate; do
-#         if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-#             echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
-#             lxc storage volume snapshot ss-base --project default "$CHAIN-$DATA"
-#         fi
-#     done
-# done
@@ -21,7 +21,7 @@
 fi

 # if the machine doesn't exist, we create it.
-if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
+if ! incus list --format csv | grep -q "$LXD_VM_NAME"; then

     # create a base image if needed and instantiate a VM.
     if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
@@ -69,57 +69,57 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
     fi

     DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
-    if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
+    if ! incus storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
+        incus storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
     fi

     # TODO ensure we are only GROWING the volume--never shrinking
-    lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"

     SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
-    if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
+    if ! incus storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
+        incus storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
     fi

     # TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
-    lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"

     BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
-    if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
+    if ! incus storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
+        incus storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
     fi

-    lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"

-    bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
+    bash -c "./stub_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"

     # now let's create a new VM to work with.
-    #lxc init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
-    lxc init -q "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
+    #incus init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
+    incus init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"

     # let's PIN the HW address for now so we don't exhaust IP
     # and so we can set DNS internally.
-    lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+    incus config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"

     # attack the docker block device.
-    lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
+    incus storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"

     # if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
     #     # attach any volumes
     #     for CHAIN in testnet mainnet; do
     #         for DATA in blocks chainstate; do
     #             MOUNT_PATH="/$CHAIN-$DATA"
-    #             lxc config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
+    #             incus config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
     #         done
     #     done
     # fi

-    lxc start "$LXD_VM_NAME"
+    incus start "$LXD_VM_NAME"
     sleep 10

-    bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
+    bash -c "./wait_for_ip.sh --lxd-name=$LXD_VM_NAME"

     # scan the remote machine and install it's identity in our SSH known_hosts file.
     ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"
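As a hedged sketch of how the provisioning above can be inspected afterwards (the variable names are the ones used by the calling script; nothing here is added by the commit):

#!/bin/bash
# Illustrative checks on the freshly provisioned VM.
incus config get "$LXD_VM_NAME" volatile.enp5s0.hwaddr        # the pinned MAC address
incus config device list "$LXD_VM_NAME"                       # devices, including the attached docker block volume
incus storage volume show ss-base "$DOCKER_VOLUME_NAME"       # the block volume created above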
@@ -1,5 +1,6 @@
 #!/bin/bash

+set -exu

 # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
 DOMAIN_LIST="${PRIMARY_DOMAIN}"
@@ -11,6 +12,6 @@ export DOMAIN_LIST="$DOMAIN_LIST"
 export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
 export OTHER_SITES_LIST="$OTHER_SITES_LIST"

-export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
-export BTCPAY_SERVER_FQDN="$BTCPAY_SERVER_HOSTNAME.$DOMAIN_NAME"
-export LNPLAY_SERVER_FQDN="$LNPLAY_SERVER_HOSTNAME.$DOMAIN_NAME"
+export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$PRIMARY_DOMAIN"
+export BTCPAY_SERVER_FQDN="$BTCPAY_SERVER_HOSTNAME.$PRIMARY_DOMAIN"
+export LNPLAY_SERVER_FQDN="$LNPLAY_SERVER_HOSTNAME.$PRIMARY_DOMAIN"
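For example, with the exports above the FQDNs are now derived from PRIMARY_DOMAIN rather than DOMAIN_NAME. A small sketch with purely illustrative values:

#!/bin/bash
# Hypothetical values, only to show the derivation.
PRIMARY_DOMAIN="example.com"
WWW_HOSTNAME="www"
BTCPAY_SERVER_HOSTNAME="btcpay"
PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$PRIMARY_DOMAIN"                  # www.example.com
BTCPAY_SERVER_FQDN="$BTCPAY_SERVER_HOSTNAME.$PRIMARY_DOMAIN"      # btcpay.example.com
echo "$PRIMARY_WWW_FQDN $BTCPAY_SERVER_FQDN"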
@@ -5,8 +5,8 @@
 set -exu
 cd "$(dirname "$0")"

-if lxc remote get-default -q | grep -q "local"; then
-    echo "ERROR: you are on the local lxc remote. Nothing to take down"
+if incus remote get-default -q | grep -q "local"; then
+    echo "ERROR: you are on the local incus remote. Nothing to take down"
     exit 1
 fi

@@ -75,7 +75,7 @@ for VIRTUAL_MACHINE in $SERVERS; do

     LXD_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"

-    if lxc list | grep -q "$LXD_NAME"; then
+    if incus list | grep -q "$LXD_NAME"; then
         bash -c "./stop.sh --server=$VIRTUAL_MACHINE"

         if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
@@ -86,16 +86,16 @@ for VIRTUAL_MACHINE in $SERVERS; do
             done
         fi

-        lxc stop "$LXD_NAME"
+        incus stop "$LXD_NAME"

-        lxc delete "$LXD_NAME"
+        incus delete "$LXD_NAME"
     fi

     # remove the ssh known endpoint else we get warnings.
     ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VIRTUAL_MACHINE.$PRIMARY_DOMAIN" | exit

-    if lxc profile list | grep -q "$LXD_NAME"; then
-        lxc profile delete "$LXD_NAME"
+    if incus profile list | grep -q "$LXD_NAME"; then
+        incus profile delete "$LXD_NAME"
     fi

     if [ "$KEEP_DOCKER_VOLUME" = false ]; then
@@ -110,12 +110,12 @@ for VIRTUAL_MACHINE in $SERVERS; do
         # d for docker; b for backup; s for ss-data
         for DATA in d b s; do
             VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""$DATA"
-            if lxc storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
+            if incus storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
                 RESPONSE=
                 read -r -p "Are you sure you want to delete the '$VOLUME_NAME' volume intended for '$LXD_NAME'?": RESPONSE

                 if [ "$RESPONSE" = "y" ]; then
-                    lxc storage volume delete ss-base "$VOLUME_NAME"
+                    incus storage volume delete ss-base "$VOLUME_NAME"
                 fi
             fi
         done
@@ -126,6 +126,6 @@ for VIRTUAL_MACHINE in $SERVERS; do
     fi
 done

-if lxc network list -q | grep -q ss-ovn; then
-    lxc network delete ss-ovn
+if incus network list -q | grep -q ss-ovn; then
+    incus network delete ss-ovn
 fi
@@ -2,11 +2,11 @@

 set -eu

-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export PROJECT_NAME="$PROJECT_NAME"

 if [ "$PROJECT_NAME" = default ]; then
-    echo "ERROR: You are on the default project. Use 'lxc project list' and 'lxc project switch <project>'."
+    echo "ERROR: You are on the default project. Use 'incus project list' and 'incus project switch <project>'."
     exit 1
 fi

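The active project is read straight from the client status output; a minimal sketch of what that pipeline does (output format assumed from `incus info`):

#!/bin/bash
# Illustrative: extract the active project name the same way the script does.
PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
echo "active project: $PROJECT_NAME"    # e.g. "default" on a fresh client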
@@ -27,17 +27,11 @@ source "$PROJECT_DEFINITION_PATH"

 export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"

-if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then
-    echo "ERROR: the site definition does not exist."
-    exit 1
-fi
 if [ -z "$PRIMARY_DOMAIN" ]; then
     echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition at '$PRIMARY_SITE_DEFINITION_PATH'."
     exit 1
 fi

-source "$PRIMARY_SITE_DEFINITION_PATH"
-
 SHASUM_OF_PRIMARY_DOMAIN="$(echo -n "$PRIMARY_DOMAIN" | sha256sum | awk '{print $1;}' )"
 export PRIMARY_DOMAIN_IDENTIFIER="${SHASUM_OF_PRIMARY_DOMAIN: -6}"

@@ -47,7 +47,7 @@ fi

 source "$REMOTE_DEFINITION"

-if ! lxc remote list | grep -q "$REMOTE_NAME"; then
+if ! incus remote list | grep -q "$REMOTE_NAME"; then
     FQDN="${2:-}"

     if [ -z "$FQDN" ]; then
@@ -89,7 +89,7 @@ if ! lxc remote list | grep -q "$REMOTE_NAME"; then
     ssh-copy-id -i "$HOME/.ssh/id_rsa.pub" "ubuntu@$FQDN"

     if [ -z "$DISK_TO_USE" ]; then
-        if ! ssh "ubuntu@$FQDN" lxc storage list -q | grep -q ss-base; then
+        if ! ssh "ubuntu@$FQDN" incus storage list -q | grep -q ss-base; then
             echo "INFO: It looks like the DISK_TO_USE has not been set. Enter it now."
             echo ""

@@ -126,20 +126,20 @@ if [ -z "$LXD_REMOTE_PASSWORD" ]; then
     exit 1
 fi

-if ! command -v lxc >/dev/null 2>&1; then
-    if lxc profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
-        lxc profile delete "$BASE_IMAGE_VM_NAME"
+if ! command -v incus >/dev/null 2>&1; then
+    if incus profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
+        incus profile delete "$BASE_IMAGE_VM_NAME"
         sleep 1
     fi

-    if lxc network list --format csv -q --project default | grep -q lxdbr0; then
-        lxc network delete lxdbr0 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr0; then
+        incus network delete lxdbr0 --project default
         sleep 1
     fi

-    if lxc network list --format csv -q project default | grep -q lxdbr1; then
-        lxc network delete lxdbr1 --project default
+    if incus network list --format csv -q project default | grep -q lxdbr1; then
+        incus network delete lxdbr1 --project default
         sleep 1
     fi

@@ -148,7 +148,7 @@ fi
 # install dependencies.
 ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
 if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
-    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.17/stable'
+    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.18/candidate'
     sleep 5
 fi
@@ -215,37 +215,37 @@ cluster:
   cluster_token: ""
 EOF

-# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
+# ensure the lxd service is available over the network, then add a incus remote, then switch the active remote to it.
 if wait-for-it -t 20 "$FQDN:8443"; then
-    # now create a remote on your local LXC client and switch to it.
+    # now create a remote on your local incus client and switch to it.
     # the software will now target the new remote.
-    lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
-    lxc remote switch "$REMOTE_NAME"
+    incus remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
+    incus remote switch "$REMOTE_NAME"

-    echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your LXC client has been switched to it."
+    echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your incus client has been switched to it."
 else
     echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
     exit 1
 fi

 # create the default storage pool if necessary
-if ! lxc storage list --format csv | grep -q ss-base; then
+if ! incus storage list --format csv | grep -q ss-base; then

     if [ "$DISK_TO_USE" != loop ]; then
         # we omit putting a size here so, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
         # TODO do some sanity/resource checking on DISK_TO_USE. Impelment full-disk encryption?
-        lxc storage create ss-base zfs source="$DISK_TO_USE"
+        incus storage create ss-base zfs source="$DISK_TO_USE"
     else
         # if a disk is the default 'loop', then we create a zfs storage pool
         # on top of the existing filesystem using a loop device, per LXD docs
-        lxc storage create ss-base zfs
+        incus storage create ss-base zfs
     fi

     # # create the testnet/mainnet blocks/chainstate subvolumes.
     # for CHAIN in mainnet testnet; do
     #     for DATA in blocks chainstate; do
-    #         if ! lxc storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
-    #             lxc storage volume create ss-base "$CHAIN-$DATA" --type=filesystem
+    #         if ! incus storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
+    #             incus storage volume create ss-base "$CHAIN-$DATA" --type=filesystem
     #         fi
     #     done
     # done
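A hedged usage sketch of the remote bootstrap above, assuming the target host answers on port 8443; the host name, alias, and password are illustrative, and `--protocol=lxd` is kept because the remote side still runs the LXD snap:

#!/bin/bash
# Illustrative only: register a new remote, switch to it, and confirm.
FQDN="host.example.com"                 # hypothetical deployment host
REMOTE_NAME="my-remote"                 # hypothetical remote alias
incus remote add "$REMOTE_NAME" "$FQDN" --password="changeme" --protocol=lxd --auth-type=tls --accept-certificate
incus remote switch "$REMOTE_NAME"
incus remote get-default                # should print the new remote name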
@@ -253,5 +253,5 @@ if ! lxc storage list --format csv | grep -q ss-base; then
 else
     echo "WARNING! The host '$FQDN' appears to have Sovereign Stack worksloads already provisioned."
     echo "INFO: Here are your current Deployments."
-    lxc project list -q
+    incus project list -q
 fi
@@ -2,7 +2,7 @@

 set -eu

-CURRENT_REMOTE="$(lxc remote get-default)"
+CURRENT_REMOTE="$(incus remote get-default)"

 if echo "$CURRENT_REMOTE" | grep -q "production"; then
     echo "WARNING: You are running a migration procedure on a production system."
@@ -50,11 +50,11 @@ if [ -n "$DEPLOYMENT_STRING" ]; then
         BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
         PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"

-        # create the lxc project as specified by PROJECT_NAME
-        if ! lxc project list | grep -q "$PROJECT_NAME"; then
-            lxc project create "$PROJECT_NAME"
-            lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true
-            lxc project switch "$PROJECT_NAME"
+        # create the incus project as specified by PROJECT_NAME
+        if ! incus project list | grep -q "$PROJECT_NAME"; then
+            incus project create "$PROJECT_NAME"
+            incus project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true
+            incus project switch "$PROJECT_NAME"
         fi
     done
 fi
@@ -4,13 +4,13 @@
 set -e
 cd "$(dirname "$0")"

-PURGE_LXD=false
+PURGE_INCUS=false

 # grab any modifications from the command line.
 for i in "$@"; do
     case $i in
         --purge)
-            PURGE_LXD=true
+            PURGE_INCUS=true
             shift
             ;;
         *)
@@ -25,61 +25,61 @@ source ../defaults.sh
 ./down.sh

 # these only get initialzed upon creation, so we MUST delete here so they get recreated.
-if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
-    lxc profile delete "$BASE_IMAGE_VM_NAME"
+if incus profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
+    incus profile delete "$BASE_IMAGE_VM_NAME"
 fi

-if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
-    lxc image rm "$BASE_IMAGE_VM_NAME"
+if incus image list | grep -q "$BASE_IMAGE_VM_NAME"; then
+    incus image rm "$BASE_IMAGE_VM_NAME"
 fi

-if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
-    lxc image rm "$DOCKER_BASE_IMAGE_NAME"
+if incus image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+    incus image rm "$DOCKER_BASE_IMAGE_NAME"
 fi

-CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
-if ! lxc info | grep -q "project: default"; then
-    lxc project switch default
-    lxc project delete "$CURRENT_PROJECT"
+CURRENT_PROJECT="$(incus info | grep "project:" | awk '{print $2}')"
+if ! incus info | grep -q "project: default"; then
+    incus project switch default
+    incus project delete "$CURRENT_PROJECT"
 fi

-if [ "$PURGE_LXD" = true ]; then
+if [ "$PURGE_INCUS" = true ]; then

-    if lxc profile show default | grep -q "root:"; then
-        lxc profile device remove default root
+    if incus profile show default | grep -q "root:"; then
+        incus profile device remove default root
     fi

-    if lxc profile show default| grep -q "eth0:"; then
-        lxc profile device remove default eth0
+    if incus profile show default| grep -q "eth0:"; then
+        incus profile device remove default eth0
     fi

-    if lxc network list --format csv -q --project default | grep -q lxdbr0; then
-        lxc network delete lxdbr0 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr0; then
+        incus network delete lxdbr0 --project default
     fi

-    if lxc network list --format csv -q --project default | grep -q lxdbr1; then
-        lxc network delete lxdbr1 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr1; then
+        incus network delete lxdbr1 --project default
     fi

     # # create the testnet/mainnet blocks/chainstate subvolumes.
     # for CHAIN in mainnet testnet; do
     #     for DATA in blocks chainstate; do
-    #         if lxc storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
-    #             lxc storage volume delete ss-base "$CHAIN-$DATA"
+    #         if incus storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
+    #             incus storage volume delete ss-base "$CHAIN-$DATA"
     #         fi
     #     done
     # done

     echo "WARNING: ss-basae NOT DELETED. NEED TO TEST THIS SCRIPT"
-    # if lxc storage list --format csv | grep -q ss-base; then
-    #     lxc storage delete ss-base
+    # if incus storage list --format csv | grep -q ss-base; then
+    #     incus storage delete ss-base
     # fi

-    CURRENT_REMOTE="$(lxc remote get-default)"
-    if ! lxc remote get-default | grep -q "local"; then
-        lxc remote switch local
-        lxc remote remove "$CURRENT_REMOTE"
+    CURRENT_REMOTE="$(incus remote get-default)"
+    if ! incus remote get-default | grep -q "local"; then
+        incus remote switch local
+        incus remote remove "$CURRENT_REMOTE"

         echo "INFO: The remote '$CURRENT_REMOTE' has been removed! You are now controlling your local instance."
     fi
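Before the purge path removes devices from the default profile, the same objects it greps for can be inspected; a small illustrative sketch (not part of the commit):

#!/bin/bash
# Illustrative: look at the default profile before root/eth0 are removed.
incus profile show default              # full YAML, including the devices: section
incus profile device list default       # just the device names, e.g. root, eth0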
@@ -9,14 +9,14 @@ cd "$(dirname "$0")"

 echo "Global Settings:"

-lxc image list
-lxc storage list
+incus image list
+incus storage list

 echo
 echo

-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export export="$PROJECT_NAME"
 export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"

@@ -26,17 +26,17 @@ echo "Active project: $PROJECT_NAME"
 echo "----------------------------------------------------------"

 echo " Networks:"
-lxc network list
+incus network list

 echo
 echo " Storage Volumes:"
-lxc storage volume list ss-base
+incus storage volume list ss-base

 echo
 echo " Profiles:"
-lxc profile list
+incus profile list

 echo
 echo " Instances (VMs):"
-lxc list
+incus list
@@ -5,8 +5,8 @@
 set -eu
 cd "$(dirname "$0")"

-if lxc remote get-default -q | grep -q "local"; then
-    echo "ERROR: you are on the local lxc remote. Nothing to take down"
+if incus remote get-default -q | grep -q "local"; then
+    echo "ERROR: you are on the local incus remote. Nothing to take down"
     exit 1
 fi

@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -exu
 cd "$(dirname "$0")"

 VIRTUAL_MACHINE=base
@@ -36,8 +36,8 @@ done

 # generate the custom cloud-init file. Cloud init installs and configures sshd
 SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
-eval "$(ssh-agent -s)"
-ssh-add "$SSH_HOME/id_rsa" >> /dev/null
+eval "$(ssh-agent -s)" > /dev/null
+ssh-add "$SSH_HOME/id_rsa" > /dev/null
 export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"

 export FILENAME="$LXD_HOSTNAME.yml"
@@ -302,18 +302,18 @@ EOF
 fi

 if [ "$VIRTUAL_MACHINE" = base ]; then
-    if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
-        lxc profile create "$LXD_HOSTNAME" --project default
+    if ! incus profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
+        incus profile create "$LXD_HOSTNAME" --project default
     fi

     # configure the profile with our generated cloud-init.yml file.
-    cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
+    incus profile edit "$LXD_HOSTNAME" --project default < "$YAML_PATH"
 else
-    if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
-        lxc profile create "$LXD_HOSTNAME"
+    if ! incus profile list --format csv | grep -q "$LXD_HOSTNAME"; then
+        incus profile create "$LXD_HOSTNAME"
     fi

     # configure the profile with our generated cloud-init.yml file.
-    cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
+    incus profile edit "$LXD_HOSTNAME" < "$YAML_PATH"
 fi
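The `cat "$YAML_PATH" | lxc profile edit ...` pipeline becomes a plain input redirection. A minimal sketch of the same pattern with assumed names (profile and file are hypothetical):

#!/bin/bash
# Illustrative: load a generated cloud-init YAML into a profile and read it back.
LXD_HOSTNAME="ss-base-jammy"            # hypothetical profile name
YAML_PATH="./ss-base-jammy.yml"         # hypothetical generated file
incus profile edit "$LXD_HOSTNAME" --project default < "$YAML_PATH"
incus profile show "$LXD_HOSTNAME" --project default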
@@ -6,7 +6,7 @@ cd "$(dirname "$0")"
 . ./target.sh

 # check to ensure dependencies are met.
-for cmd in wait-for-it dig rsync sshfs lxc; do
+for cmd in wait-for-it dig rsync sshfs incus; do
     if ! command -v "$cmd" >/dev/null 2>&1; then
         echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
         exit 1
@@ -14,7 +14,7 @@ for cmd in wait-for-it dig rsync sshfs lxc; do
 done

 # do a spot check; if we are on production warn.
-if lxc remote get-default | grep -q "production"; then
+if incus remote get-default | grep -q "production"; then
     echo "WARNING: You are running command against a production system!"
     echo ""

@@ -48,7 +48,7 @@ SKIP_LNPLAY_SERVER=false
 BACKUP_BTCPAY_ARCHIVE_PATH=
 RESTORE_BTCPAY=false
 UPDATE_BTCPAY=false
-REMOTE_NAME="$(lxc remote get-default)"
+REMOTE_NAME="$(incus remote get-default)"
 USER_SAYS_YES=false

 WWW_SERVER_MAC_ADDRESS=
@@ -212,7 +212,7 @@ EOL

 }

-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export PROJECT_NAME="$PROJECT_NAME"
 export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
 export SKIP_BTCPAYSERVER="$SKIP_BTCPAYSERVER"
@@ -287,10 +287,10 @@ export UPDATE_BTCPAY="$UPDATE_BTCPAY"
 VPS_HOSTNAME=

 . ./base.sh
-if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+if ! incus image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
     # create the lxd base image.
     if [ "$SKIP_BASE_IMAGE_CREATION" = false ]; then
-        ./create_lxc_base.sh
+        ./create_base.sh
     fi
 fi

@@ -325,8 +325,8 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do

     # Goal is to get the macvlan interface.
     LXD_SS_CONFIG_LINE=
-    if lxc network list --format csv --project default | grep lxdbr0 | grep -q "ss-config"; then
-        LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project default | grep lxdbr0 | grep ss-config)"
+    if incus network list --format csv --project default | grep lxdbr0 | grep -q "ss-config"; then
+        LXD_SS_CONFIG_LINE="$(incus network list --format csv --project default | grep lxdbr0 | grep ss-config)"
     fi

     if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@@ -340,13 +340,13 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do

     # Now let's switch to the new project to ensure new resources are created under the project scope.
-    if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
-        lxc project switch "$PROJECT_NAME"
+    if ! incus info | grep "project:" | grep -q "$PROJECT_NAME"; then
+        incus project switch "$PROJECT_NAME"
     fi

     # check if the OVN network exists in this project.
-    if ! lxc network list | grep -q "ss-ovn"; then
-        lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
+    if ! incus network list | grep -q "ss-ovn"; then
+        incus network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
     fi

     export MAC_ADDRESS_TO_PROVISION=
@@ -427,19 +427,15 @@ EOL

     LNPLAY_ENV_FILE=./project/lnplay/environments/"$LNPLAY_SERVER_FQDN"

-    # only stub out the file if it doesn't exist. otherwise we leave it be.
-    if [ ! -f "$LNPLAY_ENV_FILE" ]; then
     # and we have to set our environment file as well.
     cat > "$LNPLAY_ENV_FILE" <<EOL
 DOCKER_HOST=ssh://ubuntu@${LNPLAY_SERVER_FQDN}
 DOMAIN_NAME=${PRIMARY_DOMAIN}
 ENABLE_TLS=true
 BTC_CHAIN=${BITCOIN_CHAIN}
-CLN_COUNT=200
 CHANNEL_SETUP=none
 LNPLAY_SERVER_PATH=${SITES_PATH}/${PRIMARY_DOMAIN}/lnplayserver
 EOL
-    fi

     bash -c "./project/lnplay/up.sh -y"
 fi
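With the existence guard removed, the environment file is rewritten on every run. Assuming purely illustrative values for the variables, the rendered file would look roughly like this (domain, chain, and paths are hypothetical):

# Hypothetical contents of ./project/lnplay/environments/lnplay.example.com
DOCKER_HOST=ssh://ubuntu@lnplay.example.com
DOMAIN_NAME=example.com
ENABLE_TLS=true
BTC_CHAIN=testnet
CHANNEL_SETUP=none
LNPLAY_SERVER_PATH=/home/ubuntu/ss/sites/example.com/lnplayserver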
@@ -2,13 +2,13 @@

 set -e

-LXC_INSTANCE_NAME=
+INCUS_INSTANCE_NAME=

 # grab any modifications from the command line.
 for i in "$@"; do
     case $i in
         --lxd-name=*)
-            LXC_INSTANCE_NAME="${i#*=}"
+            INCUS_INSTANCE_NAME="${i#*=}"
             shift
             ;;
         *)
@@ -19,19 +19,19 @@ for i in "$@"; do
 done

 # if the invoker did not set the instance name, throw an error.
-if [ -z "$LXC_INSTANCE_NAME" ]; then
-    echo "ERROR: The lxc instance name was not specified. Use '--lxc-name' when calling wait_for_lxc_ip.sh."
+if [ -z "$INCUS_INSTANCE_NAME" ]; then
+    echo "ERROR: The instance name was not specified. Use '--incus-name' when calling wait_for_ip.sh."
     exit 1
 fi

-if ! lxc list --format csv | grep -q "$LXC_INSTANCE_NAME"; then
-    echo "ERROR: the lxc instance '$LXC_INSTANCE_NAME' does not exist."
+if ! incus list --format csv | grep -q "$INCUS_INSTANCE_NAME"; then
+    echo "ERROR: the instance '$INCUS_INSTANCE_NAME' does not exist."
     exit 1
 fi

 IP_V4_ADDRESS=
 while true; do
-    IP_V4_ADDRESS="$(lxc list "$LXC_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
+    IP_V4_ADDRESS="$(incus list "$INCUS_INSTANCE_NAME" --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')" || true
     export IP_V4_ADDRESS="$IP_V4_ADDRESS"
     if [ -n "$IP_V4_ADDRESS" ]; then
         # give the machine extra time to spin up.
@@ -44,7 +44,7 @@ while true; do
 done

 # wait for cloud-init to complet before returning.
-while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+while incus exec "$INCUS_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
     sleep 1
 done
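A hedged sketch of calling the renamed helper directly; note the case statement above still matches `--lxd-name=`, so that is the flag an invocation passes (the instance name here is hypothetical):

#!/bin/bash
# Illustrative invocation of the renamed wait script.
bash -c "./wait_for_ip.sh --lxd-name=www-example-com"
# afterwards the instance should report an enp5s0 IPv4 address:
incus list www-example-com --format csv --columns=4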
install.sh (102 lines changed)
@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -exu
 cd "$(dirname "$0")"

 # https://www.sovereign-stack.org/install/
@@ -35,7 +35,6 @@ done
 sudo iptables -F FORWARD
 sudo iptables -P FORWARD ACCEPT

-
 # if the user didn't specify the disk or partition, we create a loop device under
 # the user's home directory. If the user does specify a disk or partition, we will
 # create the ZFS pool there.
@@ -45,23 +44,23 @@ fi

 export DISK="$DISK"

-# install lxd snap and initialize it
-if ! snap list | grep -q lxd; then
-    sudo snap install lxd --channel=5.17/stable
-    sleep 5
+# this script undoes install.sh
+if ! command -v incus >/dev/null 2>&1; then
+    bash -c ./install_incus.sh

     # run lxd init
-    cat <<EOF | lxd init --preseed
+    cat <<EOF | sudo incus admin init --preseed
 config: {}
 networks:
 - config:
     ipv4.address: auto
     ipv4.dhcp: true
-    ipv4.nat: true
     ipv6.address: none
   description: "Default network bridge for ss-mgmt outbound network access."
-  name: lxdbr0
+  name: incusbr0
   type: bridge
+  project: default
 storage_pools:
 - config:
     source: ${DISK}
@@ -74,7 +73,7 @@ profiles:
   devices:
     enp5s0:
       name: enp5s0
-      network: lxdbr0
+      network: incusbr0
       type: nic
     root:
       path: /
@@ -82,30 +81,32 @@ profiles:
       type: disk
   name: default
 projects: []
+cluster: null

 EOF

 fi

 . ./deployment/deployment_defaults.sh

 . ./deployment/base.sh

 # we need to get the base image. IMport it if it's cached, else download it then cache it.
-if ! lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+if ! incus image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
     # if the image if cached locally, import it from disk, otherwise download it from ubuntu
     IMAGE_PATH="$HOME/ss/cache/ss-ubuntu-jammy"
     IMAGE_IDENTIFIER=$(find "$IMAGE_PATH" | grep ".qcow2" | head -n1 | cut -d "." -f1)
     METADATA_FILE="$IMAGE_PATH/meta-$IMAGE_IDENTIFIER.tar.xz"
     IMAGE_FILE="$IMAGE_PATH/$IMAGE_IDENTIFIER.qcow2"
     if [ -d "$IMAGE_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
-        lxc image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
+        incus image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
     else
-        lxc image copy "images:$BASE_LXC_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
+        incus image copy "images:$BASE_INCUS_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
         mkdir -p "$IMAGE_PATH"
-        lxc image export "$UBUNTU_BASE_IMAGE_NAME" "$IMAGE_PATH" --vm
+        incus image export "$UBUNTU_BASE_IMAGE_NAME" "$IMAGE_PATH" --vm
     fi
 fi
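A small verification sketch after the preseed runs on the management host (object names come from the preseed above; this is not part of the commit):

#!/bin/bash
# Illustrative checks that the preseed created the expected objects.
incus network show incusbr0          # the bridge defined in the preseed
incus storage show ss-base           # the zfs pool backed by the chosen disk
incus profile show default           # should map enp5s0 to incusbr0 and define the root disk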
@ -118,28 +119,25 @@ if [ ! -f "$SSH_PRIVKEY_PATH" ]; then
|
|||||||
ssh-keygen -f "$SSH_PRIVKEY_PATH" -t rsa -b 4096
|
ssh-keygen -f "$SSH_PRIVKEY_PATH" -t rsa -b 4096
|
||||||
fi
|
fi
|
||||||
|
|
||||||
chmod 700 "$HOME/.ssh"
|
|
||||||
chmod 600 "$HOME/.ssh/config"
|
|
||||||
|
|
||||||
# add SSH_PUBKEY_PATH to authorized_keys
|
# add SSH_PUBKEY_PATH to authorized_keys
|
||||||
grep -qxF "$(cat $SSH_PUBKEY_PATH)" "$SSH_PATH/authorized_keys" || cat "$SSH_PUBKEY_PATH" >> "$SSH_PATH/authorized_keys"
|
grep -qxF "$(cat "$SSH_PUBKEY_PATH")" "$SSH_PATH/authorized_keys" || cat "$SSH_PUBKEY_PATH" >> "$SSH_PATH/authorized_keys"
|
||||||
|
|
||||||
FROM_BUILT_IMAGE=false
|
FROM_BUILT_IMAGE=false
|
||||||
if ! lxc list --format csv | grep -q ss-mgmt; then
|
if ! incus list --format csv | grep -q ss-mgmt; then
|
||||||
|
|
||||||
# TODO check to see if there's an existing ss-mgmt image to spawn from, otherwise do this.
|
# TODO check to see if there's an existing ss-mgmt image to spawn from, otherwise do this.
|
||||||
if lxc image list | grep -q ss-mgmt; then
|
if incus image list | grep -q ss-mgmt; then
|
||||||
FROM_BUILT_IMAGE=true
|
FROM_BUILT_IMAGE=true
|
||||||
lxc init -q ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
|
incus init ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
|
||||||
else
|
else
|
||||||
lxc init -q "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
|
incus init "images:$BASE_INCUS_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
|
||||||
fi
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# mount the pre-verified sovereign stack git repo into the new vm
|
# mount the pre-verified sovereign stack git repo into the new vm
|
||||||
if ! lxc config device show ss-mgmt | grep -q ss-code; then
|
if ! incus config device show ss-mgmt | grep -q ss-code; then
|
||||||
lxc config device add ss-mgmt ss-code disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
|
incus config device add ss-mgmt ss-code disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# create the ~/ss path and mount it into the vm.
|
# create the ~/ss path and mount it into the vm.
|
||||||
@ -148,8 +146,8 @@ source ./deployment/base.sh
|
|||||||
|
|
||||||
mkdir -p "$SS_ROOT_PATH"
|
mkdir -p "$SS_ROOT_PATH"
|
||||||
|
|
||||||
if ! lxc config device show ss-mgmt | grep -q ss-root; then
|
if ! incus config device show ss-mgmt | grep -q ss-root; then
|
||||||
lxc config device add ss-mgmt ss-root disk source="$SS_ROOT_PATH" path=/home/ubuntu/ss
|
incus config device add ss-mgmt ss-root disk source="$SS_ROOT_PATH" path=/home/ubuntu/ss
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# if a ~/.bitcoin/testnet3/blocks direrectory exists, mount it in.
|
# if a ~/.bitcoin/testnet3/blocks direrectory exists, mount it in.
|
||||||
@ -157,62 +155,62 @@ BITCOIN_DIR="$HOME/.bitcoin"
|
|||||||
REMOTE_BITCOIN_CACHE_PATH="/home/ubuntu/ss/cache/bitcoin"
|
REMOTE_BITCOIN_CACHE_PATH="/home/ubuntu/ss/cache/bitcoin"
|
||||||
BITCOIN_TESTNET_BLOCKS_PATH="$BITCOIN_DIR/testnet3/blocks"
|
BITCOIN_TESTNET_BLOCKS_PATH="$BITCOIN_DIR/testnet3/blocks"
|
||||||
if [ -d "$BITCOIN_TESTNET_BLOCKS_PATH" ]; then
|
if [ -d "$BITCOIN_TESTNET_BLOCKS_PATH" ]; then
|
||||||
if ! lxc config device show ss-mgmt | grep -q ss-testnet-blocks; then
|
if ! incus config device show ss-mgmt | grep -q ss-testnet-blocks; then
|
||||||
lxc config device add ss-mgmt ss-testnet-blocks disk source="$BITCOIN_TESTNET_BLOCKS_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/testnet/blocks
|
incus config device add ss-mgmt ss-testnet-blocks disk source="$BITCOIN_TESTNET_BLOCKS_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/testnet/blocks
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# if a ~/.bitcoin/testnet3/blocks direrectory exists, mount it in.
|
# if a ~/.bitcoin/testnet3/blocks direrectory exists, mount it in.
|
||||||
 BITCOIN_TESTNET_CHAINSTATE_PATH="$BITCOIN_DIR/testnet3/chainstate"
 if [ -d "$BITCOIN_TESTNET_CHAINSTATE_PATH" ]; then
-if ! lxc config device show ss-mgmt | grep -q ss-testnet-chainstate; then
+if ! incus config device show ss-mgmt | grep -q ss-testnet-chainstate; then
-lxc config device add ss-mgmt ss-testnet-chainstate disk source="$BITCOIN_TESTNET_CHAINSTATE_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/testnet/chainstate
+incus config device add ss-mgmt ss-testnet-chainstate disk source="$BITCOIN_TESTNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/testnet/chainstate"
 fi
 fi

 # if a ~/.bitcoin/blocks dir exists, mount it in.
 BITCOIN_MAINNET_BLOCKS_PATH="$BITCOIN_DIR/blocks"
 if [ -d "$BITCOIN_MAINNET_BLOCKS_PATH" ]; then
-if ! lxc config device show ss-mgmt | grep -q ss-mainnet-blocks; then
+if ! incus config device show ss-mgmt | grep -q ss-mainnet-blocks; then
-lxc config device add ss-mgmt ss-mainnet-blocks disk source="$BITCOIN_MAINNET_BLOCKS_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/mainnet/blocks
+incus config device add ss-mgmt ss-mainnet-blocks disk source="$BITCOIN_MAINNET_BLOCKS_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/blocks"
 fi
 fi

 # if a ~/.bitcoin/chainstate directory exists, mount it in.
 BITCOIN_MAINNET_CHAINSTATE_PATH="$BITCOIN_DIR/chainstate"
 if [ -d "$BITCOIN_MAINNET_CHAINSTATE_PATH" ]; then
-if ! lxc config device show ss-mgmt | grep -q ss-mainnet-blocks; then
+if ! incus config device show ss-mgmt | grep -q ss-mainnet-chainstate; then
-lxc config device add ss-mgmt ss-mainnet-chainstate disk source="$BITCOIN_MAINNET_CHAINSTATE_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/mainnet/chainstate
+incus config device add ss-mgmt ss-mainnet-chainstate disk source="$BITCOIN_MAINNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/chainstate"
 fi
 fi

 # mount the ssh directory in there.
 if [ -f "$SSH_PUBKEY_PATH" ]; then
-if ! lxc config device show ss-mgmt | grep -q ss-ssh; then
+if ! incus config device show ss-mgmt | grep -q ss-ssh; then
-lxc config device add ss-mgmt ss-ssh disk source="$HOME/.ssh" path=/home/ubuntu/.ssh
+incus config device add ss-mgmt ss-ssh disk source="$HOME/.ssh" path=/home/ubuntu/.ssh
 fi
 fi

 # start the vm if it's not already running
-if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
+if incus list --format csv | grep -q "ss-mgmt,STOPPED"; then
-lxc start ss-mgmt
+incus start ss-mgmt
 sleep 10
 fi

 # wait for the vm to have an IP address
-. ./management/wait_for_lxc_ip.sh
+. ./management/wait_for_ip.sh

 # do some other preparations for user experience
-lxc file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
+incus file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
-lxc file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
+incus file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
-lxc file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
+incus file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
-lxc file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack
+incus file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack

 # install SSH
-lxc exec ss-mgmt apt-get update
+incus exec ss-mgmt apt-get update
-lxc exec ss-mgmt -- apt-get install -y openssh-server
+incus exec ss-mgmt -- apt-get install -y openssh-server
-lxc file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
+incus file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
-lxc exec ss-mgmt -- sudo systemctl restart sshd
+incus exec ss-mgmt -- sudo systemctl restart sshd

 # add 'ss-manage' to the bare metal ~/.bashrc
 ADDED_COMMAND=false

@@ -234,14 +232,14 @@ ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
 if [ "$FROM_BUILT_IMAGE" = false ]; then
 ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh

-lxc stop ss-mgmt
+incus stop ss-mgmt

-if ! lxc image list | grep -q "ss-mgmt"; then
+if ! incus image list | grep -q "ss-mgmt"; then
 echo "Publishing image. Please wait, this may take a while..."
-lxc publish ss-mgmt --alias=ss-mgmt
+incus publish ss-mgmt --alias=ss-mgmt
 fi

-lxc start ss-mgmt
+incus start ss-mgmt
 fi

 if [ "$ADDED_COMMAND" = true ]; then
install_incus.sh (new executable file)
@@ -0,0 +1,67 @@
#!/bin/bash

set -ex

if [ ! -f "$(which incus)" ]; then
# need to get the remote.conf in there
# this isn't really needed since env are provided via docker.
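# Configure the Zabbly package repository (deb822 .sources format), which provides incus builds for Ubuntu jammy.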
sudo tee /etc/apt/sources.list.d/zabbly-incus-daily.sources <<EOF
Enabled: yes
Types: deb
URIs: https://pkgs.zabbly.com/incus/daily
Suites: jammy
Components: main
Architectures: amd64
Signed-By: /etc/apt/keyrings/zabbly.asc
EOF
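# Install the repository signing key referenced by the Signed-By field above.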
sudo tee /etc/apt/keyrings/zabbly.asc <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQGNBGTlYcIBDACYQoVXVyQ6Y3Of14GwEaiv/RstQ8jWnH441OtvDbD/VVT8yF0P
pUfypWjQS8aq0g32Qgb9H9+b8UAAKojA2W0szjJFlmmSq19YDMMmNC4AnfeZlKYM
61Zonna7fPaXmlsTlSiUeo/PGvmAXrkFURC9S8FbhZdWEcUpf9vcKAoEzV8qGA4J
xbKlj8EOjSkdq3OQ1hHjP8gynbbzMhZQwjbnWqoiPj35ed9EMn+0QcX+GmynGq6T
hBXdRdeQjZC6rmXzNF2opCyxqx3BJ0C7hUtpHegmeoH34wnJHCqGYkEKFAjlRLoW
tOzHY9J7OFvB6U7ENtnquj7lg2VQK+hti3uiHW+oide06QgjVw2irucCblQzphgo
iX5QJs7tgFFDsA9Ee0DZP6cu83hNFdDcXEZBc9MT5Iu0Ijvj7Oeym3DJpkCuIWgk
SeP56sp7333zrg73Ua7YZsZHRayAe/4YdNUua+90P4GD12TpTtJa4iRWRd7bis6m
tSkKRj7kxyTsxpEAEQEAAbQmWmFiYmx5IEtlcm5lbCBCdWlsZHMgPGluZm9AemFi
Ymx5LmNvbT6JAdQEEwEKAD4WIQRO/FkGlssVuHxzo62CzIeXyDjc/QUCZOVhwgIb
AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCCzIeXyDjc/W05C/4n
lGRTlyOETF2K8oWbjtan9wlttQ+pwymJCnP8T+JJDycGL8dPsGdG1ldHdorVZpFi
1P+Bem9bbiW73TpbX+WuCfP1g3WN7AVa2mYRfSVhsLNeBAMRgWgNW9JYsmg99lmY
aPsRYZdGu/PB+ffMIyWhjL3CKCbYS6lV5N5Mi4Lobyz/I1Euxpk2vJhhUqh786nJ
pQpDnvEl1CRANS6JD9bIvEdfatlAhFlrz1TTf6R7SlppyYI7tme4I/G3dnnHWYSG
cGRaLwpwobTq0UNSO71g7+at9eY8dh5nn2lZUvvxZvlbXoOoPxKUoeGVXqoq5F7S
QcMVAogYtyNlnLnsUfSPw6YFRaQ5o00h30bR3hk+YmJ47AJCRY9GIc/IEdSnd/Z5
Ea7CrP2Bo4zxPgcl8fe311FQRTRoWr19l5PXZgGjzy6siXTrYQi6GjLtqVB5SjJf
rrIIy1vZRyDL96WPu6fS+XQMpjsSygj+DBFk8OAvHhQhMCXHgT4BMyg4D5GE0665
AY0EZOVhwgEMAMIztf6WlRsweysb0tzktYE5E/GxIK1lwcD10Jzq3ovJJPa2Tg2t
J6ZBmMQfwU4OYO8lJxlgm7t6MYh41ZZaRhySCtbJiAXqK08LP9Gc1iWLRvKuMzli
NFSiFDFGT1D6kwucVfL/THxvZlQ559kK+LB4iXEKXz37r+MCX1K9uiv0wn63Vm0K
gD3HDgfXWYJcNyXXfJBe3/T5AhuSBOQcpa7Ow5n8zJ+OYg3FFKWHDBTSSZHpbJFr
ArMIGARz5/f+EVj9XGY4W/+ZJlxNh8FzrTLeRArmCWqKLPRG/KF36dTY7MDpOzlw
vu7frv+cgiXHZ2NfPrkH8oOl4L+ufze5KBGcN0QwFDcuwCkv/7Ft9Ta7gVaIBsK7
12oHInUJ6EkBovxpuaLlHlP8IfmZLZbbHzR2gR0e6IhLtrzd7urB+gXUtp6+wCL+
kWD14TTJhSQ+SFU8ajvUah7/1m2bxdjZNp9pzOPGkr/jEjCM0CpZiCY62SeIJqVc
4/ID9NYLAGmSIwARAQABiQG8BBgBCgAmFiEETvxZBpbLFbh8c6OtgsyHl8g43P0F
AmTlYcICGwwFCQPCZwAACgkQgsyHl8g43P0wEgv+LuknyXHpYpiUcJOl9Q5yLokd
o7tJwJ+9Fu7EDAfM7mPgyBj7Ad/v9RRP+JKWHqIYEjyrRnz9lmzciU+LT/CeoQu/
MgpU8wRI4gVtLkX2238amrTKKlVjQUUNHf7cITivUs/8e5W21JfwvcSzu5z4Mxyw
L6vMlBUAixtzZSXD6O7MO9uggHUZMt5gDSPXG2RcIgWm0Bd1yTHL7jZt67xBgZ4d
hUoelMN2XIDLv4SY78jbHAqVN6CLLtWrz0f5YdaeYj8OT6Ohr/iJQdlfVaiY4ikp
DzagLi0LvG9/GuB9eO6yLuojg45JEH8DC7NW5VbdUITxQe9NQ/j5kaRKTEq0fyZ+
qsrryTyvXghxK8oMUcI10l8d41qXDDPCA40kruuspCZSAle3zdqpYqiu6bglrgWr
Zr2Nm9ecm/kkqMIcyJ8e2mlkuufq5kVem0Oez+GIDegvwnK3HAqWQ9lzdWKvnLiE
gNkvg3bqIwZ/WoHBnSwOwwAzwarJl/gn8OG6CIeP
=8Uc6
-----END PGP PUBLIC KEY BLOCK-----
EOF

sudo apt update
sudo apt install incus -y --no-install-recommends
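# Adding the user to the incus-admin group allows managing incus without sudo; it takes effect at the next login.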
sudo usermod -a -G incus-admin "$(whoami)"

fi
manage.sh (18 lines changed)
@@ -6,34 +6,34 @@ set -eu
 cd "$(dirname "$0")"

 # check to ensure dependencies are met.
-if ! command -v lxc >/dev/null 2>&1; then
+if ! command -v incus >/dev/null 2>&1; then
-echo "This script requires 'lxd/lxc' to be installed. Please run 'install.sh'."
+echo "This script requires incus to be installed. Please run 'install.sh'."
 exit 1
 fi

-if ! lxc remote get-default | grep -q "local"; then
+if ! incus remote get-default | grep -q "local"; then
-lxc remote switch "local"
+incus remote switch "local"
 fi

-if ! lxc list -q --format csv | grep -q ss-mgmt; then
+if ! incus list -q --format csv | grep -q ss-mgmt; then
 echo "ERROR: the 'ss-mgmt' VM does not exist. You may need to run install.sh"
 exit 1
 fi

 # if the mgmt machine doesn't exist, then warn the user to perform ./install.sh
-if ! lxc list --format csv | grep -q "ss-mgmt"; then
+if ! incus list --format csv | grep -q "ss-mgmt"; then
 echo "ERROR: the management machine VM does not exist. You probably need to run './install.sh'."
 echo "INFO: check out https://www.sovereign-stack.org/tag/code-lifecycle-management/ for more information."
 fi

 # if the machine does exist, let's make sure it's RUNNING.
-if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
+if incus list --format csv | grep -q "ss-mgmt,STOPPED"; then
 echo "INFO: The SSME was in a STOPPED state. Starting the environment. Please wait."
-lxc start ss-mgmt
+incus start ss-mgmt
 sleep 30
 fi

-. ./management/wait_for_lxc_ip.sh
+. ./management/wait_for_ip.sh

 # let's ensure ~/.ssh/ssh_config is using the correct IP address for ss-mgmt.
 ssh ubuntu@"$IP_V4_ADDRESS"

@@ -35,7 +35,7 @@ sleep 10
 # install snap
 if ! snap list | grep -q lxd; then
 sudo snap install htop
-sudo snap install lxd --channel=5.17/stable
+sudo snap install lxd --channel=5.18/candidate
 sleep 6

 # We just do an auto initialization. All we are using is the LXD client inside the management environment.
@@ -43,7 +43,7 @@ if ! snap list | grep -q lxd; then
 fi

 # run an lxd command so we don't get a warning upon first invocation
-lxc list > /dev/null 2>&1
+incus list > /dev/null 2>&1

 # add groups for docker and lxd
 if ! groups ubuntu | grep -q docker; then
@@ -5,7 +5,7 @@ set -e
 IP_V4_ADDRESS=
 while true; do
 # wait for the enp5s0 interface to show up on ss-mgmt
-if lxc list ss-mgmt | grep -q enp5s0; then
+if incus list ss-mgmt | grep -q enp5s0; then
 break;
 else
 sleep 1
@@ -13,7 +13,7 @@ while true; do
 done

 while true; do
-IP_V4_ADDRESS=$(lxc list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+IP_V4_ADDRESS=$(incus list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
 if [ -n "$IP_V4_ADDRESS" ]; then
 # give the machine extra time to spin up.
 break;
@@ -27,6 +27,6 @@ done
 export IP_V4_ADDRESS="$IP_V4_ADDRESS"

 # wait for the VM to complete its default cloud-init.
-while lxc exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+while incus exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
 sleep 1
 done
uninstall.sh (66 lines changed)
@@ -1,14 +1,14 @@
 #!/bin/bash

-set -eu
+set -exu

-PURGE_LXD=false
+PURGE_INCUS=false

 # grab any modifications from the command line.
 for i in "$@"; do
 case $i in
 --purge)
-PURGE_LXD=true
+PURGE_INCUS=true
 shift
 ;;
 *)
@@ -19,72 +19,70 @@ for i in "$@"; do
 done

 # this script undoes install.sh
-if ! command -v lxc >/dev/null 2>&1; then
+if ! command -v incus >/dev/null 2>&1; then
-echo "This script requires 'lxc' to be installed. Please run 'install.sh'."
+echo "This script requires incus to be installed. Please run 'install.sh'."
 exit 1
 fi

-if ! lxc remote get-default | grep -q "local"; then
+if ! incus remote get-default | grep -q "local"; then
 echo "ERROR: You MUST be on the local remote when uninstalling the SSME."
-echo "INFO: You can use 'lxc remote switch local' to do this."
+echo "INFO: You can use 'incus remote switch local' to do this."
 exit 1
 fi

-if ! lxc project list | grep -q "default (current)"; then
+if ! incus project list | grep -q "default (current)"; then
 echo "ERROR: You MUST be on the default project when uninstalling the SSME."
-echo "INFO: You can use 'lxc project switch default' to do this."
+echo "INFO: You can use 'incus project switch default' to do this."
 exit 1
 fi

-if lxc list --format csv | grep -q "ss-mgmt"; then
+if incus list --format csv | grep -q "ss-mgmt"; then

-if lxc list --format csv -q | grep -q "ss-mgmt,RUNNING"; then
+if incus list --format csv -q | grep -q "ss-mgmt,RUNNING"; then
-lxc stop ss-mgmt
+incus stop ss-mgmt
 fi

-if lxc config device list ss-mgmt -q | grep -q "ss-code"; then
+if incus config device list ss-mgmt -q | grep -q "ss-code"; then
-lxc config device remove ss-mgmt ss-code
+incus config device remove ss-mgmt ss-code
 fi

-if lxc config device list ss-mgmt -q | grep -q "ss-root"; then
+if incus config device list ss-mgmt -q | grep -q "ss-root"; then
-lxc config device remove ss-mgmt ss-root
+incus config device remove ss-mgmt ss-root
 fi

-if lxc config device list ss-mgmt -q | grep -q "ss-ssh"; then
+if incus config device list ss-mgmt -q | grep -q "ss-ssh"; then
-lxc config device remove ss-mgmt ss-ssh
+incus config device remove ss-mgmt ss-ssh
 fi

-lxc delete ss-mgmt
+incus delete ss-mgmt
 fi
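# With --purge, also remove the default profile devices, the lxdbr0 bridge, the cached base image, the sovereign-stack storage pool, and the incus package itself.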
if [ "$PURGE_LXD" = true ]; then
|
if [ "$PURGE_INCUS" = true ]; then
|
||||||
|
|
||||||
if lxc profile device list default | grep -q root; then
|
if incus profile device list default | grep -q root; then
|
||||||
lxc profile device remove default root
|
incus profile device remove default root
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if lxc profile device list default | grep -q enp5s0; then
|
if incus profile device list default | grep -q enp5s0; then
|
||||||
lxc profile device remove default enp5s0
|
incus profile device remove default enp5s0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if lxc network list --project default | grep -q lxdbr0; then
|
if incus network list --project default | grep -q lxdbr0; then
|
||||||
lxc network delete lxdbr0
|
incus network delete lxdbr0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# this file contains the BASE_IMAGE_NAME
|
# this file contains the BASE_IMAGE_NAME
|
||||||
. ./deployment/base.sh
|
. ./deployment/base.sh
|
||||||
if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
|
if incus image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
|
||||||
lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
|
incus image delete "$UBUNTU_BASE_IMAGE_NAME"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if lxc storage list --format csv | grep -q sovereign-stack; then
|
if incus storage list --format csv | grep -q sovereign-stack; then
|
||||||
lxc storage delete sovereign-stack
|
incus storage delete sovereign-stack
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if snap list | grep -q lxd; then
|
sudo apt purge incus
|
||||||
sudo snap remove lxd
|
|
||||||
fi
|
|
||||||
fi
|
fi
|