From a2734886465ba907aba895be8ebcd30818c79e47 Mon Sep 17 00:00:00 2001
From: Derek Smith
Date: Fri, 22 Sep 2023 17:46:07 -0600
Subject: [PATCH] Initial switch to Incus.

---
 deployment/base.sh | 2 +-
 deployment/create_base.sh | 101 +++++++++++++++++
 deployment/create_lxc_base.sh | 101 -----------------
 deployment/deploy_vm.sh | 36 +++----
 deployment/domain_list.sh | 7 +-
 deployment/down.sh | 22 ++--
 deployment/project_env.sh | 10 +-
 deployment/remote.sh | 42 ++++----
 deployment/remote_env.sh | 12 +--
 deployment/reset.sh | 58 +++++-----
 deployment/show.sh | 14 +--
 deployment/stop.sh | 4 +-
 .../{stub_lxc_profile.sh => stub_profile.sh} | 18 ++--
 deployment/up.sh | 34 +++---
 .../{wait_for_lxc_ip.sh => wait_for_ip.sh} | 16 +--
 install.sh | 102 +++++++++---------
 install_incus.sh | 67 ++++++++++++
 manage.sh | 18 ++--
 management/provision.sh | 4 +-
 .../{wait_for_lxc_ip.sh => wait_for_ip.sh} | 6 +-
 uninstall.sh | 66 ++++++------
 21 files changed, 397 insertions(+), 343 deletions(-)
 create mode 100755 deployment/create_base.sh
 delete mode 100755 deployment/create_lxc_base.sh
 rename deployment/{stub_lxc_profile.sh => stub_profile.sh} (93%)
 rename deployment/{wait_for_lxc_ip.sh => wait_for_ip.sh} (52%)
 create mode 100755 install_incus.sh
 rename management/{wait_for_lxc_ip.sh => wait_for_ip.sh} (60%)

diff --git a/deployment/base.sh b/deployment/base.sh
index 61cae1e..52a22b2 100755
--- a/deployment/base.sh
+++ b/deployment/base.sh
@@ -3,7 +3,7 @@
 # The base VM image.
 export LXD_UBUNTU_BASE_VERSION="jammy"
 export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
-export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
+export BASE_INCUS_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
 WEEK_NUMBER=$(date +%U)
 export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
 export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
diff --git a/deployment/create_base.sh b/deployment/create_base.sh
new file mode 100755
index 0000000..94aa149
--- /dev/null
+++ b/deployment/create_base.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+set -exu
+cd "$(dirname "$0")"
+
+. ./base.sh
+
+bash -c "./stub_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
+
+if incus list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
+    incus delete -f "$BASE_IMAGE_VM_NAME" --project default
+fi
+
+# let's download our base image.
+if ! incus image list --format csv --columns l --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # copy the image down from Canonical.
+    incus image copy "images:$BASE_INCUS_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update --target-project default
+fi
+
+# If the VM does exist, then we will delete it (so we can start fresh)
+if incus list --format csv -q --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+    # if there's no snapshot, we dispense with the old image and try again.
+    if ! incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+        incus delete "$BASE_IMAGE_VM_NAME" --force --project default
+        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
+    fi
+else
+
+    if ! incus list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
+        # the base image is ubuntu:22.04.
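+        # 'incus init' creates the instance without starting it; the --vm flag
+        # provisions a virtual machine rather than a system container, and the
+        # profile stubbed above supplies the cloud-init and device configuration.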
+        incus init -q --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project default
+    fi
+
+
+    if incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
+        # TODO move this sovereign-stack-base construction VM to separate dedicated IP
+        incus config set "$BASE_IMAGE_VM_NAME" --project default
+        incus start "$BASE_IMAGE_VM_NAME" --project default
+        sleep 15
+    fi
+
+    # for CHAIN in mainnet testnet; do
+    #     for DATA in blocks chainstate; do
+    #         incus storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
+    #     done
+    # done
+
+    if incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: RUNNING"; then
+
+        while incus exec "$BASE_IMAGE_VM_NAME" --project default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+            sleep 1
+        done
+
+        # ensure the ssh service is listening at localhost
+        incus exec "$BASE_IMAGE_VM_NAME" --project default -- wait-for-it -t 100 127.0.0.1:22
+
+        # # If we have any chainstate or blocks in our SSME, let's push them to the
+        # # remote host as a zfs volume that way deployments can share a common history
+        # # of chainstate/blocks.
+        # for CHAIN in testnet mainnet; do
+        #     for DATA in blocks chainstate; do
+        #         # if the storage snapshot doesn't yet exist, create it.
+        #         if ! incus storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+        #             DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
+        #             if [ -d "$DATA_PATH" ]; then
+        #                 COMPLETE_FILE_PATH="$DATA_PATH/complete"
+        #                 if incus exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
+        #                     incus file push --recursive --project default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
+        #                     incus exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
+        #                     incus exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
+        #                 else
+        #                     echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
+        #                 fi
+        #             fi
+        #         fi
+        #     done
+        # done
+
+        # stop the VM and get a snapshot.
+        incus stop "$BASE_IMAGE_VM_NAME" --project default
+    fi
+
+    incus snapshot create "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project default
+
+fi
+
+echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
+incus publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
+
+echo "INFO: Success creating the base image. Deleting artifacts from the build process."
+incus delete -f "$BASE_IMAGE_VM_NAME" --project default
+
+# # now let's get a snapshot of each of the blocks/chainstate directories.
+# for CHAIN in testnet mainnet; do
+#     for DATA in blocks chainstate; do
+#         if ! incus storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
+#             echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
+#             incus storage volume snapshot ss-base --project default "$CHAIN-$DATA"
+#         fi
+#     done
+# done
diff --git a/deployment/create_lxc_base.sh b/deployment/create_lxc_base.sh
deleted file mode 100755
index d12bbed..0000000
--- a/deployment/create_lxc_base.sh
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/bin/bash
-
-set -exu
-cd "$(dirname "$0")"
-
-. ./base.sh
-
-bash -c "./stub_lxc_profile.sh --lxd-hostname=$BASE_IMAGE_VM_NAME"
-
-if lxc list -q --project default | grep -q "$BASE_IMAGE_VM_NAME" ; then
-    lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
-fi
-
-# let's download our base image.
-if ! lxc image list --format csv --columns l --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    # copy the image down from canonical.
-    lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update --target-project default
-fi
-
-# If the lxc VM does exist, then we will delete it (so we can start fresh)
-if lxc list --format csv -q --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    # if there's no snapshot, we dispense with the old image and try again.
-    if ! lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-        lxc delete "$BASE_IMAGE_VM_NAME" --force --project default
-        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
-    fi
-else
-
-    if ! lxc list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
-        # the base image is ubuntu:22.04.
-        lxc init -q --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project default || true
-    fi
-
-
-    if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
-        # TODO move this sovereign-stack-base construction VM to separate dedicated IP
-        lxc config set "$BASE_IMAGE_VM_NAME" --project default
-        lxc start "$BASE_IMAGE_VM_NAME" --project default
-        sleep 15
-    fi
-
-    # for CHAIN in mainnet testnet; do
-    #     for DATA in blocks chainstate; do
-    #         lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/bitcoin/$DATA"
-    #     done
-    # done
-
-    if lxc info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: RUNNING"; then
-
-        while lxc exec "$BASE_IMAGE_VM_NAME" --project default -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
-            sleep 1
-        done
-
-        # ensure the ssh service is listening at localhost
-        lxc exec "$BASE_IMAGE_VM_NAME" --project default -- wait-for-it -t 100 127.0.0.1:22
-
-        # # If we have any chaninstate or blocks in our SSME, let's push them to the
-        # # remote host as a zfs volume that way deployments can share a common history
-        # # of chainstate/blocks.
-        # for CHAIN in testnet mainnet; do
-        #     for DATA in blocks chainstate; do
-        #         # if the storage snapshot doesn't yet exist, create it.
-        #         if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-        #             DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
-        #             if [ -d "$DATA_PATH" ]; then
-        #                 COMPLETE_FILE_PATH="$DATA_PATH/complete"
-        #                 if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
-        #                     lxc file push --recursive --project default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME""$DATA_PATH/"
-        #                     lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
-        #                     lxc exec "$BASE_IMAGE_VM_NAME" -- chown -R 999:999 "$DATA_PATH/$DATA"
-        #                 else
-        #                     echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
-        #                 fi
-        #             fi
-        #         fi
-        #     done
-        # done
-
-        # stop the VM and get a snapshot.
-        lxc stop "$BASE_IMAGE_VM_NAME" --project default
-    fi
-
-    lxc snapshot "$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" --project default
-
-fi
-
-echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
-lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
-
-echo "INFO: Success creating the base image. Deleting artifacts from the build process."
-lxc delete -f "$BASE_IMAGE_VM_NAME" --project default
-
-# # now let's get a snapshot of each of the blocks/chainstate directories.
-# for CHAIN in testnet mainnet; do
-#     for DATA in blocks chainstate; do
-#         if ! lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then
-#             echo "INFO: Creating a snapshot 'ss-base/$CHAIN-$DATA/snap0'."
-#             lxc storage volume snapshot ss-base --project default "$CHAIN-$DATA"
-#         fi
-#     done
-# done
diff --git a/deployment/deploy_vm.sh b/deployment/deploy_vm.sh
index c268aee..3733738 100755
--- a/deployment/deploy_vm.sh
+++ b/deployment/deploy_vm.sh
@@ -21,7 +21,7 @@ EOF
 fi
 
 # if the machine doesn't exist, we create it.
-if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
+if ! incus list --format csv | grep -q "$LXD_VM_NAME"; then
 
     # create a base image if needed and instantiate a VM.
     if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
@@ -69,57 +69,57 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
     fi
 
     DOCKER_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""d"
-    if ! lxc storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
+    if ! incus storage volume list ss-base | grep -q "$DOCKER_VOLUME_NAME"; then
+        incus storage volume create ss-base "$DOCKER_VOLUME_NAME" --type=block
     fi
 
     # TODO ensure we are only GROWING the volume--never shrinking
-    lxc storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$DOCKER_VOLUME_NAME" size="${DOCKER_DISK_SIZE_GB}GB"
 
     SSDATA_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""s"
-    if ! lxc storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
+    if ! incus storage volume list ss-base | grep -q "$SSDATA_VOLUME_NAME"; then
+        incus storage volume create ss-base "$SSDATA_VOLUME_NAME" --type=filesystem
     fi
 
     # TODO ensure we are only GROWING the volume--never shrinking per zfs volume docs.
-    lxc storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$SSDATA_VOLUME_NAME" size="${SSDATA_DISK_SIZE_GB}GB"
 
     BACKUP_VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""b"
-    if ! lxc storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
-        lxc storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
+    if ! incus storage volume list ss-base | grep -q "$BACKUP_VOLUME_NAME"; then
+        incus storage volume create ss-base "$BACKUP_VOLUME_NAME" --type=filesystem
     fi
 
-    lxc storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
+    incus storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
 
-    bash -c "./stub_lxc_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
+    bash -c "./stub_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
 
     # now let's create a new VM to work with.
-    #lxc init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
-    lxc init -q "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
+    #incus init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
+    incus init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
 
     # let's PIN the HW address for now so we don't exhaust IP
     # and so we can set DNS internally.
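     # 'volatile.enp5s0.hwaddr' overrides the auto-generated MAC on the VM's NIC;
     # a pinned MAC yields a stable DHCP lease, so the IP address and any DNS
     # entries keyed to it survive restarts.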
-    lxc config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
+    incus config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
 
     # attach the docker block device.
-    lxc storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
+    incus storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
 
     # if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
    #     # attach any volumes
    #     for CHAIN in testnet mainnet; do
    #         for DATA in blocks chainstate; do
    #             MOUNT_PATH="/$CHAIN-$DATA"
-   #             lxc config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
+   #             incus config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
    #         done
    #     done
    # fi
 
-    lxc start "$LXD_VM_NAME"
+    incus start "$LXD_VM_NAME"
     sleep 10
 
-    bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
+    bash -c "./wait_for_ip.sh --lxd-name=$LXD_VM_NAME"
 
     # scan the remote machine and install its identity in our SSH known_hosts file.
     ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"
diff --git a/deployment/domain_list.sh b/deployment/domain_list.sh
index 07a0833..ac8b3fe 100755
--- a/deployment/domain_list.sh
+++ b/deployment/domain_list.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
+set -exu
 
 # the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
 DOMAIN_LIST="${PRIMARY_DOMAIN}"
@@ -11,6 +12,6 @@ export DOMAIN_LIST="$DOMAIN_LIST"
 export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))
 
 export OTHER_SITES_LIST="$OTHER_SITES_LIST"
-export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
-export BTCPAY_SERVER_FQDN="$BTCPAY_SERVER_HOSTNAME.$DOMAIN_NAME"
-export LNPLAY_SERVER_FQDN="$LNPLAY_SERVER_HOSTNAME.$DOMAIN_NAME"
\ No newline at end of file
+export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$PRIMARY_DOMAIN"
+export BTCPAY_SERVER_FQDN="$BTCPAY_SERVER_HOSTNAME.$PRIMARY_DOMAIN"
+export LNPLAY_SERVER_FQDN="$LNPLAY_SERVER_HOSTNAME.$PRIMARY_DOMAIN"
\ No newline at end of file
diff --git a/deployment/down.sh b/deployment/down.sh
index 363194a..9147feb 100755
--- a/deployment/down.sh
+++ b/deployment/down.sh
@@ -5,8 +5,8 @@
 set -exu
 cd "$(dirname "$0")"
 
-if lxc remote get-default -q | grep -q "local"; then
-    echo "ERROR: you are on the local lxc remote. Nothing to take down"
+if incus remote get-default -q | grep -q "local"; then
+    echo "ERROR: you are on the local incus remote. Nothing to take down"
     exit 1
 fi
 
@@ -75,7 +75,7 @@ for VIRTUAL_MACHINE in $SERVERS; do
 
     LXD_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
 
-    if lxc list | grep -q "$LXD_NAME"; then
+    if incus list | grep -q "$LXD_NAME"; then
         bash -c "./stop.sh --server=$VIRTUAL_MACHINE"
 
         if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
            done
         fi
 
-        lxc stop "$LXD_NAME"
+        incus stop "$LXD_NAME"
 
-        lxc delete "$LXD_NAME"
+        incus delete "$LXD_NAME"
     fi
 
     # remove the ssh known_hosts entry, else we get warnings.
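     # 'ssh-keygen -R <host>' deletes every known_hosts entry recorded for that
     # host, so the next deployment's host key can be accepted without a
     # mismatch warning.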
    ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VIRTUAL_MACHINE.$PRIMARY_DOMAIN" || true
 
-    if lxc profile list | grep -q "$LXD_NAME"; then
-        lxc profile delete "$LXD_NAME"
+    if incus profile list | grep -q "$LXD_NAME"; then
+        incus profile delete "$LXD_NAME"
     fi
 
     if [ "$KEEP_DOCKER_VOLUME" = false ]; then
@@ -110,12 +110,12 @@ for VIRTUAL_MACHINE in $SERVERS; do
             # d for docker; b for backup; s for ss-data
             for DATA in d b s; do
                 VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""$DATA"
-                if lxc storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
+                if incus storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
                     RESPONSE=
                     read -r -p "Are you sure you want to delete the '$VOLUME_NAME' volume intended for '$LXD_NAME'?": RESPONSE
                     if [ "$RESPONSE" = "y" ]; then
-                        lxc storage volume delete ss-base "$VOLUME_NAME"
+                        incus storage volume delete ss-base "$VOLUME_NAME"
                     fi
                 fi
             done
@@ -126,6 +126,6 @@ for VIRTUAL_MACHINE in $SERVERS; do
     fi
 done
 
-if lxc network list -q | grep -q ss-ovn; then
-    lxc network delete ss-ovn
+if incus network list -q | grep -q ss-ovn; then
+    incus network delete ss-ovn
 fi
diff --git a/deployment/project_env.sh b/deployment/project_env.sh
index e6040c7..f49338d 100755
--- a/deployment/project_env.sh
+++ b/deployment/project_env.sh
@@ -2,11 +2,11 @@
 
 set -eu
 
-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export PROJECT_NAME="$PROJECT_NAME"
 
 if [ "$PROJECT_NAME" = default ]; then
-    echo "ERROR: You are on the default project. Use 'lxc project list' and 'lxc project switch <project>'."
+    echo "ERROR: You are on the default project. Use 'incus project list' and 'incus project switch <project>'."
     exit 1
 fi
 
@@ -27,17 +27,11 @@ source "$PROJECT_DEFINITION_PATH"
 
 export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"
 
-if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then
-    echo "ERROR: the site definition does not exist."
-    exit 1
-fi
 
 if [ -z "$PRIMARY_DOMAIN" ]; then
     echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition at '$PRIMARY_SITE_DEFINITION_PATH'."
     exit 1
 fi
 
-source "$PRIMARY_SITE_DEFINITION_PATH"
-
 SHASUM_OF_PRIMARY_DOMAIN="$(echo -n "$PRIMARY_DOMAIN" | sha256sum | awk '{print $1;}' )"
 export PRIMARY_DOMAIN_IDENTIFIER="${SHASUM_OF_PRIMARY_DOMAIN: -6}"
 
diff --git a/deployment/remote.sh b/deployment/remote.sh
index 2ec0457..a530f91 100755
--- a/deployment/remote.sh
+++ b/deployment/remote.sh
@@ -47,7 +47,7 @@ fi
 
 source "$REMOTE_DEFINITION"
 
-if ! lxc remote list | grep -q "$REMOTE_NAME"; then
+if ! incus remote list | grep -q "$REMOTE_NAME"; then
     FQDN="${2:-}"
 
     if [ -z "$FQDN" ]; then
@@ -89,7 +89,7 @@ if ! lxc remote list | grep -q "$REMOTE_NAME"; then
     ssh-copy-id -i "$HOME/.ssh/id_rsa.pub" "ubuntu@$FQDN"
 
     if [ -z "$DISK_TO_USE" ]; then
-        if ! ssh "ubuntu@$FQDN" lxc storage list -q | grep -q ss-base; then
+        if ! ssh "ubuntu@$FQDN" incus storage list -q | grep -q ss-base; then
             echo "INFO: It looks like the DISK_TO_USE has not been set. Enter it now."
             echo ""
 
@@ -126,20 +126,20 @@ if [ -z "$LXD_REMOTE_PASSWORD" ]; then
     exit 1
 fi
 
-if ! command -v lxc >/dev/null 2>&1; then
-    if lxc profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
-        lxc profile delete "$BASE_IMAGE_VM_NAME"
+if ! command -v incus >/dev/null 2>&1; then
+    if incus profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
+        incus profile delete "$BASE_IMAGE_VM_NAME"
         sleep 1
     fi
 
-    if lxc network list --format csv -q --project default | grep -q lxdbr0; then
-        lxc network delete lxdbr0 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr0; then
+        incus network delete lxdbr0 --project default
         sleep 1
     fi
 
-    if lxc network list --format csv -q project default | grep -q lxdbr1; then
-        lxc network delete lxdbr1 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr1; then
+        incus network delete lxdbr1 --project default
         sleep 1
     fi
 
@@ -148,7 +148,7 @@ fi
 # install dependencies.
 ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
 
 if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
-    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.17/stable'
+    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.18/candidate'
     sleep 5
 fi
 
@@ -215,37 +215,37 @@ cluster:
  cluster_token: ""
 EOF
 
-# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
+# ensure the lxd service is available over the network, then add an incus remote, then switch the active remote to it.
 if wait-for-it -t 20 "$FQDN:8443"; then
-    # now create a remote on your local LXC client and switch to it.
+    # now create a remote on your local incus client and switch to it.
     # the software will now target the new remote.
-    lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
-    lxc remote switch "$REMOTE_NAME"
+    incus remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
+    incus remote switch "$REMOTE_NAME"
 
-    echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your LXC client has been switched to it."
+    echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your incus client has been switched to it."
 else
     echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
     exit 1
 fi
 
 # create the default storage pool if necessary
-if ! lxc storage list --format csv | grep -q ss-base; then
+if ! incus storage list --format csv | grep -q ss-base; then
 
     if [ "$DISK_TO_USE" != loop ]; then
        # we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
        # TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
-        lxc storage create ss-base zfs source="$DISK_TO_USE"
+        incus storage create ss-base zfs source="$DISK_TO_USE"
     else
        # if a disk is the default 'loop', then we create a zfs storage pool
        # on top of the existing filesystem using a loop device, per LXD docs
-        lxc storage create ss-base zfs
+        incus storage create ss-base zfs
     fi
 
    # # create the testnet/mainnet blocks/chainstate subvolumes.
    # for CHAIN in mainnet testnet; do
    #     for DATA in blocks chainstate; do
-   #         if ! lxc storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
-   #             lxc storage volume create ss-base "$CHAIN-$DATA" --type=filesystem
+   #         if ! incus storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
+   #             incus storage volume create ss-base "$CHAIN-$DATA" --type=filesystem
    #         fi
    #     done
    # done
 
@@ -253,5 +253,5 @@ if ! lxc storage list --format csv | grep -q ss-base; then
 else
     echo "WARNING! The host '$FQDN' appears to have Sovereign Stack workloads already provisioned."
echo "INFO: Here are your current Deployments." - lxc project list -q + incus project list -q fi diff --git a/deployment/remote_env.sh b/deployment/remote_env.sh index 540cacd..60f02d9 100755 --- a/deployment/remote_env.sh +++ b/deployment/remote_env.sh @@ -2,7 +2,7 @@ set -eu -CURRENT_REMOTE="$(lxc remote get-default)" +CURRENT_REMOTE="$(incus remote get-default)" if echo "$CURRENT_REMOTE" | grep -q "production"; then echo "WARNING: You are running a migration procedure on a production system." @@ -50,11 +50,11 @@ if [ -n "$DEPLOYMENT_STRING" ]; then BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2) PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN" - # create the lxc project as specified by PROJECT_NAME - if ! lxc project list | grep -q "$PROJECT_NAME"; then - lxc project create "$PROJECT_NAME" - lxc project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true - lxc project switch "$PROJECT_NAME" + # create the incus project as specified by PROJECT_NAME + if ! incus project list | grep -q "$PROJECT_NAME"; then + incus project create "$PROJECT_NAME" + incus project set "$PROJECT_NAME" features.networks=true features.images=false features.storage.volumes=true + incus project switch "$PROJECT_NAME" fi done fi diff --git a/deployment/reset.sh b/deployment/reset.sh index 4f22e26..50bf546 100755 --- a/deployment/reset.sh +++ b/deployment/reset.sh @@ -4,13 +4,13 @@ set -e cd "$(dirname "$0")" -PURGE_LXD=false +PURGE_INCUS=false # grab any modifications from the command line. for i in "$@"; do case $i in --purge) - PURGE_LXD=true + PURGE_INCUS=true shift ;; *) @@ -25,61 +25,61 @@ source ../defaults.sh ./down.sh # these only get initialzed upon creation, so we MUST delete here so they get recreated. -if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then - lxc profile delete "$BASE_IMAGE_VM_NAME" +if incus profile list | grep -q "$BASE_IMAGE_VM_NAME"; then + incus profile delete "$BASE_IMAGE_VM_NAME" fi -if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then - lxc image rm "$BASE_IMAGE_VM_NAME" +if incus image list | grep -q "$BASE_IMAGE_VM_NAME"; then + incus image rm "$BASE_IMAGE_VM_NAME" fi -if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then - lxc image rm "$DOCKER_BASE_IMAGE_NAME" +if incus image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then + incus image rm "$DOCKER_BASE_IMAGE_NAME" fi -CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')" -if ! lxc info | grep -q "project: default"; then - lxc project switch default - lxc project delete "$CURRENT_PROJECT" +CURRENT_PROJECT="$(incus info | grep "project:" | awk '{print $2}')" +if ! 
+    incus project switch default
+    incus project delete "$CURRENT_PROJECT"
 fi
 
-if [ "$PURGE_LXD" = true ]; then
+if [ "$PURGE_INCUS" = true ]; then
 
-    if lxc profile show default | grep -q "root:"; then
-        lxc profile device remove default root
+    if incus profile show default | grep -q "root:"; then
+        incus profile device remove default root
     fi
 
-    if lxc profile show default| grep -q "eth0:"; then
-        lxc profile device remove default eth0
+    if incus profile show default | grep -q "eth0:"; then
+        incus profile device remove default eth0
     fi
 
-    if lxc network list --format csv -q --project default | grep -q lxdbr0; then
-        lxc network delete lxdbr0 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr0; then
+        incus network delete lxdbr0 --project default
     fi
 
-    if lxc network list --format csv -q --project default | grep -q lxdbr1; then
-        lxc network delete lxdbr1 --project default
+    if incus network list --format csv -q --project default | grep -q lxdbr1; then
+        incus network delete lxdbr1 --project default
     fi
 
    # # create the testnet/mainnet blocks/chainstate subvolumes.
    # for CHAIN in mainnet testnet; do
    #     for DATA in blocks chainstate; do
-   #         if lxc storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
-   #             lxc storage volume delete ss-base "$CHAIN-$DATA"
+   #         if incus storage volume list ss-base | grep -q "$CHAIN-$DATA"; then
+   #             incus storage volume delete ss-base "$CHAIN-$DATA"
    #         fi
    #     done
    # done
 
    echo "WARNING: ss-base NOT DELETED. NEED TO TEST THIS SCRIPT"
-    # if lxc storage list --format csv | grep -q ss-base; then
-    #     lxc storage delete ss-base
+    # if incus storage list --format csv | grep -q ss-base; then
+    #     incus storage delete ss-base
     # fi
 
-    CURRENT_REMOTE="$(lxc remote get-default)"
-    if ! lxc remote get-default | grep -q "local"; then
-        lxc remote switch local
-        lxc remote remove "$CURRENT_REMOTE"
+    CURRENT_REMOTE="$(incus remote get-default)"
+    if ! incus remote get-default | grep -q "local"; then
+        incus remote switch local
+        incus remote remove "$CURRENT_REMOTE"
 
         echo "INFO: The remote '$CURRENT_REMOTE' has been removed! You are now controlling your local instance."
     fi
diff --git a/deployment/show.sh b/deployment/show.sh
index 9540173..1e417f4 100755
--- a/deployment/show.sh
+++ b/deployment/show.sh
@@ -9,14 +9,14 @@ cd "$(dirname "$0")"
 
 echo "Global Settings:"
 
-lxc image list
-lxc storage list
+incus image list
+incus storage list
 
 echo
 echo
 
-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export PROJECT_NAME="$PROJECT_NAME"
 export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
 
@@ -26,17 +26,17 @@ echo "Active project: $PROJECT_NAME"
 echo "----------------------------------------------------------"
 
 echo " Networks:"
-lxc network list
+incus network list
 
 echo
 echo " Storage Volumes:"
-lxc storage volume list ss-base
+incus storage volume list ss-base
 
 echo
 echo " Profiles:"
-lxc profile list
+incus profile list
 
 echo
 echo " Instances (VMs):"
-lxc list
+incus list
diff --git a/deployment/stop.sh b/deployment/stop.sh
index abc3f90..1f61fdc 100755
--- a/deployment/stop.sh
+++ b/deployment/stop.sh
@@ -5,8 +5,8 @@
 set -eu
 cd "$(dirname "$0")"
 
-if lxc remote get-default -q | grep -q "local"; then
-    echo "ERROR: you are on the local lxc remote. Nothing to take down"
+if incus remote get-default -q | grep -q "local"; then
+    echo "ERROR: you are on the local incus remote. Nothing to take down"
     exit 1
 fi
diff --git a/deployment/stub_lxc_profile.sh b/deployment/stub_profile.sh
similarity index 93%
rename from deployment/stub_lxc_profile.sh
rename to deployment/stub_profile.sh
index f7fc61b..837ca5b 100755
--- a/deployment/stub_lxc_profile.sh
+++ b/deployment/stub_profile.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -eu
+set -exu
 cd "$(dirname "$0")"
 
 VIRTUAL_MACHINE=base
@@ -36,8 +36,8 @@ done
 # generate the custom cloud-init file. Cloud init installs and configures sshd
 SSH_AUTHORIZED_KEY=$(<"$SSH_PUBKEY_PATH")
-eval "$(ssh-agent -s)"
-ssh-add "$SSH_HOME/id_rsa" >> /dev/null
+eval "$(ssh-agent -s)" > /dev/null
+ssh-add "$SSH_HOME/id_rsa" > /dev/null
 export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
 
 export FILENAME="$LXD_HOSTNAME.yml"
@@ -302,18 +302,18 @@ EOF
 fi
 
 if [ "$VIRTUAL_MACHINE" = base ]; then
-    if ! lxc profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
-        lxc profile create "$LXD_HOSTNAME" --project default
+    if ! incus profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
+        incus profile create "$LXD_HOSTNAME" --project default
     fi
 
     # configure the profile with our generated cloud-init.yml file.
-    cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME" --project default
+    incus profile edit "$LXD_HOSTNAME" --project default < "$YAML_PATH"
 else
-    if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
-        lxc profile create "$LXD_HOSTNAME"
+    if ! incus profile list --format csv | grep -q "$LXD_HOSTNAME"; then
+        incus profile create "$LXD_HOSTNAME"
     fi
 
     # configure the profile with our generated cloud-init.yml file.
-    cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"
+    incus profile edit "$LXD_HOSTNAME" < "$YAML_PATH"
 fi
diff --git a/deployment/up.sh b/deployment/up.sh
index 832b551..fe8457e 100755
--- a/deployment/up.sh
+++ b/deployment/up.sh
@@ -6,7 +6,7 @@ cd "$(dirname "$0")"
 . ./target.sh
 
 # check to ensure dependencies are met.
-for cmd in wait-for-it dig rsync sshfs lxc; do
+for cmd in wait-for-it dig rsync sshfs incus; do
     if ! command -v "$cmd" >/dev/null 2>&1; then
         echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
         exit 1
     fi
 done
 
 # do a spot check; if we are on production, warn.
-if lxc remote get-default | grep -q "production"; then
+if incus remote get-default | grep -q "production"; then
     echo "WARNING: You are running a command against a production system!"
     echo ""
 
@@ -48,7 +48,7 @@ SKIP_LNPLAY_SERVER=false
 BACKUP_BTCPAY_ARCHIVE_PATH=
 RESTORE_BTCPAY=false
 UPDATE_BTCPAY=false
-REMOTE_NAME="$(lxc remote get-default)"
+REMOTE_NAME="$(incus remote get-default)"
 USER_SAYS_YES=false
 WWW_SERVER_MAC_ADDRESS=
 
@@ -212,7 +212,7 @@ EOL
 
 }
 
-PROJECT_NAME="$(lxc info | grep "project:" | awk '{print $2}')"
+PROJECT_NAME="$(incus info | grep "project:" | awk '{print $2}')"
 export PROJECT_NAME="$PROJECT_NAME"
 export PROJECT_PATH="$PROJECTS_PATH/$PROJECT_NAME"
 export SKIP_BTCPAYSERVER="$SKIP_BTCPAYSERVER"
@@ -287,10 +287,10 @@ export UPDATE_BTCPAY="$UPDATE_BTCPAY"
 VPS_HOSTNAME=
 
 . ./base.sh
 
-if ! lxc image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+if ! incus image list --format csv | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
     # create the lxd base image.
     if [ "$SKIP_BASE_IMAGE_CREATION" = false ]; then
-        ./create_lxc_base.sh
+        ./create_base.sh
     fi
 fi
 
@@ -325,8 +325,8 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
 
     # Goal is to get the macvlan interface.
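     # assumption: remote provisioning recorded the macvlan uplink in lxdbr0's
     # description with the literal marker 'ss-config'; the greps below recover
     # that CSV row so the interface name can be parsed out of it.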
     LXD_SS_CONFIG_LINE=
-    if lxc network list --format csv --project default | grep lxdbr0 | grep -q "ss-config"; then
-        LXD_SS_CONFIG_LINE="$(lxc network list --format csv --project default | grep lxdbr0 | grep ss-config)"
+    if incus network list --format csv --project default | grep lxdbr0 | grep -q "ss-config"; then
+        LXD_SS_CONFIG_LINE="$(incus network list --format csv --project default | grep lxdbr0 | grep ss-config)"
     fi
 
     if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@@ -340,13 +340,13 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
 
     # Now let's switch to the new project to ensure new resources are created under the project scope.
-    if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
-        lxc project switch "$PROJECT_NAME"
+    if ! incus info | grep "project:" | grep -q "$PROJECT_NAME"; then
+        incus project switch "$PROJECT_NAME"
     fi
 
     # check if the OVN network exists in this project.
-    if ! lxc network list | grep -q "ss-ovn"; then
-        lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
+    if ! incus network list | grep -q "ss-ovn"; then
+        incus network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
     fi
 
     export MAC_ADDRESS_TO_PROVISION=
@@ -421,25 +421,21 @@ if [ "$SKIP_LNPLAY_SERVER" = false ]; then
     export DOCKER_HOST="ssh://ubuntu@$LNPLAY_SERVER_FQDN"
 
     # set the active env to our LNPLAY_SERVER_FQDN
-    cat >./project/lnplay/active_env.txt <
+    cat > ./project/lnplay/active_env.txt <
-    cat > "$LNPLAY_ENV_FILE" <
+    cat > "$LNPLAY_ENV_FILE" </dev/null 2>&1; then
+    bash -c ./install_incus.sh
+    # run lxd init
-    cat <> "$SSH_PATH/authorized_keys"
+grep -qxF "$(cat "$SSH_PUBKEY_PATH")" "$SSH_PATH/authorized_keys" || cat "$SSH_PUBKEY_PATH" >> "$SSH_PATH/authorized_keys"
 
 FROM_BUILT_IMAGE=false
-if ! lxc list --format csv | grep -q ss-mgmt; then
+if ! incus list --format csv | grep -q ss-mgmt; then
 
     # TODO check to see if there's an existing ss-mgmt image to spawn from, otherwise do this.
-    if lxc image list | grep -q ss-mgmt; then
+    if incus image list | grep -q ss-mgmt; then
         FROM_BUILT_IMAGE=true
-        lxc init -q ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
+        incus init ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
     else
-        lxc init -q "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
+        incus init "images:$BASE_INCUS_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
     fi
 fi
 
 # mount the pre-verified sovereign stack git repo into the new vm
-if ! lxc config device show ss-mgmt | grep -q ss-code; then
-    lxc config device add ss-mgmt ss-code disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
+if ! incus config device show ss-mgmt | grep -q ss-code; then
+    incus config device add ss-mgmt ss-code disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
 fi
 
 # create the ~/ss path and mount it into the vm.
@@ -148,8 +146,8 @@ source ./deployment/base.sh
 
 mkdir -p "$SS_ROOT_PATH"
 
-if ! lxc config device show ss-mgmt | grep -q ss-root; then
-    lxc config device add ss-mgmt ss-root disk source="$SS_ROOT_PATH" path=/home/ubuntu/ss
+if ! incus config device show ss-mgmt | grep -q ss-root; then
+    incus config device add ss-mgmt ss-root disk source="$SS_ROOT_PATH" path=/home/ubuntu/ss
 fi
 
 # if a ~/.bitcoin/testnet3/blocks directory exists, mount it in.
 BITCOIN_DIR="$HOME/.bitcoin"
 REMOTE_BITCOIN_CACHE_PATH="/home/ubuntu/ss/cache/bitcoin"
 BITCOIN_TESTNET_BLOCKS_PATH="$BITCOIN_DIR/testnet3/blocks"
 if [ -d "$BITCOIN_TESTNET_BLOCKS_PATH" ]; then
-    if ! lxc config device show ss-mgmt | grep -q ss-testnet-blocks; then
-        lxc config device add ss-mgmt ss-testnet-blocks disk source="$BITCOIN_TESTNET_BLOCKS_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/testnet/blocks
+    if ! incus config device show ss-mgmt | grep -q ss-testnet-blocks; then
+        incus config device add ss-mgmt ss-testnet-blocks disk source="$BITCOIN_TESTNET_BLOCKS_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/testnet/blocks"
     fi
 fi
 
 # if a ~/.bitcoin/testnet3/chainstate directory exists, mount it in.
 BITCOIN_TESTNET_CHAINSTATE_PATH="$BITCOIN_DIR/testnet3/chainstate"
 if [ -d "$BITCOIN_TESTNET_CHAINSTATE_PATH" ]; then
-    if ! lxc config device show ss-mgmt | grep -q ss-testnet-chainstate; then
-        lxc config device add ss-mgmt ss-testnet-chainstate disk source="$BITCOIN_TESTNET_CHAINSTATE_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/testnet/chainstate
+    if ! incus config device show ss-mgmt | grep -q ss-testnet-chainstate; then
+        incus config device add ss-mgmt ss-testnet-chainstate disk source="$BITCOIN_TESTNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/testnet/chainstate"
     fi
 fi
 
 # if a ~/.bitcoin/blocks dir exists, mount it in.
 BITCOIN_MAINNET_BLOCKS_PATH="$BITCOIN_DIR/blocks"
 if [ -d "$BITCOIN_MAINNET_BLOCKS_PATH" ]; then
-    if ! lxc config device show ss-mgmt | grep -q ss-mainnet-blocks; then
-        lxc config device add ss-mgmt ss-mainnet-blocks disk source="$BITCOIN_MAINNET_BLOCKS_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/mainnet/blocks
+    if ! incus config device show ss-mgmt | grep -q ss-mainnet-blocks; then
+        incus config device add ss-mgmt ss-mainnet-blocks disk source="$BITCOIN_MAINNET_BLOCKS_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/blocks"
     fi
 fi
 
 # if a ~/.bitcoin/chainstate directory exists, mount it in.
 BITCOIN_MAINNET_CHAINSTATE_PATH="$BITCOIN_DIR/chainstate"
 if [ -d "$BITCOIN_MAINNET_CHAINSTATE_PATH" ]; then
-    if ! lxc config device show ss-mgmt | grep -q ss-mainnet-blocks; then
-        lxc config device add ss-mgmt ss-mainnet-chainstate disk source="$BITCOIN_MAINNET_CHAINSTATE_PATH" path=$REMOTE_BITCOIN_CACHE_PATH/mainnet/chainstate
+    if ! incus config device show ss-mgmt | grep -q ss-mainnet-chainstate; then
+        incus config device add ss-mgmt ss-mainnet-chainstate disk source="$BITCOIN_MAINNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/chainstate"
     fi
 fi
 
 # mount the ssh directory in there.
 if [ -f "$SSH_PUBKEY_PATH" ]; then
-    if ! lxc config device show ss-mgmt | grep -q ss-ssh; then
-        lxc config device add ss-mgmt ss-ssh disk source="$HOME/.ssh" path=/home/ubuntu/.ssh
+    if ! incus config device show ss-mgmt | grep -q ss-ssh; then
+        incus config device add ss-mgmt ss-ssh disk source="$HOME/.ssh" path=/home/ubuntu/.ssh
     fi
 fi
 
 # start the vm if it's not already running
-if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
-    lxc start ss-mgmt
+if incus list --format csv | grep -q "ss-mgmt,STOPPED"; then
+    incus start ss-mgmt
     sleep 10
 fi
 
 # wait for the vm to have an IP address
-. ./management/wait_for_lxc_ip.sh
+. ./management/wait_for_ip.sh
 
 # do some other preparations for user experience
-lxc file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
-lxc file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
-lxc file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
-lxc file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack
+incus file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
+incus file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
+incus file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
+incus file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack
 
 # install SSH
-lxc exec ss-mgmt apt-get update
-lxc exec ss-mgmt -- apt-get install -y openssh-server
-lxc file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
-lxc exec ss-mgmt -- sudo systemctl restart sshd
+incus exec ss-mgmt -- apt-get update
+incus exec ss-mgmt -- apt-get install -y openssh-server
+incus file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
+incus exec ss-mgmt -- sudo systemctl restart sshd
 
 # add 'ss-manage' to the bare metal ~/.bashrc
 ADDED_COMMAND=false
@@ -234,14 +232,14 @@ ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
 
 if [ "$FROM_BUILT_IMAGE" = false ]; then
     ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh
 
-    lxc stop ss-mgmt
+    incus stop ss-mgmt
 
-    if ! lxc image list | grep -q "ss-mgmt"; then
+    if ! incus image list | grep -q "ss-mgmt"; then
         echo "Publishing image. Please wait, this may take a while..."
-        lxc publish ss-mgmt --alias=ss-mgmt
+        incus publish ss-mgmt --alias=ss-mgmt
     fi
 
-    lxc start ss-mgmt
+    incus start ss-mgmt
 fi
 
 if [ "$ADDED_COMMAND" = true ]; then
diff --git a/install_incus.sh b/install_incus.sh
new file mode 100755
index 0000000..9ec2641
--- /dev/null
+++ b/install_incus.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+set -ex
+
+if [ ! -f "$(which incus)" ]; then
+    # need to get the remote.conf in there
+    # this isn't really needed since env are provided via docker.
+    sudo tee /etc/apt/sources.list.d/zabbly-incus-daily.sources </dev/null 2>&1; then
-    echo "This script requires 'lxd/lxc' to be installed. Please run 'install.sh'."
+if ! command -v incus >/dev/null 2>&1; then
+    echo "This script requires incus to be installed. Please run 'install.sh'."
     exit 1
 fi
 
-if ! lxc remote get-default | grep -q "local"; then
-    lxc remote switch "local"
+if ! incus remote get-default | grep -q "local"; then
+    incus remote switch "local"
 fi
 
-if ! lxc list -q --format csv | grep -q ss-mgmt; then
+if ! incus list -q --format csv | grep -q ss-mgmt; then
     echo "ERROR: the 'ss-mgmt' VM does not exist. You may need to run install.sh"
     exit 1
 fi
 
 # if the mgmt machine doesn't exist, then warn the user to perform ./install.sh
-if ! lxc list --format csv | grep -q "ss-mgmt"; then
+if ! incus list --format csv | grep -q "ss-mgmt"; then
     echo "ERROR: the management machine VM does not exist. You probably need to run './install.sh'."
     echo "INFO: check out https://www.sovereign-stack.org/tag/code-lifecycle-management/ for more information."
 fi
 
 # if the machine does exist, let's make sure it's RUNNING.
-if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
+if incus list --format csv | grep -q "ss-mgmt,STOPPED"; then
     echo "INFO: The SSME was in a STOPPED state. Starting the environment. Please wait."
-    lxc start ss-mgmt
+    incus start ss-mgmt
     sleep 30
 fi
 
-. ./management/wait_for_lxc_ip.sh
+. ./management/wait_for_ip.sh
 
 # let's ensure ~/.ssh/ssh_config is using the correct IP address for ss-mgmt.
 ssh ubuntu@"$IP_V4_ADDRESS"
diff --git a/management/provision.sh b/management/provision.sh
index 6a17645..91c0bc3 100755
--- a/management/provision.sh
+++ b/management/provision.sh
@@ -35,7 +35,7 @@ sleep 10
 # install snap
 if ! snap list | grep -q lxd; then
     sudo snap install htop
-    sudo snap install lxd --channel=5.17/stable
+    sudo snap install lxd --channel=5.18/candidate
     sleep 6
 
     # We just do an auto initialization. All we are using is the LXD client inside the management environment.
 fi
 
 # run a lxd command so we don't get a warning upon first invocation
-lxc list > /dev/null 2>&1
+incus list > /dev/null 2>&1
 
 # add groups for docker and lxd
 if ! groups ubuntu | grep -q docker; then
diff --git a/management/wait_for_lxc_ip.sh b/management/wait_for_ip.sh
similarity index 60%
rename from management/wait_for_lxc_ip.sh
rename to management/wait_for_ip.sh
index e7598d7..fd0f46d 100755
--- a/management/wait_for_lxc_ip.sh
+++ b/management/wait_for_ip.sh
@@ -5,7 +5,7 @@ set -e
 IP_V4_ADDRESS=
 while true; do
     # wait for
-    if lxc list ss-mgmt | grep -q enp5s0; then
+    if incus list ss-mgmt | grep -q enp5s0; then
         break;
     else
         sleep 1
     fi
 done
 
 while true; do
-    IP_V4_ADDRESS=$(lxc list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+    IP_V4_ADDRESS=$(incus list ss-mgmt --format csv --columns=4 | grep enp5s0 | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
     if [ -n "$IP_V4_ADDRESS" ]; then
         # give the machine extra time to spin up.
         break;
     else
         sleep 1
     fi
 done
 
 export IP_V4_ADDRESS="$IP_V4_ADDRESS"
 
 # wait for the VM to complete its default cloud-init.
-while lxc exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
+while incus exec ss-mgmt -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
     sleep 1
 done
diff --git a/uninstall.sh b/uninstall.sh
index d1a754e..000295c 100755
--- a/uninstall.sh
+++ b/uninstall.sh
@@ -1,14 +1,14 @@
 #!/bin/bash
 
-set -eu
+set -exu
 
-PURGE_LXD=false
+PURGE_INCUS=false
 
 # grab any modifications from the command line.
 for i in "$@"; do
     case $i in
         --purge)
-            PURGE_LXD=true
+            PURGE_INCUS=true
             shift
             ;;
         *)
@@ -19,72 +19,70 @@ for i in "$@"; do
 done
 
 # this script undoes install.sh
-if ! command -v lxc >/dev/null 2>&1; then
-    echo "This script requires 'lxc' to be installed. Please run 'install.sh'."
+if ! command -v incus >/dev/null 2>&1; then
+    echo "This script requires incus to be installed. Please run 'install.sh'."
     exit 1
 fi
-
-if ! lxc remote get-default | grep -q "local"; then
+if ! incus remote get-default | grep -q "local"; then
     echo "ERROR: You MUST be on the local remote when uninstalling the SSME."
-    echo "INFO: You can use 'lxc remote switch local' to do this."
+    echo "INFO: You can use 'incus remote switch local' to do this."
     exit 1
 fi
 
-if ! lxc project list | grep -q "default (current)"; then
+if ! incus project list | grep -q "default (current)"; then
     echo "ERROR: You MUST be on the default project when uninstalling the SSME."
-    echo "INFO: You can use 'lxc project switch default' to do this."
+    echo "INFO: You can use 'incus project switch default' to do this."
     exit 1
 fi
 
-if lxc list --format csv | grep -q "ss-mgmt"; then
+if incus list --format csv | grep -q "ss-mgmt"; then
 
-    if lxc list --format csv -q | grep -q "ss-mgmt,RUNNING"; then
-        lxc stop ss-mgmt
+    if incus list --format csv -q | grep -q "ss-mgmt,RUNNING"; then
+        incus stop ss-mgmt
     fi
 
-    if lxc config device list ss-mgmt -q | grep -q "ss-code"; then
-        lxc config device remove ss-mgmt ss-code
+    if incus config device list ss-mgmt -q | grep -q "ss-code"; then
+        incus config device remove ss-mgmt ss-code
     fi
 
-    if lxc config device list ss-mgmt -q | grep -q "ss-root"; then
-        lxc config device remove ss-mgmt ss-root
+    if incus config device list ss-mgmt -q | grep -q "ss-root"; then
+        incus config device remove ss-mgmt ss-root
     fi
 
-    if lxc config device list ss-mgmt -q | grep -q "ss-ssh"; then
-        lxc config device remove ss-mgmt ss-ssh
+    if incus config device list ss-mgmt -q | grep -q "ss-ssh"; then
+        incus config device remove ss-mgmt ss-ssh
     fi
 
-    lxc delete ss-mgmt
+    incus delete ss-mgmt
 fi
 
-if [ "$PURGE_LXD" = true ]; then
+if [ "$PURGE_INCUS" = true ]; then
 
-    if lxc profile device list default | grep -q root; then
-        lxc profile device remove default root
+    if incus profile device list default | grep -q root; then
+        incus profile device remove default root
     fi
 
-    if lxc profile device list default | grep -q enp5s0; then
-        lxc profile device remove default enp5s0
+    if incus profile device list default | grep -q enp5s0; then
+        incus profile device remove default enp5s0
     fi
 
-    if lxc network list --project default | grep -q lxdbr0; then
-        lxc network delete lxdbr0
+    if incus network list --project default | grep -q lxdbr0; then
+        incus network delete lxdbr0
     fi
 
     # this file contains the BASE_IMAGE_NAME
     . ./deployment/base.sh
 
-    if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-        lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
+    if incus image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+        incus image delete "$UBUNTU_BASE_IMAGE_NAME"
     fi
 
-    if lxc storage list --format csv | grep -q sovereign-stack; then
-        lxc storage delete sovereign-stack
+    if incus storage list --format csv | grep -q sovereign-stack; then
+        incus storage delete sovereign-stack
     fi
 
-    if snap list | grep -q lxd; then
-        sudo snap remove lxd
-    fi
+    sudo apt purge incus
+fi
\ No newline at end of file