1
1
Fork 1

Incus updates for ss-mgmt

This commit is contained in:
Derek Smith 2023-11-29 14:04:24 -05:00
parent 44c2859a84
commit a2c29b189e
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
11 changed files with 143 additions and 98 deletions

View File

@ -1,9 +1,9 @@
#!/bin/bash
# The base VM image.
export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_INCUS_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export INCUS_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${INCUS_UBUNTU_BASE_VERSION//./-}"
export BASE_INCUS_IMAGE="ubuntu/$INCUS_UBUNTU_BASE_VERSION/cloud"
WEEK_NUMBER=$(date +%U)
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${INCUS_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${INCUS_UBUNTU_BASE_VERSION//./-}-$WEEK_NUMBER"

View File

@ -21,7 +21,7 @@ EOF
fi
# if the machine doesn't exist, we create it.
if ! incus list --format csv | grep -q "$LXD_VM_NAME"; then
if ! incus list --format csv | grep -q "$INCUS_VM_NAME"; then
# create a base image if needed and instantiate a VM.
if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
@ -93,33 +93,33 @@ if ! incus list --format csv | grep -q "$LXD_VM_NAME"; then
incus storage volume set ss-base "$BACKUP_VOLUME_NAME" size="${BACKUP_DISK_SIZE_GB}GB"
bash -c "./stub_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$LXD_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
bash -c "./stub_profile.sh --vm=$VIRTUAL_MACHINE --lxd-hostname=$INCUS_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
# now let's create a new VM to work with.
#incus init -q --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
incus init "$DOCKER_BASE_IMAGE_NAME" "$LXD_VM_NAME" --vm --profile="$LXD_VM_NAME"
#incus init -q --profile="$INCUS_VM_NAME" "$BASE_IMAGE_VM_NAME" "$INCUS_VM_NAME" --vm
incus init "$DOCKER_BASE_IMAGE_NAME" "$INCUS_VM_NAME" --vm --profile="$INCUS_VM_NAME"
# let's PIN the HW address for now so we don't exhaust IP
# and so we can set DNS internally.
incus config set "$LXD_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
incus config set "$INCUS_VM_NAME" "volatile.enp5s0.hwaddr=$MAC_ADDRESS_TO_PROVISION"
# attach the docker block device.
incus storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$LXD_VM_NAME"
incus storage volume attach ss-base "$DOCKER_VOLUME_NAME" "$INCUS_VM_NAME"
# if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
# # attach any volumes
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# MOUNT_PATH="/$CHAIN-$DATA"
# incus config device add "$LXD_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
# incus config device add "$INCUS_VM_NAME" "$CHAIN-$DATA" disk pool=ss-base source="$CHAIN-$DATA" path="$MOUNT_PATH"
# done
# done
# fi
incus start "$LXD_VM_NAME"
incus start "$INCUS_VM_NAME"
sleep 10
bash -c "./wait_for_ip.sh --lxd-name=$LXD_VM_NAME"
bash -c "./wait_for_ip.sh --lxd-name=$INCUS_VM_NAME"
# scan the remote machine and install its identity in our SSH known_hosts file.
ssh-keyscan -H "$FQDN" >> "$SSH_HOME/known_hosts"

View File

@ -9,7 +9,7 @@ export SS_ROOT_PATH="$HOME/ss"
export REMOTES_PATH="$SS_ROOT_PATH/remotes"
export PROJECTS_PATH="$SS_ROOT_PATH/projects"
export SITES_PATH="$SS_ROOT_PATH/sites"
export LXD_CONFIG_PATH="$SS_ROOT_PATH/lxd"
export INCUS_CONFIG_PATH="$SS_ROOT_PATH/incus"
export SS_CACHE_PATH="$SS_ROOT_PATH/cache"

View File

@ -73,9 +73,9 @@ source ./domain_list.sh
for VIRTUAL_MACHINE in $SERVERS; do
LXD_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
INCUS_VM_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
if incus list | grep -q "$LXD_NAME"; then
if incus list | grep -q "$INCUS_VM_NAME"; then
bash -c "./stop.sh --server=$VIRTUAL_MACHINE"
if [ "$VIRTUAL_MACHINE" = www ] && [ "$BACKUP_WWW_APPS" = true ]; then
@ -86,16 +86,16 @@ for VIRTUAL_MACHINE in $SERVERS; do
done
fi
incus stop "$LXD_NAME"
incus stop "$INCUS_VM_NAME"
incus delete "$LXD_NAME"
incus delete "$INCUS_VM_NAME"
fi
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$VIRTUAL_MACHINE.$PRIMARY_DOMAIN" | exit
if incus profile list | grep -q "$LXD_NAME"; then
incus profile delete "$LXD_NAME"
if incus profile list | grep -q "$INCUS_VM_NAME"; then
incus profile delete "$INCUS_VM_NAME"
fi
if [ "$KEEP_DOCKER_VOLUME" = false ]; then
@ -112,7 +112,7 @@ for VIRTUAL_MACHINE in $SERVERS; do
VOLUME_NAME="$PRIMARY_DOMAIN_IDENTIFIER-$VM_ID""$DATA"
if incus storage volume list ss-base -q | grep -q "$VOLUME_NAME"; then
RESPONSE=
read -r -p "Are you sure you want to delete the '$VOLUME_NAME' volume intended for '$LXD_NAME'?": RESPONSE
read -r -p "Are you sure you want to delete the '$VOLUME_NAME' volume intended for '$INCUS_VM_NAME'?": RESPONSE
if [ "$RESPONSE" = "y" ]; then
incus storage volume delete ss-base "$VOLUME_NAME"

View File

@ -5,7 +5,7 @@ cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
# it reaches out to an SSH endpoint and provisions that machine
# to use LXD.
# to use incus.
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=
@ -32,8 +32,6 @@ if [ ! -f "$REMOTE_DEFINITION" ]; then
cat >"$REMOTE_DEFINITION" <<EOL
# https://www.sovereign-stack.org/ss-remote
LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
# REGISTRY_URL=http://registry.domain.tld:5000
EOL
@ -119,13 +117,6 @@ if [ "$DISK_TO_USE" != loop ]; then
fi
fi
# error out if the remote password is unset.
if [ -z "$LXD_REMOTE_PASSWORD" ]; then
echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote.conf file."
exit 1
fi
if ! command -v incus >/dev/null 2>&1; then
if incus profile list --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
incus profile delete "$BASE_IMAGE_VM_NAME"
@ -171,7 +162,6 @@ IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"
cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
config:
core.https_address: ${MGMT_PLANE_IP}:8443
core.trust_password: ${LXD_REMOTE_PASSWORD}
core.dns_address: ${MGMT_PLANE_IP}
images.auto_update_interval: 15
@ -219,12 +209,12 @@ EOF
if wait-for-it -t 20 "$FQDN:8443"; then
# now create a remote on your local incus client and switch to it.
# the software will now target the new remote.
incus remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
incus remote add "$REMOTE_NAME" "$FQDN" --protocol=lxd --auth-type=tls --accept-certificate
incus remote switch "$REMOTE_NAME"
echo "INFO: A new remote named '$REMOTE_NAME' has been created. Your incus client has been switched to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
echo "ERROR: Could not detect the incus endpoint. Something went wrong."
exit 1
fi
@ -232,12 +222,12 @@ fi
if ! incus storage list --format csv | grep -q ss-base; then
if [ "$DISK_TO_USE" != loop ]; then
# we omit putting a size here so, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
# we omit putting a size here, so incus will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
incus storage create ss-base zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
# on top of the existing filesystem using a loop device, per incus docs
incus storage create ss-base zfs
fi

View File

@ -4,7 +4,7 @@ set -exu
cd "$(dirname "$0")"
VIRTUAL_MACHINE=base
LXD_HOSTNAME=
INCUS_HOSTNAME=
SSDATA_VOLUME_NAME=
BACKUP_VOLUME_NAME=
@ -12,7 +12,7 @@ BACKUP_VOLUME_NAME=
for i in "$@"; do
case $i in
--lxd-hostname=*)
LXD_HOSTNAME="${i#*=}"
INCUS_HOSTNAME="${i#*=}"
shift
;;
--vm=*)
@ -40,7 +40,7 @@ eval "$(ssh-agent -s)" > /dev/null
ssh-add "$SSH_HOME/id_rsa" > /dev/null
export SSH_AUTHORIZED_KEY="$SSH_AUTHORIZED_KEY"
export FILENAME="$LXD_HOSTNAME.yml"
export FILENAME="$INCUS_HOSTNAME.yml"
mkdir -p "$PROJECT_PATH/cloud-init"
YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"
@ -241,7 +241,7 @@ fi
# All profiles get a root disk and cloud-init config.
cat >> "$YAML_PATH" <<EOF
description: Default LXD profile for ${FILENAME}
description: Default incus profile for ${FILENAME}
devices:
root:
path: /
@ -302,18 +302,18 @@ EOF
fi
if [ "$VIRTUAL_MACHINE" = base ]; then
if ! incus profile list --format csv --project default | grep -q "$LXD_HOSTNAME"; then
incus profile create "$LXD_HOSTNAME" --project default
if ! incus profile list --format csv --project default | grep -q "$INCUS_HOSTNAME"; then
incus profile create "$INCUS_HOSTNAME" --project default
fi
# configure the profile with our generated cloud-init.yml file.
incus profile edit "$LXD_HOSTNAME" --project default < "$YAML_PATH"
incus profile edit "$INCUS_HOSTNAME" --project default < "$YAML_PATH"
else
if ! incus profile list --format csv | grep -q "$LXD_HOSTNAME"; then
incus profile create "$LXD_HOSTNAME"
if ! incus profile list --format csv | grep -q "$INCUS_HOSTNAME"; then
incus profile create "$INCUS_HOSTNAME"
fi
# configure the profile with our generated cloud-init.yml file.
incus profile edit "$LXD_HOSTNAME" < "$YAML_PATH"
incus profile edit "$INCUS_HOSTNAME" < "$YAML_PATH"
fi

View File

@ -152,7 +152,6 @@ fi
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
source "$REMOTE_DEFINITION"
export LXD_REMOTE_PASSWORD="$LXD_REMOTE_PASSWORD"
# this is our password generation mechanism. Relying on GPG for secure password generation
@ -324,17 +323,17 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
fi
# Goal is to get the macvlan interface.
LXD_SS_CONFIG_LINE=
INCUS_SS_CONFIG_LINE=
if incus network list --format csv --project default | grep incusbr0 | grep -q "ss-config"; then
LXD_SS_CONFIG_LINE="$(incus network list --format csv --project default | grep incusbr0 | grep ss-config)"
INCUS_SS_CONFIG_LINE="$(incus network list --format csv --project default | grep incusbr0 | grep ss-config)"
fi
if [ -z "$LXD_SS_CONFIG_LINE" ]; then
if [ -z "$INCUS_SS_CONFIG_LINE" ]; then
echo "ERROR: the MACVLAN interface has not been specified. You may need to run 'ss-remote' again."
exit 1
fi
CONFIG_ITEMS="$(echo "$LXD_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
CONFIG_ITEMS="$(echo "$INCUS_SS_CONFIG_LINE" | awk -F'"' '{print $2}')"
DATA_PLANE_MACVLAN_INTERFACE="$(echo "$CONFIG_ITEMS" | cut -d ',' -f2)"
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
@ -376,7 +375,7 @@ for VIRTUAL_MACHINE in www btcpayserver lnplayserver; do
fi
export FQDN="$FQDN"
export LXD_VM_NAME="${FQDN//./-}"
export INCUS_VM_NAME="${FQDN//./-}"
export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
export PROJECT_PATH="$PROJECT_PATH"

View File

@ -12,12 +12,11 @@ if [ "$(hostname)" = ss-mgmt ]; then
fi
DISK_OR_PARTITION=
DISK=loop
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--disk-or-partition=*)
--disk=*)
DISK_OR_PARTITION="${i#*=}"
shift
;;
@ -31,7 +30,7 @@ done
# ensure the iptables forward policy is set to ACCEPT so your host can act as a router
# Note this is necessary if docker is running (or has been previously installed) on the
# same host running LXD.
# same host running incus.
sudo iptables -F FORWARD
sudo iptables -P FORWARD ACCEPT
@ -39,18 +38,15 @@ sudo iptables -P FORWARD ACCEPT
# the user's home directory. If the user does specify a disk or partition, we will
# create the ZFS pool there.
if [ -z "$DISK_OR_PARTITION" ]; then
DISK="$DISK_OR_PARTITION"
echo "ERROR: You MUST set DISK_OR_PARTITION"
exit 1
fi
export DISK="$DISK"
# run the incus install script.
sudo bash -c ./install_incus.sh
# this script undoes install.sh
if ! command -v incus >/dev/null 2>&1; then
bash -c ./install_incus.sh
# run lxd init
cat <<EOF | sudo incus admin init --preseed
# run incus init
cat <<EOF | sudo incus admin init --preseed
config: {}
networks:
- config:
@ -63,7 +59,7 @@ networks:
project: default
storage_pools:
- config:
source: ${DISK}
source: ${DISK_OR_PARTITION}
description: ""
name: sovereign-stack
driver: zfs
@ -85,9 +81,6 @@ cluster: null
EOF
fi
. ./deployment/deployment_defaults.sh

70
install_incus.sh Executable file
View File

@ -0,0 +1,70 @@
#!/bin/bash
# install_incus.sh — installs the Incus container/VM manager from the
# Zabbly stable apt repository (https://pkgs.zabbly.com/incus/stable).
# Must be run as root (e.g. via sudo); it writes under /etc/apt and runs apt-get.
set -exu
cd "$(dirname "$0")"
# Refuse to continue unless running as root: the keyring/sources writes
# and package installation below all require it.
if [ $UID -ne 0 ]; then
echo "ERROR: run with sudo."
exit 1
fi
# put the zabbly key in there.
# Install the Zabbly repository signing key so apt can verify packages.
mkdir -p /etc/apt/keyrings/
cat <<EOF > /etc/apt/keyrings/zabbly.asc
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQGNBGTlYcIBDACYQoVXVyQ6Y3Of14GwEaiv/RstQ8jWnH441OtvDbD/VVT8yF0P
pUfypWjQS8aq0g32Qgb9H9+b8UAAKojA2W0szjJFlmmSq19YDMMmNC4AnfeZlKYM
61Zonna7fPaXmlsTlSiUeo/PGvmAXrkFURC9S8FbhZdWEcUpf9vcKAoEzV8qGA4J
xbKlj8EOjSkdq3OQ1hHjP8gynbbzMhZQwjbnWqoiPj35ed9EMn+0QcX+GmynGq6T
hBXdRdeQjZC6rmXzNF2opCyxqx3BJ0C7hUtpHegmeoH34wnJHCqGYkEKFAjlRLoW
tOzHY9J7OFvB6U7ENtnquj7lg2VQK+hti3uiHW+oide06QgjVw2irucCblQzphgo
iX5QJs7tgFFDsA9Ee0DZP6cu83hNFdDcXEZBc9MT5Iu0Ijvj7Oeym3DJpkCuIWgk
SeP56sp7333zrg73Ua7YZsZHRayAe/4YdNUua+90P4GD12TpTtJa4iRWRd7bis6m
tSkKRj7kxyTsxpEAEQEAAbQmWmFiYmx5IEtlcm5lbCBCdWlsZHMgPGluZm9AemFi
Ymx5LmNvbT6JAdQEEwEKAD4WIQRO/FkGlssVuHxzo62CzIeXyDjc/QUCZOVhwgIb
AwUJA8JnAAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCCzIeXyDjc/W05C/4n
lGRTlyOETF2K8oWbjtan9wlttQ+pwymJCnP8T+JJDycGL8dPsGdG1ldHdorVZpFi
1P+Bem9bbiW73TpbX+WuCfP1g3WN7AVa2mYRfSVhsLNeBAMRgWgNW9JYsmg99lmY
aPsRYZdGu/PB+ffMIyWhjL3CKCbYS6lV5N5Mi4Lobyz/I1Euxpk2vJhhUqh786nJ
pQpDnvEl1CRANS6JD9bIvEdfatlAhFlrz1TTf6R7SlppyYI7tme4I/G3dnnHWYSG
cGRaLwpwobTq0UNSO71g7+at9eY8dh5nn2lZUvvxZvlbXoOoPxKUoeGVXqoq5F7S
QcMVAogYtyNlnLnsUfSPw6YFRaQ5o00h30bR3hk+YmJ47AJCRY9GIc/IEdSnd/Z5
Ea7CrP2Bo4zxPgcl8fe311FQRTRoWr19l5PXZgGjzy6siXTrYQi6GjLtqVB5SjJf
rrIIy1vZRyDL96WPu6fS+XQMpjsSygj+DBFk8OAvHhQhMCXHgT4BMyg4D5GE0665
AY0EZOVhwgEMAMIztf6WlRsweysb0tzktYE5E/GxIK1lwcD10Jzq3ovJJPa2Tg2t
J6ZBmMQfwU4OYO8lJxlgm7t6MYh41ZZaRhySCtbJiAXqK08LP9Gc1iWLRvKuMzli
NFSiFDFGT1D6kwucVfL/THxvZlQ559kK+LB4iXEKXz37r+MCX1K9uiv0wn63Vm0K
gD3HDgfXWYJcNyXXfJBe3/T5AhuSBOQcpa7Ow5n8zJ+OYg3FFKWHDBTSSZHpbJFr
ArMIGARz5/f+EVj9XGY4W/+ZJlxNh8FzrTLeRArmCWqKLPRG/KF36dTY7MDpOzlw
vu7frv+cgiXHZ2NfPrkH8oOl4L+ufze5KBGcN0QwFDcuwCkv/7Ft9Ta7gVaIBsK7
12oHInUJ6EkBovxpuaLlHlP8IfmZLZbbHzR2gR0e6IhLtrzd7urB+gXUtp6+wCL+
kWD14TTJhSQ+SFU8ajvUah7/1m2bxdjZNp9pzOPGkr/jEjCM0CpZiCY62SeIJqVc
4/ID9NYLAGmSIwARAQABiQG8BBgBCgAmFiEETvxZBpbLFbh8c6OtgsyHl8g43P0F
AmTlYcICGwwFCQPCZwAACgkQgsyHl8g43P0wEgv+LuknyXHpYpiUcJOl9Q5yLokd
o7tJwJ+9Fu7EDAfM7mPgyBj7Ad/v9RRP+JKWHqIYEjyrRnz9lmzciU+LT/CeoQu/
MgpU8wRI4gVtLkX2238amrTKKlVjQUUNHf7cITivUs/8e5W21JfwvcSzu5z4Mxyw
L6vMlBUAixtzZSXD6O7MO9uggHUZMt5gDSPXG2RcIgWm0Bd1yTHL7jZt67xBgZ4d
hUoelMN2XIDLv4SY78jbHAqVN6CLLtWrz0f5YdaeYj8OT6Ohr/iJQdlfVaiY4ikp
DzagLi0LvG9/GuB9eO6yLuojg45JEH8DC7NW5VbdUITxQe9NQ/j5kaRKTEq0fyZ+
qsrryTyvXghxK8oMUcI10l8d41qXDDPCA40kruuspCZSAle3zdqpYqiu6bglrgWr
Zr2Nm9ecm/kkqMIcyJ8e2mlkuufq5kVem0Oez+GIDegvwnK3HAqWQ9lzdWKvnLiE
gNkvg3bqIwZ/WoHBnSwOwwAzwarJl/gn8OG6CIeP
=8Uc6
-----END PGP PUBLIC KEY BLOCK-----
EOF
# Write the deb822-style sources entry for the Zabbly Incus stable channel.
# The outer single quotes defer the $(...) substitutions (distro codename,
# architecture) to the inner sh, so they are evaluated when the heredoc is
# written rather than by this script's shell.
sh -c 'cat <<EOF > /etc/apt/sources.list.d/zabbly-incus-stable.sources
Enabled: yes
Types: deb
URIs: https://pkgs.zabbly.com/incus/stable
Suites: $(. /etc/os-release && echo ${VERSION_CODENAME})
Components: main
Architectures: $(dpkg --print-architecture)
Signed-By: /etc/apt/keyrings/zabbly.asc
EOF'
# Refresh the package index (now including the Zabbly repo) and install incus.
apt-get update
apt-get install incus -y --no-install-recommends

View File

@ -17,37 +17,26 @@ fi
# TODO REVIEW mgmt software requirements
sudo apt-get update
sudo apt-get install -y wait-for-it dnsutils rsync sshfs apt-transport-https docker-ce-cli libcanberra-gtk-module snapd nano git
sudo apt-get install -y wait-for-it dnsutils rsync sshfs apt-transport-https docker-ce-cli libcanberra-gtk-module nano git
sudo bash -c "$HOME/sovereign-stack/install_incus.sh"
sleep 10
# #apt install python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses for trezor stuff
# # for trezor installation
# #pip3 install setuptools wheel
# #pip3 install trezor_agent
# # ensure the trezor-t udev rules are in place.
# # if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
# # sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
# # fi
# install snap
if ! snap list | grep -q lxd; then
sudo snap install htop
sudo snap install lxd --channel=5.18/candidate
sleep 6
# We just do an auto initialization. All we are using is the LXD client inside the management environment.
sudo lxd init --auto
fi
# run a lxd command so we don't we a warning upon first invocation
incus list > /dev/null 2>&1
sudo incus admin init --minimal
# add groups for docker and lxd
if ! groups ubuntu | grep -q docker; then
sudo addgroup docker
sudo usermod -aG docker ubuntu
sudo usermod -aG lxd ubuntu
if ! grep -q "^docker:" /etc/group; then
sudo groupadd docker
fi
# add groups for docker and lxd
if ! grep -q "^incus-admin:" /etc/group; then
sudo groupadd incus-admin
fi
if ! groups ubuntu | grep -q "\bdocker\b"; then
sudo usermod -aG docker ubuntu
fi
if ! groups ubuntu | grep -q "\bincus-admin\b"; then
sudo usermod -aG incus-admin ubuntu
fi

View File

@ -83,4 +83,8 @@ if [ "$PURGE_INCUS" = true ]; then
incus storage delete sovereign-stack
fi
if dpkg -l | grep -q incus; then
sudo apt purge incus -y
fi
fi