
Compare commits

...

4 Commits

SHA1        Message                      Date
c3980df073  Various plumbing updates.    2023-03-09 09:58:56 -05:00
9c518e47e2  Implement LXD projects.      2023-03-09 09:58:16 -05:00
f5deac4874  Standardize lxd version.     2023-03-09 09:56:38 -05:00
493946c1f5  Rename cluster to remote.    2023-03-09 09:55:40 -05:00
13 changed files with 115 additions and 72 deletions

View File

@@ -74,7 +74,7 @@ export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
export CLUSTERS_DIR="$HOME/ss-clusters"
export REMOTES_DIR="$HOME/ss-remotes"
export PROJECTS_DIR="$HOME/ss-projects"
export SITES_PATH="$HOME/ss-sites"
@@ -83,6 +83,7 @@ export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"
# Deploy a registry cache on your management machine.
export DEPLOY_MGMT_REGISTRY=false
@@ -99,4 +100,4 @@ export ROOT_DISK_SIZE_GB=20
export REGISTRY_URL="https://index.docker.io/v1/"
export PRIMARY_DOMAIN=
export TARGET_PROJECT_GIT_COMMIT=d3a1a36de08cc9ff25b854b960b52257d21291a4
export TARGET_PROJECT_GIT_COMMIT=fbc6e2b6e50fa2f1a6cbc75d44f1a1e92917a792
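
A note on the ${LXD_UBUNTU_BASE_VERSION//./-} expansions in the image names above: bash's ${var//pattern/replacement} substitutes every occurrence of the pattern, which keeps instance/image names valid when the base version contains dots. A quick illustration (values are examples):

LXD_UBUNTU_BASE_VERSION="22.04"
echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"   # prints: ss-base-22-04
LXD_UBUNTU_BASE_VERSION="jammy"
echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"   # no dots, so unchanged: ss-base-jammy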

View File

@@ -1,2 +0,0 @@
# this is tracked in a distinct git repo.
project

View File

@@ -2,7 +2,7 @@
set -e
cd "$(dirname "$0")"
# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.
# this script takes down all resources in the remote. This script is DESTRUCTIVE of data, so make sure it's backed up first.
if lxc remote get-default | grep -q "local"; then
@@ -19,10 +19,16 @@ fi
. ../defaults.sh
. ./cluster_env.sh
. ./remote_env.sh
. ./project_env.sh
if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
    if lxc project list | grep -q "$PROJECT_NAME"; then
        lxc project switch "$PROJECT_NAME"
    fi
fi
for VM in www btcpayserver; do
    LXD_NAME="$VM-${DOMAIN_NAME//./-}"
@@ -39,6 +45,19 @@ for VM in www btcpayserver; do
done
if lxc network list -q | grep -q ss-ovn; then
    lxc network delete ss-ovn
fi
if ! lxc info | grep "project:" | grep -q default; then
    lxc project switch default
fi
if lxc project list | grep -q "$PROJECT_NAME"; then
    lxc project delete "$PROJECT_NAME"
fi
# delete the base image so it can be re-created.
if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
    lxc delete -f "$BASE_IMAGE_VM_NAME"
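
The guarded deletes above test for existence by grepping human-readable output, which can match on substrings of other names. A stricter variant (a sketch using standard lxc flags, matching the name column exactly):

if lxc list --format csv -c n | grep -qx "$BASE_IMAGE_VM_NAME"; then
    lxc delete -f "$BASE_IMAGE_VM_NAME"
fi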

View File

@@ -3,7 +3,7 @@ Sovereign Stack Help.
You are in the Sovereign Stack management environment. From here, you can issue several commands:
ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
ss-remote - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment to your active LXD remote (lxc remote get-default).
ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTIVE of user data).
ss-migrate - Migrates an existing deployment to the newest version of Sovereign Stack.
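
Taken together, a typical first session with these commands might look like this (remote name and hostname are illustrative):

ss-remote dev host01.domain.tld   # take the SSH endpoint under management
ss-deploy                         # create a deployment on the now-active LXD remote
ss-show                           # inspect what was created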

View File

@@ -1,6 +1,6 @@
#!/bin/bash
set -exu
set -eu
cd "$(dirname "$0")"
@@ -28,7 +28,7 @@ done
. ../defaults.sh
. ./cluster_env.sh
. ./remote_env.sh
. ./project_env.sh

View File

@@ -20,6 +20,6 @@ export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition
source "$PRIMARY_SITE_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition."
exit 1
fi

View File

@@ -10,57 +10,57 @@ cd "$(dirname "$0")"
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=
# override the cluster name.
CLUSTER_NAME="${1:-}"
if [ -z "$CLUSTER_NAME" ]; then
    echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
    echo " for example: 'ss-cluster dev clusterhost01.domain.tld'"
# override the remote name.
REMOTE_NAME="${1:-}"
if [ -z "$REMOTE_NAME" ]; then
    echo "ERROR: The remote name was not provided. Syntax is: 'ss-remote REMOTE_NAME SSH_HOST_FQDN'"
    echo " for example: 'ss-remote dev host01.domain.tld'"
    exit 1
fi
#shellcheck disable=SC1091
source ../defaults.sh
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
mkdir -p "$CLUSTER_PATH"
if [ ! -f "$CLUSTER_DEFINITION" ]; then
# stub out a cluster_definition.
cat >"$CLUSTER_DEFINITION" <<EOL
mkdir -p "$REMOTE_PATH"
if [ ! -f "$REMOTE_DEFINITION" ]; then
# stub out a remote_definition.
cat >"$REMOTE_DEFINITION" <<EOL
#!/bin/bash
# see https://www.sovereign-stack.org/cluster-definition for more info!
# see https://www.sovereign-stack.org/remote for more info!
export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
export BITCOIN_CHAIN="regtest"
export PROJECT_PREFIX="dev"
export PROJECT_PREFIX="$REMOTE_NAME"
#export REGISTRY_URL=http://registry.domain.tld:5000
EOL
chmod 0744 "$CLUSTER_DEFINITION"
echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
echo "Use this file to customize your cluster deployment;"
echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
chmod 0744 "$REMOTE_DEFINITION"
echo "We stubbed out a '$REMOTE_DEFINITION' file for you."
echo "Use this file to customize your remote deployment;"
echo "Check out 'https://www.sovereign-stack.org/remote' for more information."
exit 1
fi
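
Worth noting about the stub above: the heredoc delimiter (EOL) is unquoted, so $(gpg --gen-random --armor 1 14) is expanded when the stub is written, and the definition file receives a literal one-time password. A minimal sketch of the difference (/tmp paths are hypothetical):

# unquoted delimiter: the command substitution runs now; the file holds a literal random value
cat > /tmp/stub_demo <<EOL
export LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
EOL

# quoted delimiter: the file holds the command text itself, deferring generation to source time
cat > /tmp/stub_demo_deferred <<'EOL'
export LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
EOL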
source "$CLUSTER_DEFINITION"
source "$REMOTE_DEFINITION"
if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
if ! lxc remote list | grep -q "$REMOTE_NAME"; then
FQDN="${2:-}"
if [ -z "$FQDN" ]; then
echo "ERROR: You MUST provide the FQDN of the cluster host."
echo "ERROR: You MUST provide the FQDN of the remote host."
exit
fi
shift
if [ -z "$FQDN" ]; then
echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
echo "ERROR: The Fully Qualified Domain Name of the new remote member was not set."
exit 1
fi
@@ -113,11 +113,14 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
    fi
else
    echo "ERROR: the cluster already exists! You need to go delete your lxd remote if you want to re-create your cluster."
    echo " It may also be helpful to reset/rename your cluster path."
    echo "ERROR: the remote already exists! You need to go delete your lxd remote if you want to re-create your remote."
    echo " It may also be helpful to reset/rename your remote path."
    exit 1
fi
#ssh "ubuntu@$FQDN" 'sudo echo "ubuntu ALL=(ALL) NOPASSWD: /bin/su - a" >> /etc/sudoers'
# if the disk is loop-based, then we assume the / path exists.
if [ "$DISK_TO_USE" != loop ]; then
# ensure we actually have that disk/partition on the system.
@@ -135,9 +138,9 @@ IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1
IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"
# error out if the cluster password is unset.
if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
    echo "ERROR: LXD_CLUSTER_PASSWORD must be set in your cluster_definition."
# error out if the remote password is unset.
if [ -z "$LXD_REMOTE_PASSWORD" ]; then
    echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote_definition."
    exit 1
fi
@@ -152,15 +155,26 @@ if ! command -v lxc >/dev/null 2>&1; then
    sleep 1
    fi
    if lxc network list --format csv | grep -q lxdbr1; then
        lxc network delete lxdbr1
        sleep 1
    fi
fi
# install dependencies.
ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
sleep 10
ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.11/stable'
sleep 5
fi
# install OVN for the project-specific bridge networks
ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central"
ssh -t "ubuntu@$FQDN" "sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
# if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we'll
# just attach VMs to the network interface used for the default route.
if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
@@ -169,12 +183,11 @@ fi
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
# run lxd init on the remote server.
cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
config:
  core.https_address: ${MGMT_PLANE_IP}:8443
  core.trust_password: ${LXD_CLUSTER_PASSWORD}
  core.trust_password: ${LXD_REMOTE_PASSWORD}
  core.dns_address: ${MGMT_PLANE_IP}
  images.auto_update_interval: 15
@@ -183,10 +196,20 @@ networks:
  description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-error}"
  type: bridge
  config:
    ipv4.address: 10.9.9.1/24
    ipv4.dhcp.ranges: 10.9.9.10-10.9.9.127
    ipv4.nat: true
    ipv4.dhcp: true
    ipv6.address: none
    dns.mode: managed
- name: lxdbr1
  description: "Non-natting bridge for ovn networks to connect to."
  type: bridge
  config:
    ipv4.address: 10.10.10.1/24
    ipv4.dhcp.ranges: 10.10.10.10-10.10.10.63
    ipv4.ovn.ranges: 10.10.10.64-10.10.10.254
    ipv4.nat: false
    ipv6.address: none
profiles:
- config: {}
  description: "default profile for sovereign-stack instances."
@@ -197,7 +220,7 @@ profiles:
      type: disk
  name: default
cluster:
  server_name: ${CLUSTER_NAME}
  server_name: ${REMOTE_NAME}
  enabled: true
  member_config: []
  cluster_address: ""
@@ -211,11 +234,11 @@ EOF
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
if wait-for-it -t 20 "$FQDN:8443"; then
    # now create a remote on your local LXC client and switch to it.
    # the software will now target the new cluster.
    lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
    lxc remote switch "$CLUSTER_NAME"
    # the software will now target the new remote.
    lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
    lxc remote switch "$REMOTE_NAME"
    echo "INFO: You have created a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
    echo "INFO: You have created a new remote named '$REMOTE_NAME'. Great! We switched your lxd remote to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
exit 1
@@ -228,7 +251,6 @@ if ! lxc storage list --format csv | grep -q ss-base; then
    # we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
    # TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
    lxc storage create ss-base zfs source="$DISK_TO_USE"
else
    # if a disk is the default 'loop', then we create a zfs storage pool
    # on top of the existing filesystem using a loop device, per LXD docs
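
The else branch is truncated by the hunk here. Per the LXD documentation, omitting source makes LXD back a zfs pool with a loop device on the existing filesystem, so the loop case is plausibly just (a sketch, not the script's verbatim code):

lxc storage create ss-base zfs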

View File

@@ -3,9 +3,9 @@
set -eu
cd "$(dirname "$0")"
CURRENT_CLUSTER="$(lxc remote get-default)"
CURRENT_REMOTE="$(lxc remote get-default)"
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
if echo "$CURRENT_REMOTE" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
@ -26,15 +26,15 @@ if echo "$CURRENT_CLUSTER" | grep -q "production"; then
fi
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
export REMOTE_PATH="$REMOTES_DIR/$CURRENT_REMOTE"
REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
export REMOTE_DEFINITION="$REMOTE_DEFINITION"
# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
    echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
    echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
# ensure the remote definition exists.
if [ ! -f "$REMOTE_DEFINITION" ]; then
    echo "ERROR: The remote definition could not be found. You may need to run 'ss-remote'."
    echo "INFO: Consult https://www.sovereign-stack.org/remote for more information."
    exit 1
fi
source "$CLUSTER_DEFINITION"
source "$REMOTE_DEFINITION"

View File

@@ -17,8 +17,8 @@ if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
    lxc image rm "$BASE_IMAGE_VM_NAME"
fi
if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
    lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
    lxc image rm "$DOCKER_BASE_IMAGE_NAME"
fi
CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
@@ -39,6 +39,11 @@ if lxc network list --format csv | grep -q lxdbr0; then
    lxc network delete lxdbr0
fi
if lxc network list --format csv | grep -q lxdbr1; then
    lxc network delete lxdbr1
fi
if lxc storage list --format csv | grep -q ss-base; then
    lxc storage delete ss-base
fi
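
For context on the lxdbr1 deletion added above: lxdbr1 is the non-NAT uplink bridge (with ipv4.ovn.ranges in the preseed earlier) that the ss-ovn network removed by the destroy script attaches to. Creating such an OVN network against an uplink bridge is standard LXD usage; a hedged sketch, since the creation command itself is not part of this diff:

lxc network create ss-ovn --type=ovn network=lxdbr1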

View File

@@ -1,10 +1,9 @@
#!/bin/bash
lxc list
lxc remote list
lxc storage list
lxc image list
lxc project list
lxc network list
lxc profile list
lxc image list
lxc storage list
lxc storage info ss-base
lxc project list
lxc remote list
lxc list

View File

@@ -24,7 +24,7 @@ fi
# install snap
if ! snap list | grep -q lxd; then
sudo snap install lxd --channel=5.10/stable
sudo snap install lxd --channel=5.11/stable
sleep 5
# run lxd init on the remote server. /dev/nvme1n1
@@ -60,7 +60,6 @@ profiles:
      type: disk
  name: default
projects: []
cluster: null
EOF

View File

@@ -1,7 +1,7 @@
#!/bin/bash
alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
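
A side note on these aliases: in bash, $@ inside an alias body expands to the shell's current (usually empty) positional parameters, and arguments typed after an alias are appended to the expansion anyway, so the $@ here is effectively a no-op. A function makes argument passing explicit; a sketch using the same path as above:

ss-deploy() { /home/ubuntu/sovereign-stack/deployment/project/deploy.sh "$@"; }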

View File

@@ -34,7 +34,7 @@ sleep 1
# install snap
if ! snap list | grep -q lxd; then
    sudo snap install lxd --channel=5.10/stable
    sudo snap install lxd --channel=5.11/stable
    sleep 6
# We just do an auto initialization. All we are using is the LXD client inside the management environment.
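
The auto initialization referred to above is presumably LXD's non-interactive default setup, which is sufficient when only the client is used from the management environment; a minimal sketch:

sudo lxd init --auto   # accept defaults; only the lxc client is exercised from here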