
Rename cluster to remote.

Derek Smith 2023-03-09 09:55:40 -05:00
parent 98866559bd
commit 493946c1f5
Signed by: farscapian
GPG Key ID: B443E530A14E1C90
9 changed files with 55 additions and 59 deletions

View File

@@ -74,7 +74,7 @@ export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=
-export CLUSTERS_DIR="$HOME/ss-clusters"
+export REMOTES_DIR="$HOME/ss-remotes"
export PROJECTS_DIR="$HOME/ss-projects"
export SITES_PATH="$HOME/ss-sites"

View File

@@ -2,7 +2,7 @@
set -e
cd "$(dirname "$0")"
-# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.
+# this script takes down all resources in the remote. This script is DESTRUCTIVE of data, so make sure it's backed up first.
if lxc remote get-default | grep -q "local"; then
@@ -19,7 +19,7 @@ fi
. ../defaults.sh
-. ./cluster_env.sh
+. ./remote_env.sh
. ./project_env.sh

View File

@@ -3,7 +3,7 @@ Sovereign Stack Help.
You are in the Sovereign Stack management environment. From here, you can issue several commands:
-ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
+ss-remote - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment to your active LXD remote (lxc remote get-default).
ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTIVE of user data).
ss-migrate - Migrates an existing deployment to the newest version of Sovereign Stack.
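
For orientation, a typical session with the renamed commands might look like the short sketch below. The remote name and host name are hypothetical placeholders; the argument order follows the syntax string printed by deployment/remote.sh ('ss-remote REMOTE_NAME SSH_HOST_FQDN').

# take a fresh SSH endpoint under management as the LXD remote named "dev"
ss-remote dev host01.domain.tld

# then deploy against whatever remote is currently active (lxc remote get-default)
ss-deploy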

View File

@@ -28,7 +28,7 @@ done
. ../defaults.sh
-. ./cluster_env.sh
+. ./remote_env.sh
. ./project_env.sh

View File

@@ -20,6 +20,6 @@ export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition
source "$PRIMARY_SITE_DEFINITION_PATH"
if [ -z "$PRIMARY_DOMAIN" ]; then
-echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
+echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition."
exit 1
fi

deployment/cluster.sh → deployment/remote.sh (Executable file → Normal file)
View File

@@ -1,6 +1,6 @@
#!/bin/bash
-set -e
+set -ex
cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
@@ -10,57 +10,57 @@ cd "$(dirname "$0")"
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=
-# override the cluster name.
-CLUSTER_NAME="${1:-}"
-if [ -z "$CLUSTER_NAME" ]; then
-echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
-echo " for example: 'ss-cluster dev clusterhost01.domain.tld"
+# override the remote name.
+REMOTE_NAME="${1:-}"
+if [ -z "$REMOTE_NAME" ]; then
+echo "ERROR: The remote name was not provided. Syntax is: 'ss-remote REMOTE_NAME SSH_HOST_FQDN'"
+echo " for example: 'ss-remote dev host01.domain.tld'"
exit 1
fi
#shellcheck disable=SC1091
source ../defaults.sh
-export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
+REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"
-mkdir -p "$CLUSTER_PATH"
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-# stub out a cluster_definition.
-cat >"$CLUSTER_DEFINITION" <<EOL
+mkdir -p "$REMOTE_PATH"
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+# stub out a remote_definition.
+cat >"$REMOTE_DEFINITION" <<EOL
#!/bin/bash
-# see https://www.sovereign-stack.org/cluster-definition for more info!
+# see https://www.sovereign-stack.org/remote for more info!
-export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
+export LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
export BITCOIN_CHAIN="regtest"
-export PROJECT_PREFIX="dev"
+export PROJECT_PREFIX="$REMOTE_NAME"
#export REGISTRY_URL=http://registry.domain.tld:5000
EOL
-chmod 0744 "$CLUSTER_DEFINITION"
-echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
-echo "Use this file to customize your cluster deployment;"
-echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
+chmod 0744 "$REMOTE_DEFINITION"
+echo "We stubbed out a '$REMOTE_DEFINITION' file for you."
+echo "Use this file to customize your remote deployment;"
+echo "Check out 'https://www.sovereign-stack.org/remote' for more information."
exit 1
fi
-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"
-if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
+if ! lxc remote list | grep -q "$REMOTE_NAME"; then
FQDN="${2:-}"
if [ -z "$FQDN" ]; then
-echo "ERROR: You MUST provide the FQDN of the cluster host."
+echo "ERROR: You MUST provide the FQDN of the remote host."
exit
fi
shift
if [ -z "$FQDN" ]; then
-echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
+echo "ERROR: The Fully Qualified Domain Name of the new remote member was not set."
exit 1
fi
@@ -113,11 +113,14 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
fi
else
-echo "ERROR: the cluster already exists! You need to go delete your lxd remote if you want to re-create your cluster."
-echo " It's may also be helpful to reset/rename your cluster path."
+echo "ERROR: the remote already exists! You need to go delete your lxd remote if you want to re-create your remote."
+echo " It may also be helpful to reset/rename your remote path."
exit 1
fi
+#ssh "ubuntu@$FQDN" 'sudo echo "ubuntu ALL=(ALL) NOPASSWD: /bin/su - a" >> /etc/sudoers'
# if the disk is loop-based, then we assume the / path exists.
if [ "$DISK_TO_USE" != loop ]; then
# ensure we actually have that disk/partition on the system.
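
The body of that disk/partition check is collapsed out of this hunk. Purely as an illustration (a hypothetical sketch, not the project's actual code), such a guard could amount to asking lsblk on the remote host whether the named block device exists:

# hypothetical sketch only: confirm the device is visible on the remote host before using it
if ! ssh "ubuntu@$FQDN" lsblk "$DISK_TO_USE" >/dev/null 2>&1; then
    echo "ERROR: The disk or partition '$DISK_TO_USE' does not exist on the host."
    exit 1
fi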
@@ -135,9 +138,9 @@ IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1
IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"
-# error out if the cluster password is unset.
-if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
-echo "ERROR: LXD_CLUSTER_PASSWORD must be set in your cluster_definition."
+# error out if the remote password is unset.
+if [ -z "$LXD_REMOTE_PASSWORD" ]; then
+echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote_definition."
exit 1
fi
@@ -155,10 +158,7 @@ if ! command -v lxc >/dev/null 2>&1; then
fi
# install dependencies.
-ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
-ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
-sleep 10
fi
# if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we'll
@@ -169,12 +169,11 @@ fi
export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
-echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
# run lxd init on the remote server.
cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
config:
core.https_address: ${MGMT_PLANE_IP}:8443
-core.trust_password: ${LXD_CLUSTER_PASSWORD}
+core.trust_password: ${LXD_REMOTE_PASSWORD}
core.dns_address: ${MGMT_PLANE_IP}
images.auto_update_interval: 15
@@ -184,7 +183,6 @@ networks:
type: bridge
config:
ipv4.nat: true
-ipv4.dhcp: true
ipv6.address: none
dns.mode: managed
profiles:
@@ -197,7 +195,7 @@ profiles:
type: disk
name: default
cluster:
-server_name: ${CLUSTER_NAME}
+server_name: ${REMOTE_NAME}
enabled: true
member_config: []
cluster_address: ""
@@ -211,11 +209,11 @@ EOF
# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
if wait-for-it -t 20 "$FQDN:8443"; then
# now create a remote on your local LXC client and switch to it.
-# the software will now target the new cluster.
-lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
-lxc remote switch "$CLUSTER_NAME"
-echo "INFO: You have create a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
+# the software will now target the new remote.
+lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
+lxc remote switch "$REMOTE_NAME"
+echo "INFO: You have created a new remote named '$REMOTE_NAME'. Great! We switched your lxd remote to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
exit 1
@@ -228,7 +226,6 @@ if ! lxc storage list --format csv | grep -q ss-base; then
# we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
-lxc storage create ss-base zfs source="$DISK_TO_USE"
else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
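
Once remote.sh finishes, the new LXD remote should be active on the management machine. A quick way to sanity-check the result (a minimal sketch using only lxc subcommands already relied on above; "dev" is a hypothetical remote name):

lxc remote list                 # the new remote should appear in the list
lxc remote get-default          # should print the remote name, e.g. "dev"
lxc storage list --format csv   # the ss-base zfs pool should be present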

View File

@@ -3,9 +3,9 @@
set -eu
cd "$(dirname "$0")"
-CURRENT_CLUSTER="$(lxc remote get-default)"
-if echo "$CURRENT_CLUSTER" | grep -q "production"; then
+CURRENT_REMOTE="$(lxc remote get-default)"
+if echo "$CURRENT_REMOTE" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""
@@ -26,15 +26,15 @@ if echo "$CURRENT_CLUSTER" | grep -q "production"; then
fi
-export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$CURRENT_REMOTE"
+REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"
-# ensure the cluster definition exists.
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
-echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
+# ensure the remote definition exists.
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+echo "ERROR: The remote definition could not be found. You may need to run 'ss-remote'."
+echo "INFO: Consult https://www.sovereign-stack.org/remote for more information."
exit 1
fi
-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"

View File

@@ -60,7 +60,6 @@ profiles:
type: disk
name: default
projects: []
-cluster: null
EOF

View File

@@ -1,7 +1,7 @@
#!/bin/bash
alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
-alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
+alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
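
A side note on these alias definitions (an observation about bash semantics, not part of the commit): bash aliases do not take positional parameters, so the literal $@ inside the single quotes is not the alias's arguments. Whatever follows the alias on the command line is simply appended after expansion, while $@ expands to the calling shell's own, usually empty, positional parameters. A function makes the intent explicit; a minimal sketch of an equivalent form:

# hypothetical alternative: a function forwards its arguments explicitly
ss-remote() {
    /home/ubuntu/sovereign-stack/deployment/remote.sh "$@"
}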