Compare commits: 98866559bd ... c3980df073

4 commits:
- c3980df073
- 9c518e47e2
- f5deac4874
- 493946c1f5
@@ -74,7 +74,7 @@ export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
 export WWW_SERVER_MAC_ADDRESS=
 export BTCPAYSERVER_MAC_ADDRESS=
 
-export CLUSTERS_DIR="$HOME/ss-clusters"
+export REMOTES_DIR="$HOME/ss-remotes"
 export PROJECTS_DIR="$HOME/ss-projects"
 export SITES_PATH="$HOME/ss-sites"
 
@@ -83,6 +83,7 @@ export LXD_UBUNTU_BASE_VERSION="jammy"
 export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
 export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
 export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
+export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"
 
 # Deploy a registry cache on your management machine.
 export DEPLOY_MGMT_REGISTRY=false
@@ -99,4 +100,4 @@ export ROOT_DISK_SIZE_GB=20
 export REGISTRY_URL="https://index.docker.io/v1/"
 export PRIMARY_DOMAIN=
 
-export TARGET_PROJECT_GIT_COMMIT=d3a1a36de08cc9ff25b854b960b52257d21291a4
+export TARGET_PROJECT_GIT_COMMIT=fbc6e2b6e50fa2f1a6cbc75d44f1a1e92917a792
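The image-name variables in the hunk above rely on bash pattern substitution: `${LXD_UBUNTU_BASE_VERSION//./-}` replaces every `.` in the value with `-` so the result is safe to use in LXD image and VM names. A minimal illustration (the dotted version value is hypothetical; `jammy` itself contains no dots):

```bash
#!/bin/bash
# Illustration only of the ${VAR//pattern/replacement} expansion used above.
LXD_UBUNTU_BASE_VERSION="22.04"                   # hypothetical dotted value
echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"    # prints: ss-base-22-04

LXD_UBUNTU_BASE_VERSION="jammy"                   # the value set in this compare
echo "ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"  # prints: ss-docker-jammy (nothing to replace)
```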

deployment/.gitignore (vendored, 2 changes)
@@ -1,2 +0,0 @@
-# this is tracked in a distinct git repo.
-project

@@ -2,7 +2,7 @@
 set -e
 cd "$(dirname "$0")"
 
-# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.
+# this script takes down all resources in the remote. This script is DESTRUCTIVE of data, so make sure it's backed up first.
 
 
 if lxc remote get-default | grep -q "local"; then
@@ -19,10 +19,16 @@ fi
 
 . ../defaults.sh
 
-. ./cluster_env.sh
+. ./remote_env.sh
 
 . ./project_env.sh
 
+if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
+    if lxc project list | grep -q "$PROJECT_NAME"; then
+        lxc project switch "$PROJECT_NAME"
+    fi
+fi
+
 for VM in www btcpayserver; do
     LXD_NAME="$VM-${DOMAIN_NAME//./-}"
 
@@ -39,6 +45,19 @@ for VM in www btcpayserver; do
 done
 
 
+if lxc network list -q | grep -q ss-ovn; then
+    lxc network delete ss-ovn
+fi
+
+if ! lxc info | grep "project:" | grep -q default; then
+    lxc project switch default
+fi
+
+
+if lxc project list | grep -q "$PROJECT_NAME"; then
+    lxc project delete "$PROJECT_NAME"
+fi
+
 # delete the base image so it can be created.
 if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
     lxc delete -f "$BASE_IMAGE_VM_NAME"
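The project-handling block added in the second hunk above only switches into the target LXD project when it both exists and is not already active; `lxc info` reports the currently selected project. A minimal sketch of that guard as a hypothetical helper (not part of the diff):

```bash
#!/bin/bash
# Hypothetical helper mirroring the guard added in the hunk above.
switch_project_if_exists() {
    local project="$1"
    # Only act when the project is not already active...
    if ! lxc info | grep "project:" | grep -q "$project"; then
        # ...and it actually exists on the remote.
        if lxc project list | grep -q "$project"; then
            lxc project switch "$project"
        fi
    fi
}

switch_project_if_exists "$PROJECT_NAME"
```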
@@ -3,7 +3,7 @@ Sovereign Stack Help.
 You are in the Sovereign Stack management environment. From here, you can issue several commands:
 
-ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
+ss-remote - Take a remote SSH endpoint under management of Sovereign Stack.
 ss-deploy - Creates an deployment to your active LXD remote (lxc remote get-default).
 ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTUVE of user data).
 ss-migrate - migrates an existing deployment to the newest version of Sovereign Stack.
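For orientation, a typical session with the commands listed in this help text might look like the following; the remote name and host below are placeholders borrowed from the examples elsewhere in this compare:

```bash
# Illustrative workflow only; names and hosts are placeholders.
ss-remote dev host01.domain.tld   # take the SSH endpoint under Sovereign Stack management
ss-deploy                         # create a deployment on the active LXD remote (lxc remote get-default)
ss-migrate                        # later, migrate the deployment to a newer Sovereign Stack version
ss-destroy                        # tear down the active deployment (DESTRUCTIVE of user data)
```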
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -exu
+set -eu
 cd "$(dirname "$0")"
 
 
@@ -28,7 +28,7 @@ done
 
 . ../defaults.sh
 
-. ./cluster_env.sh
+. ./remote_env.sh
 
 . ./project_env.sh
 
|
@ -20,6 +20,6 @@ export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition
|
|||||||
source "$PRIMARY_SITE_DEFINITION_PATH"
|
source "$PRIMARY_SITE_DEFINITION_PATH"
|
||||||
|
|
||||||
if [ -z "$PRIMARY_DOMAIN" ]; then
|
if [ -z "$PRIMARY_DOMAIN" ]; then
|
||||||
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
|
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
@@ -10,57 +10,57 @@ cd "$(dirname "$0")"
 DATA_PLANE_MACVLAN_INTERFACE=
 DISK_TO_USE=
 
-# override the cluster name.
-CLUSTER_NAME="${1:-}"
-if [ -z "$CLUSTER_NAME" ]; then
-    echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
-    echo " for example: 'ss-cluster dev clusterhost01.domain.tld"
+# override the remote name.
+REMOTE_NAME="${1:-}"
+if [ -z "$REMOTE_NAME" ]; then
+    echo "ERROR: The remote name was not provided. Syntax is: 'ss-remote REMOTE_NAME SSH_HOST_FQDN'"
+    echo " for example: 'ss-remote dev host01.domain.tld"
     exit 1
 fi
 
 #shellcheck disable=SC1091
 source ../defaults.sh
 
-export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
+REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"
 
-mkdir -p "$CLUSTER_PATH"
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-    # stub out a cluster_definition.
-    cat >"$CLUSTER_DEFINITION" <<EOL
+mkdir -p "$REMOTE_PATH"
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+    # stub out a remote_definition.
+    cat >"$REMOTE_DEFINITION" <<EOL
 #!/bin/bash
 
-# see https://www.sovereign-stack.org/cluster-definition for more info!
+# see https://www.sovereign-stack.org/remote for more info!
 
-export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
+export LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
 export BITCOIN_CHAIN="regtest"
-export PROJECT_PREFIX="dev"
+export PROJECT_PREFIX="$REMOTE_NAME"
 #export REGISTRY_URL=http://registry.domain.tld:5000
 
 EOL
 
-    chmod 0744 "$CLUSTER_DEFINITION"
-    echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
-    echo "Use this file to customize your cluster deployment;"
-    echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
+    chmod 0744 "$REMOTE_DEFINITION"
+    echo "We stubbed out a '$REMOTE_DEFINITION' file for you."
+    echo "Use this file to customize your remote deployment;"
+    echo "Check out 'https://www.sovereign-stack.org/remote' for more information."
     exit 1
 fi
 
-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"
 
-if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
+if ! lxc remote list | grep -q "$REMOTE_NAME"; then
     FQDN="${2:-}"
 
     if [ -z "$FQDN" ]; then
-        echo "ERROR: You MUST provide the FQDN of the cluster host."
+        echo "ERROR: You MUST provide the FQDN of the remote host."
         exit
     fi
 
     shift
 
     if [ -z "$FQDN" ]; then
-        echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
+        echo "ERROR: The Fully Qualified Domain Name of the new remote member was not set."
         exit 1
     fi
 
@@ -113,11 +113,14 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
     fi
 
 else
-    echo "ERROR: the cluster already exists! You need to go delete your lxd remote if you want to re-create your cluster."
-    echo " It's may also be helpful to reset/rename your cluster path."
+    echo "ERROR: the remote already exists! You need to go delete your lxd remote if you want to re-create your remote."
+    echo " It's may also be helpful to reset/rename your remote path."
     exit 1
 fi
 
 
+#ssh "ubuntu@$FQDN" 'sudo echo "ubuntu ALL=(ALL) NOPASSWD: /bin/su - a" >> /etc/sudoers'
+
 # if the disk is loop-based, then we assume the / path exists.
 if [ "$DISK_TO_USE" != loop ]; then
     # ensure we actually have that disk/partition on the system.
@@ -135,9 +138,9 @@ IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1
 IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
 IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"
 
-# error out if the cluster password is unset.
-if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
-    echo "ERROR: LXD_CLUSTER_PASSWORD must be set in your cluster_definition."
+# error out if the remote password is unset.
+if [ -z "$LXD_REMOTE_PASSWORD" ]; then
+    echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote_definition."
     exit 1
 fi
 
@@ -152,15 +155,26 @@ if ! command -v lxc >/dev/null 2>&1; then
     sleep 1
     fi
 
+
+    if lxc network list --format csv | grep -q lxdbr1; then
+        lxc network delete lxdbr1
+        sleep 1
+    fi
+
 fi
 
 # install dependencies.
-ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
+ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
 if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
-    ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
-    sleep 10
+    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.11/stable'
+    sleep 5
 fi
 
+# install OVN for the project-specific bridge networks
+ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central"
+
+ssh -t "ubuntu@$FQDN" "sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
+
 # if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we 'll
 # just attach VMs to the network interface used for for the default route.
 if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
@@ -169,12 +183,11 @@ fi
 
 export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
 
-echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
 # run lxd init on the remote server.
 cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
 config:
   core.https_address: ${MGMT_PLANE_IP}:8443
-  core.trust_password: ${LXD_CLUSTER_PASSWORD}
+  core.trust_password: ${LXD_REMOTE_PASSWORD}
   core.dns_address: ${MGMT_PLANE_IP}
   images.auto_update_interval: 15
 
@@ -183,10 +196,20 @@ networks:
   description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-error}"
   type: bridge
   config:
+    ipv4.address: 10.9.9.1/24
+    ipv4.dhcp.ranges: 10.9.9.10-10.9.9.127
     ipv4.nat: true
-    ipv4.dhcp: true
     ipv6.address: none
     dns.mode: managed
+- name: lxdbr1
+  description: "Non-natting bridge for ovn networks to connect to."
+  type: bridge
+  config:
+    ipv4.address: 10.10.10.1/24
+    ipv4.dhcp.ranges: 10.10.10.10-10.10.10.63
+    ipv4.ovn.ranges: 10.10.10.64-10.10.10.254
+    ipv4.nat: false
+    ipv6.address: none
 profiles:
 - config: {}
   description: "default profile for sovereign-stack instances."
@@ -197,7 +220,7 @@ profiles:
       type: disk
   name: default
 cluster:
-  server_name: ${CLUSTER_NAME}
+  server_name: ${REMOTE_NAME}
   enabled: true
   member_config: []
   cluster_address: ""
@@ -211,11 +234,11 @@ EOF
 # ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
 if wait-for-it -t 20 "$FQDN:8443"; then
     # now create a remote on your local LXC client and switch to it.
-    # the software will now target the new cluster.
-    lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
-    lxc remote switch "$CLUSTER_NAME"
+    # the software will now target the new remote.
+    lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
+    lxc remote switch "$REMOTE_NAME"
 
-    echo "INFO: You have create a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
+    echo "INFO: You have create a new remote named '$REMOTE_NAME'. Great! We switched your lxd remote to it."
 else
     echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
     exit 1
@@ -228,7 +251,6 @@ if ! lxc storage list --format csv | grep -q ss-base; then
     # we omit putting a size here so, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
     # TODO do some sanity/resource checking on DISK_TO_USE. Impelment full-disk encryption?
     lxc storage create ss-base zfs source="$DISK_TO_USE"
-
 else
     # if a disk is the default 'loop', then we create a zfs storage pool
     # on top of the existing filesystem using a loop device, per LXD docs
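The new `lxdbr1` bridge in the preseed reserves `ipv4.ovn.ranges`, which is what lets OVN networks use it as an uplink; the `ss-ovn` network torn down in the destroy script earlier in this compare is the consumer of that range. The creation of `ss-ovn` is not shown here, but with this preseed it could be created roughly like this (a sketch under that assumption, not taken from the diff):

```bash
#!/bin/bash
# Sketch only: create an OVN network that uplinks through the lxdbr1 bridge.
# The name ss-ovn matches the network removed by the destroy script above.
if ! lxc network list -q | grep -q ss-ovn; then
    lxc network create ss-ovn --type=ovn network=lxdbr1
fi
```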
@@ -3,9 +3,9 @@
 set -eu
 cd "$(dirname "$0")"
 
-CURRENT_CLUSTER="$(lxc remote get-default)"
+CURRENT_REMOTE="$(lxc remote get-default)"
 
-if echo "$CURRENT_CLUSTER" | grep -q "production"; then
+if echo "$CURRENT_REMOTE" | grep -q "production"; then
     echo "WARNING: You are running a migration procedure on a production system."
     echo ""
 
@@ -26,15 +26,15 @@ if echo "$CURRENT_CLUSTER" | grep -q "production"; then
 
 fi
 
-export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$CURRENT_REMOTE"
+REMOTE_DEFINITION="$REMOTE_PATH/remote_definition"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"
 
-# ensure the cluster definition exists.
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-    echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
-    echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
+# ensure the remote definition exists.
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+    echo "ERROR: The remote definition could not be found. You may need to run 'ss-remote'."
+    echo "INFO: Consult https://www.sovereign-stack.org/remote for more information."
     exit 1
 fi
 
-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"
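Combined with the renamed default `REMOTES_DIR="$HOME/ss-remotes"`, the migrate script now resolves the remote definition from whatever LXD remote is currently active. A quick sketch of the path it ends up sourcing:

```bash
#!/bin/bash
# Illustration of the path resolution used by the migrate script above.
CURRENT_REMOTE="$(lxc remote get-default)"
REMOTE_DEFINITION="$HOME/ss-remotes/$CURRENT_REMOTE/remote_definition"
echo "$REMOTE_DEFINITION"   # e.g. ~/ss-remotes/dev/remote_definition for a remote named 'dev'
```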
@@ -17,8 +17,8 @@ if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
     lxc image rm "$BASE_IMAGE_VM_NAME"
 fi
 
-if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
+if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+    lxc image rm "$DOCKER_BASE_IMAGE_NAME"
 fi
 
 CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
@@ -39,6 +39,11 @@ if lxc network list --format csv | grep -q lxdbr0; then
     lxc network delete lxdbr0
 fi
 
+if lxc network list --format csv | grep -q lxdbr1; then
+    lxc network delete lxdbr1
+fi
+
+
 if lxc storage list --format csv | grep -q ss-base; then
     lxc storage delete ss-base
 fi
@@ -1,10 +1,9 @@
 #!/bin/bash
 
-lxc list
+lxc remote list
+lxc storage list
+lxc image list
+lxc project list
 lxc network list
 lxc profile list
-lxc image list
-lxc storage list
-lxc storage info ss-base
-lxc project list
-lxc remote list
+lxc list
@@ -24,7 +24,7 @@ fi
 # install snap
 if ! snap list | grep -q lxd; then
-    sudo snap install lxd --channel=5.10/stable
+    sudo snap install lxd --channel=5.11/stable
     sleep 5
 fi
 
 # run lxd init on the remote server./dev/nvme1n1
@@ -60,7 +60,6 @@ profiles:
       type: disk
   name: default
 projects: []
-cluster: null
 
 EOF
 
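Hosts that already installed LXD from the 5.10/stable channel keep tracking that channel and will not move to 5.11 on their own; an existing install has to be refreshed onto the new channel. A hedged example of checking and switching the channel by hand:

```bash
# Illustrative only: inspect and move an existing LXD snap to the newly pinned channel.
snap list lxd                                  # shows the installed version and tracked channel
sudo snap refresh lxd --channel=5.11/stable    # moves an existing install onto 5.11/stable
```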
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
-alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
+alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
 alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
 alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
 alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
@@ -34,7 +34,7 @@ sleep 1
 # install snap
 if ! snap list | grep -q lxd; then
-    sudo snap install lxd --channel=5.10/stable
+    sudo snap install lxd --channel=5.11/stable
     sleep 6
 fi
 
 # We just do an auto initialization. All we are using is the LXD client inside the management environment.