forked from ss/sovereign-stack
Compare commits: eb67ec10a7...cde7d287c5 (31 commits)
Commits:
cde7d287c5
16d4339af6
672be48b08
7e8706f81c
5da08eab26
e2bfd5d090
a6ac567f12
34c1edf27e
180cd1fa8d
afa6c530ff
cc6bdef20d
514ae6ce24
d283dfb353
c08260a2d4
efeb0261bc
b2abf3fdf4
870d0b685c
6884154c04
8590e82411
7a705828b7
867771c908
e205d1cc7a
7ba91f8bcb
628df90d32
bd3acd8ef4
c3980df073
9c518e47e2
f5deac4874
493946c1f5
98866559bd
03d7411a05
.vscode/settings.json (vendored, 10 lines changed)
@@ -14,11 +14,11 @@
        "-x"
    ],
    "shellcheck.ignorePatterns": {},
    "shellcheck.exclude": [
        "SC1090",
        "SC1091",
        "SC2029"
    ],
    // "shellcheck.exclude": [
    //     "SC1090",
    //     "SC1091",
    //     "SC2029"
    // ],
    "terminal.integrated.fontFamily": "monospace",
    "workbench.colorCustomizations": {
        "activityBar.background": "#1900a565",
defaults.sh (12 lines changed)
@@ -51,7 +51,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"


# run the docker stack.
export GHOST_IMAGE="ghost:5.37.0"
export GHOST_IMAGE="ghost:5.38.0"

# TODO switch to mysql. May require intricate export work for existing sites.
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
@@ -74,7 +74,7 @@ export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
export WWW_SERVER_MAC_ADDRESS=
export BTCPAYSERVER_MAC_ADDRESS=

export CLUSTERS_DIR="$HOME/ss-clusters"
export REMOTES_DIR="$HOME/ss-remotes"
export PROJECTS_DIR="$HOME/ss-projects"
export SITES_PATH="$HOME/ss-sites"

@@ -83,9 +83,8 @@ export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"

# Deploy a registry cache on your management machine.
export DEPLOY_MGMT_REGISTRY=false
export OTHER_SITES_LIST=
export BTCPAY_ALT_NAMES=
export BITCOIN_CHAIN=regtest
@@ -99,4 +98,7 @@ export ROOT_DISK_SIZE_GB=20
export REGISTRY_URL="https://index.docker.io/v1/"
export PRIMARY_DOMAIN=

export TARGET_PROJECT_GIT_COMMIT=0701de580bdd6d32058852b0c6f290867d2d8ea2
# this is the git commit of the project/ sub git repo.
# used in the migration script to switch into the past for backup,
# then back to the present (TARGET_PROJECT_GIT_COMMIT) for restore.
export TARGET_PROJECT_GIT_COMMIT=29acc1796dca29f56f80005f869d3bdc1faf6c58
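Several of the image and VM names above rely on the ${LXD_UBUNTU_BASE_VERSION//./-} expansion, which replaces every dot with a dash so version strings stay valid in LXD object names. A quick illustration with a hypothetical numeric value (the diff itself uses "jammy"):

```bash
#!/bin/bash
# bash ${var//pattern/replacement} replaces ALL occurrences of the pattern.
LXD_UBUNTU_BASE_VERSION="22.04"                    # hypothetical value
echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"     # prints: ss-base-22-04
echo "ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"   # prints: ss-docker-22-04
```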
deployment/.gitignore (vendored, 1 line changed)
@@ -1,2 +1 @@
# this is tracked in a distinct git repo.
project
@@ -1,5 +0,0 @@
#!/bin/bash


# purpose of script is to switch the ./project repo to the git commit as
@@ -2,46 +2,107 @@

set -e
cd "$(dirname "$0")"
# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.

# this script destroys all resources in the current project.

if lxc remote get-default | grep -q "local"; then
echo "ERROR: you are on the local lxc remote. Nothing to destroy"
exit 1
fi

echo "WARNING: This will DESTROY any existing VMs!"

RESPONSE=
read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE
read -r -p "Are you sure you want to continue (y/n): ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi

USER_TARGET_PROJECT=

# grab any modifications from the command line.
for i in "$@"; do
case $i in
--project=*)
USER_TARGET_PROJECT="${i#*=}"
shift
;;

*)
echo "Unexpected option: $1"
exit 1
;;
esac
done

. ../defaults.sh

. ./cluster_env.sh
. ./remote_env.sh

. ./project_env.sh
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)

for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"

if lxc list | grep -q "$LXD_NAME"; then
lxc delete -f "$LXD_NAME"
# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
if [ -n "$USER_TARGET_PROJECT" ]; then
if ! lxc project list | grep -q "$USER_TARGET_PROJECT"; then
echo "ERROR: the project does not exist! Nothing to destroy."
exit 1
fi

if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
exit
fi
fi

export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"

. ./project_env.sh

if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
if lxc project list | grep -q "$PROJECT_NAME"; then
lxc project switch "$PROJECT_NAME"
fi
fi

for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"

if lxc list | grep -q "$LXD_NAME"; then
lxc delete -f "$LXD_NAME"

# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi

if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
fi
done

if lxc network list -q | grep -q ss-ovn; then
lxc network delete ss-ovn
fi

if ! lxc info | grep "project:" | grep -q default; then
lxc project switch default
fi

if lxc project list | grep -q "$PROJECT_NAME"; then
lxc project delete "$PROJECT_NAME"
fi

# delete the base image so it can be created.
if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc delete -f "$BASE_IMAGE_VM_NAME"
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi

if lxc profile list | grep -q "$LXD_NAME"; then
lxc profile delete "$LXD_NAME"
fi
done


# delete the base image so it can be created.
if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc delete -f "$BASE_IMAGE_VM_NAME"
# remove the ssh known endpoint else we get warnings.
ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
fi
done
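The loop above drives everything from DEPLOYMENT_STRING, whose format (per the remote.conf stub later in this diff) is a comma-separated list of '(project_prefix|bitcoin_chain)' pairs. A minimal, self-contained sketch of that parsing, using an illustrative value:

```bash
#!/bin/bash
# Illustrative only: how "(dev|regtest),(staging|testnet)" decomposes.
DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"

for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do      # split on commas
    NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"   # strip surrounding parentheses
    PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)  # e.g. dev
    BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)   # e.g. regtest
    echo "project: $PROJECT_PREFIX-$BITCOIN_CHAIN (chain: $BITCOIN_CHAIN)"
done
# prints:
#   project: dev-regtest (chain: regtest)
#   project: staging-testnet (chain: testnet)
```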
@@ -1,15 +1,12 @@

Sovereign Stack Help.
You are in the Sovereign Stack Management Environment (SSME). From here, you can issue several commands:

You are in the Sovereign Stack management environment. From here, you can issue several commands:

ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment to your active LXD remote (lxc remote get-default).
ss-destroy - Destroys the active deployment (WARNING: this action is DESTRUCTIVE of user data).
ss-migrate - Migrates an existing deployment to the newest version of Sovereign Stack.
ss-remote - Take a remote SSH endpoint under management of Sovereign Stack.
ss-deploy - Creates a deployment to your active LXD remote.
ss-destroy - Destroys the active deployment (WARNING: destructive).
ss-update - Brings an existing deployment up to the newest version of Sovereign Stack.
ss-show - Show the lxd resources associated with the current remote.

For more information about all these topics, consult the Sovereign Stack website. Relevant posts include:

- https://www.sovereign-stack.org/commands
For more information about all these topics, consult the Sovereign Stack website starting with:

- https://www.sovereign-stack.org/tag/instance-management/
@@ -1,95 +0,0 @@
#!/bin/bash

set -exu
cd "$(dirname "$0")"


# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommitted changes! You MUST commit or stash all changes to continue."
exit 1
fi


USER_SAYS_YES=false

for i in "$@"; do
case $i in
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
;;
esac
done

. ../defaults.sh

. ./cluster_env.sh

. ./project_env.sh


# deploy clams wallet.
PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
else
cd "$PROJECTS_SCRIPTS_PATH"
git pull
cd -
fi


# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"

# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done

BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"

# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves it to the management machine.
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.

# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"

# source the site path so we know what features it has.
source ../defaults.sh
source "$SITE_PATH/site_definition"
source ./project/domain_env.sh

GIT_COMMIT_ON_REMOTE_HOST="$(ssh ubuntu@$BTCPAY_FQDN cat /home/ubuntu/.ss-githead)"
cd project/
git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
sleep 5

# run deploy, which backs up everything but doesn't restart any services.
bash -c "./project/deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

# call the destroy script. If the user proceeds, user data is DESTROYED!
USER_SAYS_YES="$USER_SAYS_YES" ./destroy.sh

cd project/
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -

sleep 5
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./project/deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
@@ -3,23 +3,32 @@
set -eu
cd "$(dirname "$0")"

# source the project definition.
# Now let's load the project definition.
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
export PROJECT_NAME="$PROJECT_NAME"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"

PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"

if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
echo "ERROR: 'project_definition' not found at $PROJECT_DEFINITION_PATH."
echo "ERROR: 'project.conf' not found at $PROJECT_DEFINITION_PATH."
exit 1
fi

source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"

if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"

if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then
echo "ERROR: the site definition does not exist."
exit 1
fi

if [ -z "$PRIMARY_DOMAIN" ]; then
echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition at '$PRIMARY_SITE_DEFINITION_PATH'."
exit 1
fi

source "$PRIMARY_SITE_DEFINITION_PATH"

if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
if lxc project list | grep -q "$PROJECT_NAME"; then
lxc project switch "$PROJECT_NAME"
fi
fi
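The script above requires a project.conf that defines at least PRIMARY_DOMAIN (it is checked right after sourcing), plus a site.conf under $SITES_PATH/$PRIMARY_DOMAIN that must exist before deployment. A minimal sketch of that layout; every path and value below is hypothetical rather than taken from this diff:

```bash
# ~/ss-projects/dev-regtest/project.conf   (hypothetical example)
export PRIMARY_DOMAIN="example.org"        # checked immediately after sourcing

# ~/ss-sites/example.org/site.conf         (hypothetical example)
# site-specific settings go here; the diff only shows that the file must exist.
```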
@@ -10,57 +10,54 @@ cd "$(dirname "$0")"
DATA_PLANE_MACVLAN_INTERFACE=
DISK_TO_USE=

# override the cluster name.
CLUSTER_NAME="${1:-}"
if [ -z "$CLUSTER_NAME" ]; then
echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
echo " for example: 'ss-cluster dev clusterhost01.domain.tld'"
# override the remote name.
REMOTE_NAME="${1:-}"
if [ -z "$REMOTE_NAME" ]; then
echo "ERROR: The remote name was not provided. Syntax is: 'ss-remote REMOTE_NAME SSH_HOST_FQDN'"
echo " for example: 'ss-remote dev host01.domain.tld'"
exit 1
fi

#shellcheck disable=SC1091
source ../defaults.sh

export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
export REMOTE_DEFINITION="$REMOTE_DEFINITION"

mkdir -p "$CLUSTER_PATH"
if [ ! -f "$CLUSTER_DEFINITION" ]; then
# stub out a cluster_definition.
cat >"$CLUSTER_DEFINITION" <<EOL
#!/bin/bash
mkdir -p "$REMOTE_PATH"
if [ ! -f "$REMOTE_DEFINITION" ]; then
# stub out a remote.conf.
cat >"$REMOTE_DEFINITION" <<EOL
# https://www.sovereign-stack.org/ss-remote

# see https://www.sovereign-stack.org/cluster-definition for more info!

export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export BITCOIN_CHAIN="regtest"
export PROJECT_PREFIX="dev"
#export REGISTRY_URL=http://registry.domain.tld:5000
LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
# REGISTRY_URL=http://registry.domain.tld:5000

EOL

chmod 0744 "$CLUSTER_DEFINITION"
echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
echo "Use this file to customize your cluster deployment;"
echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
chmod 0744 "$REMOTE_DEFINITION"
echo "We stubbed out a '$REMOTE_DEFINITION' file for you."
echo "Use this file to customize your remote deployment;"
echo "Check out 'https://www.sovereign-stack.org/ss-remote' for more information."
exit 1
fi

source "$CLUSTER_DEFINITION"
source "$REMOTE_DEFINITION"

if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
if ! lxc remote list | grep -q "$REMOTE_NAME"; then
FQDN="${2:-}"

if [ -z "$FQDN" ]; then
echo "ERROR: You MUST provide the FQDN of the cluster host."
echo "ERROR: You MUST provide the FQDN of the remote host."
exit
fi

shift

if [ -z "$FQDN" ]; then
echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
echo "ERROR: The Fully Qualified Domain Name of the new remote member was not set."
exit 1
fi
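One detail worth noting in the stub above: the here-doc delimiter EOL is unquoted, so $(gpg --gen-random --armor 1 14) is expanded while the stub is being written, and remote.conf ends up containing a literal generated password rather than the command substitution. A minimal sketch of that behavior (file names are illustrative):

```bash
#!/bin/bash
# Unquoted delimiter: expansions happen now, at write time.
cat > /tmp/expanded.conf <<EOL
LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
EOL

# Quoted delimiter: the text is written verbatim and would only be expanded
# later, whenever the file is sourced.
cat > /tmp/verbatim.conf <<'EOL'
LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
EOL
```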
@@ -113,11 +110,14 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
fi

else
echo "ERROR: the cluster already exists! You need to go delete your lxd remote if you want to re-create your cluster."
echo " It may also be helpful to reset/rename your cluster path."
echo "ERROR: the remote already exists! You need to go delete your lxd remote if you want to re-create your remote."
echo " It may also be helpful to reset/rename your remote path."
exit 1
fi


#ssh "ubuntu@$FQDN" 'sudo echo "ubuntu ALL=(ALL) NOPASSWD: /bin/su - a" >> /etc/sudoers'

# if the disk is loop-based, then we assume the / path exists.
if [ "$DISK_TO_USE" != loop ]; then
# ensure we actually have that disk/partition on the system.
@@ -135,9 +135,9 @@ IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1
IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"

# error out if the cluster password is unset.
if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
echo "ERROR: LXD_CLUSTER_PASSWORD must be set in your cluster_definition."
# error out if the remote password is unset.
if [ -z "$LXD_REMOTE_PASSWORD" ]; then
echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote.conf file."
exit 1
fi
@@ -152,15 +152,26 @@ if ! command -v lxc >/dev/null 2>&1; then
sleep 1
fi


if lxc network list --format csv | grep -q lxdbr1; then
lxc network delete lxdbr1
sleep 1
fi

fi

# install dependencies.
ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
sleep 10
ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.11/stable'
sleep 5
fi

# install OVN for the project-specific bridge networks
ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central"

ssh -t "ubuntu@$FQDN" "sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"

# if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we'll
# just attach VMs to the network interface used for the default route.
if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
@@ -169,12 +180,11 @@ fi

export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"

echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
# run lxd init on the remote server.
cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
config:
  core.https_address: ${MGMT_PLANE_IP}:8443
  core.trust_password: ${LXD_CLUSTER_PASSWORD}
  core.trust_password: ${LXD_REMOTE_PASSWORD}
  core.dns_address: ${MGMT_PLANE_IP}
  images.auto_update_interval: 15

@@ -183,10 +193,20 @@ networks:
    description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-error}"
    type: bridge
    config:
      ipv4.address: 10.9.9.1/24
      ipv4.dhcp.ranges: 10.9.9.10-10.9.9.127
      ipv4.nat: true
      ipv4.dhcp: true
      ipv6.address: none
      dns.mode: managed
  - name: lxdbr1
    description: "Non-natting bridge for ovn networks to connect to."
    type: bridge
    config:
      ipv4.address: 10.10.10.1/24
      ipv4.dhcp.ranges: 10.10.10.10-10.10.10.63
      ipv4.ovn.ranges: 10.10.10.64-10.10.10.254
      ipv4.nat: false
      ipv6.address: none
profiles:
- config: {}
  description: "default profile for sovereign-stack instances."
@@ -197,7 +217,7 @@ profiles:
      type: disk
    name: default
cluster:
  server_name: ${CLUSTER_NAME}
  server_name: ${REMOTE_NAME}
  enabled: true
  member_config: []
  cluster_address: ""
@@ -211,11 +231,11 @@ EOF

# ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
if wait-for-it -t 20 "$FQDN:8443"; then
# now create a remote on your local LXC client and switch to it.
# the software will now target the new cluster.
lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
lxc remote switch "$CLUSTER_NAME"
# the software will now target the new remote.
lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
lxc remote switch "$REMOTE_NAME"

echo "INFO: You have created a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
echo "INFO: You have created a new remote named '$REMOTE_NAME'. Great! We switched your lxd remote to it."
else
echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
exit 1
@@ -228,7 +248,6 @@ if ! lxc storage list --format csv | grep -q ss-base; then
# we omit putting a size here, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
# TODO do some sanity/resource checking on DISK_TO_USE. Implement full-disk encryption?
lxc storage create ss-base zfs source="$DISK_TO_USE"

else
# if a disk is the default 'loop', then we create a zfs storage pool
# on top of the existing filesystem using a loop device, per LXD docs
@@ -3,9 +3,9 @@
set -eu
cd "$(dirname "$0")"

CURRENT_CLUSTER="$(lxc remote get-default)"
CURRENT_REMOTE="$(lxc remote get-default)"

if echo "$CURRENT_CLUSTER" | grep -q "production"; then
if echo "$CURRENT_REMOTE" | grep -q "production"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""

@@ -26,15 +26,15 @@ if echo "$CURRENT_CLUSTER" | grep -q "production"; then

fi

export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
export REMOTE_PATH="$REMOTES_DIR/$CURRENT_REMOTE"
REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
export REMOTE_DEFINITION="$REMOTE_DEFINITION"

# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
# ensure the remote definition exists.
if [ ! -f "$REMOTE_DEFINITION" ]; then
echo "ERROR: The remote definition could not be found. You may need to run 'ss-remote'."
echo "INFO: Consult https://www.sovereign-stack.org/ss-remote for more information."
exit 1
fi

source "$CLUSTER_DEFINITION"
source "$REMOTE_DEFINITION"
@@ -17,8 +17,8 @@ if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
lxc image rm "$BASE_IMAGE_VM_NAME"
fi

if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
lxc image rm "$DOCKER_BASE_IMAGE_NAME"
fi

CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
@@ -39,6 +39,11 @@ if lxc network list --format csv | grep -q lxdbr0; then
lxc network delete lxdbr0
fi

if lxc network list --format csv | grep -q lxdbr1; then
lxc network delete lxdbr1
fi


if lxc storage list --format csv | grep -q ss-base; then
lxc storage delete ss-base
fi
@@ -47,26 +52,4 @@ CURRENT_REMOTE="$(lxc remote get-default)"
if ! lxc remote get-default | grep -q "local"; then
lxc remote switch local
lxc remote remove "$CURRENT_REMOTE"
fi


# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
# lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
# fi


# if snap list | grep -q lxd; then
# sudo snap remove lxd
# sleep 2
# fi

# if zfs list | grep -q sovereign-stack; then
# sudo zfs destroy -r sovereign-stack
# fi

# if zfs list | grep -q "sovereign-stack"; then
# sudo zfs destroy -r "rpool/lxd"
# fi
fi
@@ -1,10 +1,12 @@
#!/bin/bash

lxc list
echo "LXD REMOTE: $(lxc remote get-default)"

lxc project list

lxc storage list
lxc image list
lxc project list
lxc network list
lxc profile list
lxc image list
lxc storage list
lxc storage info ss-base
lxc project list
lxc remote list
lxc list
deployment/update.sh (new executable file, 129 lines)
@@ -0,0 +1,129 @@
#!/bin/bash

set -eu
cd "$(dirname "$0")"


# check if there are any uncommitted changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommitted changes! You MUST commit or stash all changes to continue."
exit 1
fi


echo "WARNING: this script backs up your existing remote and saves all data locally in the SSME."
echo " Then, all your VMs are destroyed on the remote, resulting in destruction of user data."
echo " But then we re-create everything using the new codebase, then restore user data to the"
echo " newly provisioned VMs."

RESPONSE=
read -r -p "Are you sure you want to continue (y/n): ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi


USER_TARGET_PROJECT=

# grab any modifications from the command line.
for i in "$@"; do
case $i in
--project=*)
USER_TARGET_PROJECT="${i#*=}"
shift
;;

*)
echo "Unexpected option: $1"
exit 1
;;
esac
done


. ../defaults.sh

. ./remote_env.sh

for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
export PROJECT_PREFIX="$PROJECT_PREFIX"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"

PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"

# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
if [ -n "$USER_TARGET_PROJECT" ]; then
if ! lxc project list | grep -q "$USER_TARGET_PROJECT"; then
echo "ERROR: the project does not exist! Nothing to update."
exit 1
fi

if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
exit
fi
fi

export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"

. ./project_env.sh

# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"

# if the VM doesn't exist, then we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done

BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"

# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves it to the management machine.
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.

# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"

# source the site path so we know what features it has.
source ../defaults.sh
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh

# now we want to switch the git HEAD of the project subdirectory to the
# version of code that was last used
GIT_COMMIT_ON_REMOTE_HOST="$(ssh ubuntu@$BTCPAY_FQDN cat /home/ubuntu/.ss-githead)"
cd project/
git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
sleep 5

# run deploy, which backs up everything but doesn't restart any services.
bash -c "./project/deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

# call the destroy script. If the user proceeds, user data is DESTROYED!
./destroy.sh

cd project/
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -

sleep 5
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./project/deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

done
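Assuming the ss-update alias added later in this diff and a project named dev-regtest, a hypothetical invocation from inside the management environment looks like this; the --project switch restricts the run to that one project instead of every entry in DEPLOYMENT_STRING:

```bash
# Illustrative only: update a single project.
ss-update --project=dev-regtest
```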
farscapian.gpg (new file, 14 lines)
@@ -0,0 +1,14 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mFIEAAAAABMIKoZIzj0DAQcCAwQ3hQeRT8HLyQEtKJ5C3dKilfWhSpqkPFtfuE0I
i+MNLavAM7tL9gShij7tEcyZe0Iy2hc38TizSlQJciIdgtHUtCJEZXJlayBTbWl0
aCA8ZGVyZWtAZmFyc2NhcGlhbi5jb20+iIAEExMIABwFAgAAAAACCwkCGwMEFQgJ
CgQWAgMBAheAAh4BABYJELRD5TChThyQCxpUUkVaT1ItR1BHXMcA/2k4QtiV0eNQ
299XW4Wvoac1Be6+WTPRIaC/PYnd0pR7AP4hi5ou6uyKtqkfhLtRQHN/9ny3MBEG
whGxb/bCIzOdILhWBAAAAAASCCqGSM49AwEHAgMEI0VBpCTeIpfdH2UcWiSPYGAJ
Z1Rsp0uKf6HzZnpGRAdCTNgCh+pVBibP0Cz0pNdM7IfHSfS+OP4/Lb1B5N9BSAMB
CAeIbQQYEwgACQUCAAAAAAIbDAAWCRC0Q+UwoU4ckAsaVFJFWk9SLUdQRxM4AQCw
m24svH13uNAebQurOloy/1qZgNdXANBQQ05oi1tEyAD/eGFFVdgs5L6Hpg/GJLvo
X8bd1+1sa2d9TldbgfNfRA0=
=vZGY
-----END PGP PUBLIC KEY BLOCK-----
install.sh (14 lines changed)
@@ -24,7 +24,7 @@ fi

# install snap
if ! snap list | grep -q lxd; then
sudo snap install lxd --channel=5.10/stable
sudo snap install lxd --channel=5.11/stable
sleep 5

# run lxd init on the remote server./dev/nvme1n1
@@ -60,7 +60,6 @@ profiles:
type: disk
name: default
projects: []
cluster: null

EOF

@@ -138,3 +137,14 @@ if [ "$ADDED_COMMAND" = true ]; then
echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
fi


# As part of the install script, we pull down any other sovereign-stack git repos
PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
else
cd "$PROJECTS_SCRIPTS_PATH"
git pull origin main
cd -
fi
@@ -1,10 +1,10 @@
#!/bin/bash

alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
alias ss-update='/home/ubuntu/sovereign-stack/deployment/update.sh $@'
alias ss-destroy='/home/ubuntu/sovereign-stack/deployment/destroy.sh $@'
alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt'
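One behavior worth knowing about these aliases: a bash alias is plain text substitution, so the trailing $@ is not what forwards the caller's arguments; they are simply appended after the expanded command (and $@ itself expands to the shell's usually-empty positional parameters). A small sketch of that behavior:

```bash
#!/bin/bash
# Illustrative only: arguments after an alias are appended, not passed via $@.
shopt -s expand_aliases
alias greet='echo hello $@'
greet world   # expands to: echo hello  world   -> prints "hello world"
```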
@@ -34,7 +34,7 @@ sleep 1

# install snap
if ! snap list | grep -q lxd; then
sudo snap install lxd --channel=5.10/stable
sudo snap install lxd --channel=5.11/stable
sleep 6

# We just do an auto initialization. All we are using is the LXD client inside the management environment.