Compare commits


31 Commits

SHA1        Message                                            Date
cde7d287c5  Update projects commitment.                        2023-03-15 11:23:29 -04:00
16d4339af6  Prompt and help updates.                           2023-03-15 11:19:34 -04:00
672be48b08  Added PROJECT_NAME=                                2023-03-14 11:45:29 -04:00
7e8706f81c  Add USER_TARGET_PROJECT to update.sh               2023-03-14 11:44:22 -04:00
5da08eab26  Add --project scope.                               2023-03-14 11:37:33 -04:00
e2bfd5d090  Update commit hash.                                2023-03-13 14:47:06 -04:00
a6ac567f12  Update target project git commit.                  2023-03-13 14:44:13 -04:00
34c1edf27e  Add project context switch during ss-update.       2023-03-13 13:54:47 -04:00
180cd1fa8d  Rename site_definition to site.conf                2023-03-13 13:54:29 -04:00
afa6c530ff  Rename ss-migrate to ss-update.                    2023-03-13 13:45:26 -04:00
cc6bdef20d  Update TARGET_PROJECT_GIT_COMMIT                   2023-03-13 13:41:01 -04:00
514ae6ce24  Rename to remote.conf                              2023-03-13 13:40:47 -04:00
d283dfb353  Add iterate over DEPLOYMENT_STRING                 2023-03-13 13:40:07 -04:00
c08260a2d4  Remove symlink.                                    2023-03-11 11:13:36 -05:00
efeb0261bc  Minor updates.                                     2023-03-11 11:12:19 -05:00
b2abf3fdf4  Update docker image versions.                      2023-03-10 22:40:50 -05:00
870d0b685c  Update project ref.                                2023-03-10 21:13:40 -05:00
6884154c04  Update documentation and links.                    2023-03-10 21:12:39 -05:00
8590e82411  Simplify and add comments.                         2023-03-10 21:12:24 -05:00
7a705828b7  Update migration warning message.                  2023-03-10 11:14:55 -05:00
867771c908  Remove Project                                     2023-03-09 15:38:50 -05:00
e205d1cc7a  Update gitignore                                   2023-03-09 15:37:53 -05:00
7ba91f8bcb  Various updates.                                   2023-03-09 15:36:30 -05:00
628df90d32  Update projects git ref and other minor updates.   2023-03-09 10:53:00 -05:00
bd3acd8ef4  Add farscapian.gpg                                 2023-03-09 10:16:11 -05:00
c3980df073  Various plumbing updates.                          2023-03-09 09:58:56 -05:00
9c518e47e2  Implement LXD projects.                            2023-03-09 09:58:16 -05:00
f5deac4874  Standardize lxd version.                           2023-03-09 09:56:38 -05:00
493946c1f5  Rename cluster to remote.                          2023-03-09 09:55:40 -05:00
98866559bd  Move projects pull to install.sh                   2023-03-06 19:04:56 -05:00
03d7411a05  Update TARGET_PROJECT_GIT_COMMIT                   2023-03-06 18:55:57 -05:00
17 changed files with 368 additions and 243 deletions

.vscode/settings.json (vendored, 10 lines changed)
View File

@@ -14,11 +14,11 @@
         "-x"
     ],
     "shellcheck.ignorePatterns": {},
-    "shellcheck.exclude": [
-        "SC1090",
-        "SC1091",
-        "SC2029"
-    ],
+    // "shellcheck.exclude": [
+    //     "SC1090",
+    //     "SC1091",
+    //     "SC2029"
+    // ],
     "terminal.integrated.fontFamily": "monospace",
     "workbench.colorCustomizations": {
         "activityBar.background": "#1900a565",

View File

@@ -51,7 +51,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"
 # run the docker stack.
-export GHOST_IMAGE="ghost:5.37.0"
+export GHOST_IMAGE="ghost:5.38.0"
 # TODO switch to mysql. May require intricate export work for existing sites.
 # THIS MUST BE COMPLETED BEFORE v1 RELEASE
@@ -74,7 +74,7 @@ export NOSTR_RELAY_IMAGE="scsibug/nostr-rs-relay"
 export WWW_SERVER_MAC_ADDRESS=
 export BTCPAYSERVER_MAC_ADDRESS=
-export CLUSTERS_DIR="$HOME/ss-clusters"
+export REMOTES_DIR="$HOME/ss-remotes"
 export PROJECTS_DIR="$HOME/ss-projects"
 export SITES_PATH="$HOME/ss-sites"
@@ -83,9 +83,8 @@ export LXD_UBUNTU_BASE_VERSION="jammy"
 export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
 export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
 export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
+export DOCKER_BASE_IMAGE_NAME="ss-docker-${LXD_UBUNTU_BASE_VERSION//./-}"
-# Deploy a registry cache on your management machine.
-export DEPLOY_MGMT_REGISTRY=false
 export OTHER_SITES_LIST=
 export BTCPAY_ALT_NAMES=
 export BITCOIN_CHAIN=regtest
@@ -99,4 +98,7 @@ export ROOT_DISK_SIZE_GB=20
 export REGISTRY_URL="https://index.docker.io/v1/"
 export PRIMARY_DOMAIN=
-export TARGET_PROJECT_GIT_COMMIT=0701de580bdd6d32058852b0c6f290867d2d8ea2
+# this is the git commit of the project/ sub git repo.
+# used in the migration script to switch into past for backup
+# then back to present (TARGET_PROJECT_GIT_COMMIT) for restore.
+export TARGET_PROJECT_GIT_COMMIT=29acc1796dca29f56f80005f869d3bdc1faf6c58
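
The three comment lines above document the contract that the new deployment/update.sh (shown later in this diff) relies on: the remote host records the project commit it was deployed from in /home/ubuntu/.ss-githead, and TARGET_PROJECT_GIT_COMMIT names the commit to upgrade to. A condensed sketch of that back-and-forth, using only calls that appear in update.sh:

    # Hedged sketch of the update flow; paths and variables as used in update.sh below.
    GIT_COMMIT_ON_REMOTE_HOST="$(ssh "ubuntu@$BTCPAY_FQDN" cat /home/ubuntu/.ss-githead)"
    git -C ./project checkout "$GIT_COMMIT_ON_REMOTE_HOST"   # past: back up with the code that deployed it
    git -C ./project checkout "$TARGET_PROJECT_GIT_COMMIT"   # present: restore with the new code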

View File

@@ -1,2 +1 @@
-# this is tracked in a distinct git repo.
 project

View File

@@ -1,5 +0,0 @@
-#!/bin/bash
-# purpose of script is to switch the ./project repo to the git commit as

View File

@@ -2,46 +2,107 @@
 set -e
 cd "$(dirname "$0")"

-# this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.
+# this script destroys all resources in the current project.

 if lxc remote get-default | grep -q "local"; then
     echo "ERROR: you are on the local lxc remote. Nothing to destroy"
     exit 1
 fi

+echo "WARNING: This will DESTROY any existing VMs!"
+
 RESPONSE=
-read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE
+read -r -p "Are you sure you want to continue (y/n): ": RESPONSE
 if [ "$RESPONSE" != "y" ]; then
     echo "STOPPING."
     exit 0
 fi

+USER_TARGET_PROJECT=
+
+# grab any modifications from the command line.
+for i in "$@"; do
+    case $i in
+        --project=*)
+            USER_TARGET_PROJECT="${i#*=}"
+            shift
+            ;;
+        *)
+            echo "Unexpected option: $1"
+            exit 1
+            ;;
+    esac
+done
+
 . ../defaults.sh
-. ./cluster_env.sh
-. ./project_env.sh
+. ./remote_env.sh

-for VM in www btcpayserver; do
-    LXD_NAME="$VM-${DOMAIN_NAME//./-}"
-    if lxc list | grep -q "$LXD_NAME"; then
-        lxc delete -f "$LXD_NAME"
-
-        # remove the ssh known endpoint else we get warnings.
-        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
-    fi
-
-    if lxc profile list | grep -q "$LXD_NAME"; then
-        lxc profile delete "$LXD_NAME"
-    fi
-done
-
-# delete the base image so it can be created.
-if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
-    lxc delete -f "$BASE_IMAGE_VM_NAME"
-
-    # remove the ssh known endpoint else we get warnings.
-    ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
-fi
+for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
+    NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
+    PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
+    BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
+    PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
+    PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
+
+    # if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
+    if [ -n "$USER_TARGET_PROJECT" ]; then
+        if ! lxc project list | grep -q "$USER_TARGET_PROJECT"; then
+            echo "ERROR: the project does not exist! Nothing to destroy."
+            exit 1
+        fi
+
+        if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
+            echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
+            exit
+        fi
+    fi
+
+    export PROJECT_NAME="$PROJECT_NAME"
+    export PROJECT_PATH="$PROJECT_PATH"
+
+    . ./project_env.sh
+
+    if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
+        if lxc project list | grep -q "$PROJECT_NAME"; then
+            lxc project switch "$PROJECT_NAME"
+        fi
+    fi
+
+    for VM in www btcpayserver; do
+        LXD_NAME="$VM-${DOMAIN_NAME//./-}"
+        if lxc list | grep -q "$LXD_NAME"; then
+            lxc delete -f "$LXD_NAME"
+
+            # remove the ssh known endpoint else we get warnings.
+            ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
+        fi
+
+        if lxc profile list | grep -q "$LXD_NAME"; then
+            lxc profile delete "$LXD_NAME"
+        fi
+    done
+
+    if lxc network list -q | grep -q ss-ovn; then
+        lxc network delete ss-ovn
+    fi
+
+    if ! lxc info | grep "project:" | grep -q default; then
+        lxc project switch default
+    fi
+
+    if lxc project list | grep -q "$PROJECT_NAME"; then
+        lxc project delete "$PROJECT_NAME"
+    fi
+
+    # delete the base image so it can be created.
+    if lxc list | grep -q "$BASE_IMAGE_VM_NAME"; then
+        lxc delete -f "$BASE_IMAGE_VM_NAME"
+
+        # remove the ssh known endpoint else we get warnings.
+        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$LXD_NAME"
+    fi
+done
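
The new per-project loop above is driven by DEPLOYMENT_STRING, which the remote.conf stub (later in this diff) seeds as "(dev|regtest),(staging|testnet)". A standalone sketch of how that string expands into LXD project names:

    #!/bin/bash
    # Runnable sketch of the DEPLOYMENT_STRING parsing used by destroy.sh and update.sh.
    DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
    for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do       # split on commas
        NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"   # strip the surrounding parens
        PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)  # dev, staging
        BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)   # regtest, testnet
        echo "$PROJECT_PREFIX-$BITCOIN_CHAIN"                # prints dev-regtest, staging-testnet
    done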

View File

@@ -1,15 +1,12 @@
-Sovereign Stack Help.
-
-You are in the Sovereign Stack management environment. From here, you can issue several commands:
+You are in the Sovereign Stack Management Environment (SSME). From here, you can issue several commands:

-ss-cluster - Take a remote SSH endpoint under management of Sovereign Stack.
-ss-deploy - Creates an deployment to your active LXD remote (lxc remote get-default).
-ss-destroy - Destroys the active deployment (Warning: this action is DESTRUCTUVE of user data).
-ss-migrate - migrates an existing deployment to the newest version of Sovereign Stack.
+ss-remote - Take a remote SSH endpoint under management of Sovereign Stack.
+ss-deploy - Creates a deployment to your active LXD remote.
+ss-destroy - Destroys the active deployment (WARNING: destructive).
+ss-update - brings an existing deployment up to the newest version of Sovereign Stack.
 ss-show - show the lxd resources associated with the current remote.

-For more infomation about all these topics, consult the Sovereign Stack website. Relevant posts include:
+For more infomation about all these topics, consult the Sovereign Stack website starting with:

-- https://www.sovereign-stack.org/tag/instance-management/
+- https://www.sovereign-stack.org/commands

View File

@@ -1,95 +0,0 @@
#!/bin/bash
set -exu
cd "$(dirname "$0")"
# check if there are any uncommited changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
USER_SAYS_YES=false
for i in "$@"; do
case $i in
-y)
USER_SAYS_YES=true
shift
;;
*)
echo "Unexpected option: $1"
;;
esac
done
. ../defaults.sh
. ./cluster_env.sh
. ./project_env.sh
# deploy clams wallet.
PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
else
cd "$PROJECTS_SCRIPTS_PATH"
git pull
cd -
fi
# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, the we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves them to the management machine
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.
# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
# source the site path so we know what features it has.
source ../defaults.sh
source "$SITE_PATH/site_definition"
source ./project/domain_env.sh
GIT_COMMIT_ON_REMOTE_HOST="$(ssh ubuntu@$BTCPAY_FQDN cat /home/ubuntu/.ss-githead)"
cd project/
git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
sleep 5
# run deploy which backups up everything, but doesnt restart any services.
bash -c "./project/deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
# call the destroy script. If user proceed, then user data is DESTROYED!
USER_SAYS_YES="$USER_SAYS_YES" ./destroy.sh
cd project/
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -
sleep 5
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./project/deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

View File

@@ -3,23 +3,32 @@
 set -eu
 cd "$(dirname "$0")"

-# Now let's load the project definition.
-PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
-export PROJECT_NAME="$PROJECT_NAME"
-PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
-PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
+# source project defition.
+PROJECT_DEFINITION_PATH="$PROJECT_PATH/project.conf"
 if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
-    echo "ERROR: 'project_definition' not found $PROJECT_DEFINITION_PATH not found."
+    echo "ERROR: 'project.conf' not found $PROJECT_DEFINITION_PATH not found."
     exit 1
 fi

 source "$PROJECT_DEFINITION_PATH"

-export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
-source "$PRIMARY_SITE_DEFINITION_PATH"
+export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site.conf"

-if [ -z "$PRIMARY_DOMAIN" ]; then
-    echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
+if [ ! -f "$PRIMARY_SITE_DEFINITION_PATH" ]; then
+    echo "ERROR: the site definition does not exist."
     exit 1
 fi
+
+if [ -z "$PRIMARY_DOMAIN" ]; then
+    echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your remote definition at '$PRIMARY_SITE_DEFINITION_PATH'."
+    exit 1
+fi
+
+source "$PRIMARY_SITE_DEFINITION_PATH"
+
+if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
+    if lxc project list | grep -q "$PROJECT_NAME"; then
+        lxc project switch "$PROJECT_NAME"
+    fi
+fi
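
Note that project_env.sh now assumes its callers export PROJECT_NAME and PROJECT_PATH before sourcing it, which is exactly what the reworked destroy.sh and update.sh in this diff do:

    # How callers consume this file (the PROJECT_NAME value is illustrative):
    export PROJECT_NAME="dev-regtest"
    export PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
    . ./project_env.sh   # sources project.conf and site.conf, then switches the lxc project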

View File

@@ -10,57 +10,54 @@ cd "$(dirname "$0")"
 DATA_PLANE_MACVLAN_INTERFACE=
 DISK_TO_USE=

-# override the cluster name.
-CLUSTER_NAME="${1:-}"
-if [ -z "$CLUSTER_NAME" ]; then
-    echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
-    echo "       for example: 'ss-cluster dev clusterhost01.domain.tld"
+# override the remote name.
+REMOTE_NAME="${1:-}"
+if [ -z "$REMOTE_NAME" ]; then
+    echo "ERROR: The remote name was not provided. Syntax is: 'ss-remote REMOTE_NAME SSH_HOST_FQDN'"
+    echo "       for example: 'ss-remote dev host01.domain.tld"
     exit 1
 fi

 #shellcheck disable=SC1091
 source ../defaults.sh

-export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
+REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"

-mkdir -p "$CLUSTER_PATH"
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-    # stub out a cluster_definition.
-    cat >"$CLUSTER_DEFINITION" <<EOL
-#!/bin/bash
-# see https://www.sovereign-stack.org/cluster-definition for more info!
-
-export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
-export BITCOIN_CHAIN="regtest"
-export PROJECT_PREFIX="dev"
-#export REGISTRY_URL=http://registry.domain.tld:5000
+mkdir -p "$REMOTE_PATH"
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+    # stub out a remote.conf.
+    cat >"$REMOTE_DEFINITION" <<EOL
+# https://www.sovereign-stack.org/ss-remote
+
+LXD_REMOTE_PASSWORD="$(gpg --gen-random --armor 1 14)"
+DEPLOYMENT_STRING="(dev|regtest),(staging|testnet)"
+# REGISTRY_URL=http://registry.domain.tld:5000

 EOL

-    chmod 0744 "$CLUSTER_DEFINITION"
-    echo "We stubbed out a '$CLUSTER_DEFINITION' file for you."
-    echo "Use this file to customize your cluster deployment;"
-    echo "Check out 'https://www.sovereign-stack.org/cluster-definition' for more information."
+    chmod 0744 "$REMOTE_DEFINITION"
+    echo "We stubbed out a '$REMOTE_DEFINITION' file for you."
+    echo "Use this file to customize your remote deployment;"
+    echo "Check out 'https://www.sovereign-stack.org/ss-remote' for more information."

     exit 1
 fi

-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"

-if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
+if ! lxc remote list | grep -q "$REMOTE_NAME"; then
     FQDN="${2:-}"

     if [ -z "$FQDN" ]; then
-        echo "ERROR: You MUST provide the FQDN of the cluster host."
+        echo "ERROR: You MUST provide the FQDN of the remote host."
         exit
     fi

     shift

     if [ -z "$FQDN" ]; then
-        echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
+        echo "ERROR: The Fully Qualified Domain Name of the new remote member was not set."
         exit 1
     fi
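
Per the stub logic above, the first invocation writes remote.conf and exits so it can be edited; a second run with the same arguments actually takes the host under management:

    # Illustrative invocation (remote name and FQDN are examples):
    ss-remote dev host01.domain.tld   # stubs ~/ss-remotes/dev/remote.conf, then exits
    ss-remote dev host01.domain.tld   # second run: provisions LXD on the host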
@@ -113,11 +110,14 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
     fi
 else
-    echo "ERROR: the cluster already exists! You need to go delete your lxd remote if you want to re-create your cluster."
-    echo "       It's may also be helpful to reset/rename your cluster path."
+    echo "ERROR: the remote already exists! You need to go delete your lxd remote if you want to re-create your remote."
+    echo "       It's may also be helpful to reset/rename your remote path."
     exit 1
 fi

+#ssh "ubuntu@$FQDN" 'sudo echo "ubuntu ALL=(ALL) NOPASSWD: /bin/su - a" >> /etc/sudoers'
+
 # if the disk is loop-based, then we assume the / path exists.
 if [ "$DISK_TO_USE" != loop ]; then
     # ensure we actually have that disk/partition on the system.
@@ -135,9 +135,9 @@ IP_OF_MGMT_MACHINE="$(ssh ubuntu@"$FQDN" env | grep SSH_CLIENT | cut -d " " -f 1
 IP_OF_MGMT_MACHINE="${IP_OF_MGMT_MACHINE#*=}"
 IP_OF_MGMT_MACHINE="$(echo "$IP_OF_MGMT_MACHINE" | cut -d: -f1)"

-# error out if the cluster password is unset.
-if [ -z "$LXD_CLUSTER_PASSWORD" ]; then
-    echo "ERROR: LXD_CLUSTER_PASSWORD must be set in your cluster_definition."
+# error out if the remote password is unset.
+if [ -z "$LXD_REMOTE_PASSWORD" ]; then
+    echo "ERROR: LXD_REMOTE_PASSWORD must be set in your remote.conf file."
     exit 1
 fi
@@ -152,15 +152,26 @@ if ! command -v lxc >/dev/null 2>&1; then
         sleep 1
     fi

+    if lxc network list --format csv | grep -q lxdbr1; then
+        lxc network delete lxdbr1
+        sleep 1
+    fi
 fi

 # install dependencies.
-ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
+ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'

 if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
-    ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
-    sleep 10
+    ssh -t "ubuntu@$FQDN" 'sudo snap install lxd --channel=5.11/stable'
+    sleep 5
 fi

+# install OVN for the project-specific bridge networks
+ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central"
+
+ssh -t "ubuntu@$FQDN" "sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
+
 # if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we 'll
 # just attach VMs to the network interface used for for the default route.
 if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
@@ -169,12 +180,11 @@ fi
 export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
-echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"

 # run lxd init on the remote server.
 cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
 config:
   core.https_address: ${MGMT_PLANE_IP}:8443
-  core.trust_password: ${LXD_CLUSTER_PASSWORD}
+  core.trust_password: ${LXD_REMOTE_PASSWORD}
   core.dns_address: ${MGMT_PLANE_IP}
   images.auto_update_interval: 15
@@ -183,10 +193,20 @@ networks:
   description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-error}"
   type: bridge
   config:
+    ipv4.address: 10.9.9.1/24
+    ipv4.dhcp.ranges: 10.9.9.10-10.9.9.127
     ipv4.nat: true
-    ipv4.dhcp: true
     ipv6.address: none
     dns.mode: managed
+- name: lxdbr1
+  description: "Non-natting bridge for ovn networks to connect to."
+  type: bridge
+  config:
+    ipv4.address: 10.10.10.1/24
+    ipv4.dhcp.ranges: 10.10.10.10-10.10.10.63
+    ipv4.ovn.ranges: 10.10.10.64-10.10.10.254
+    ipv4.nat: false
+    ipv6.address: none
 profiles:
 - config: {}
   description: "default profile for sovereign-stack instances."
@@ -197,7 +217,7 @@ profiles:
       type: disk
     name: default
 cluster:
-  server_name: ${CLUSTER_NAME}
+  server_name: ${REMOTE_NAME}
   enabled: true
   member_config: []
   cluster_address: ""
@@ -211,11 +231,11 @@ EOF
 # ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
 if wait-for-it -t 20 "$FQDN:8443"; then
     # now create a remote on your local LXC client and switch to it.
-    # the software will now target the new cluster.
-    lxc remote add "$CLUSTER_NAME" "$FQDN" --password="$LXD_CLUSTER_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
-    lxc remote switch "$CLUSTER_NAME"
-    echo "INFO: You have create a new cluster named '$CLUSTER_NAME'. Great! We switched your lxd remote to it."
+    # the software will now target the new remote.
+    lxc remote add "$REMOTE_NAME" "$FQDN" --password="$LXD_REMOTE_PASSWORD" --protocol=lxd --auth-type=tls --accept-certificate
+    lxc remote switch "$REMOTE_NAME"
+    echo "INFO: You have create a new remote named '$REMOTE_NAME'. Great! We switched your lxd remote to it."
 else
     echo "ERROR: Could not detect the LXD endpoint. Something went wrong."
     exit 1
@@ -228,7 +248,6 @@ if ! lxc storage list --format csv | grep -q ss-base; then
     # we omit putting a size here so, so LXD will consume the entire disk if '/dev/sdb' or partition if '/dev/sdb1'.
     # TODO do some sanity/resource checking on DISK_TO_USE. Impelment full-disk encryption?
     lxc storage create ss-base zfs source="$DISK_TO_USE"
-
 else
     # if a disk is the default 'loop', then we create a zfs storage pool
     # on top of the existing filesystem using a loop device, per LXD docs
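
The elided else-branch builds the pool without a source= argument, which (per the LXD docs the comment references) makes LXD back the zfs pool with a loop device on the existing filesystem; roughly:

    # Sketch of the loop-device branch: omitting source= yields a loop-backed pool.
    lxc storage create ss-base zfs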

View File

@@ -3,9 +3,9 @@
 set -eu
 cd "$(dirname "$0")"

-CURRENT_CLUSTER="$(lxc remote get-default)"
+CURRENT_REMOTE="$(lxc remote get-default)"

-if echo "$CURRENT_CLUSTER" | grep -q "production"; then
+if echo "$CURRENT_REMOTE" | grep -q "production"; then
     echo "WARNING: You are running a migration procedure on a production system."
     echo ""
@@ -26,15 +26,15 @@ if echo "$CURRENT_CLUSTER" | grep -q "production"; then
 fi

-export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
-CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
-export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
+export REMOTE_PATH="$REMOTES_DIR/$CURRENT_REMOTE"
+REMOTE_DEFINITION="$REMOTE_PATH/remote.conf"
+export REMOTE_DEFINITION="$REMOTE_DEFINITION"

-# ensure the cluster definition exists.
-if [ ! -f "$CLUSTER_DEFINITION" ]; then
-    echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
-    echo "INFO: Consult https://www.sovereign-stack.org/clusters for more information."
+# ensure the remote definition exists.
+if [ ! -f "$REMOTE_DEFINITION" ]; then
+    echo "ERROR: The remote definition could not be found. You may need to run 'ss-remote'."
+    echo "INFO: Consult https://www.sovereign-stack.org/ss-remote for more information."
     exit 1
 fi

-source "$CLUSTER_DEFINITION"
+source "$REMOTE_DEFINITION"

View File

@@ -17,8 +17,8 @@ if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
     lxc image rm "$BASE_IMAGE_VM_NAME"
 fi

-if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-    lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
+if lxc image list | grep -q "$DOCKER_BASE_IMAGE_NAME"; then
+    lxc image rm "$DOCKER_BASE_IMAGE_NAME"
 fi

 CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
@@ -39,6 +39,11 @@ if lxc network list --format csv | grep -q lxdbr0; then
     lxc network delete lxdbr0
 fi

+if lxc network list --format csv | grep -q lxdbr1; then
+    lxc network delete lxdbr1
+fi
+
 if lxc storage list --format csv | grep -q ss-base; then
     lxc storage delete ss-base
 fi
@@ -47,26 +52,4 @@ CURRENT_REMOTE="$(lxc remote get-default)"
 if ! lxc remote get-default | grep -q "local"; then
     lxc remote switch local
     lxc remote remove "$CURRENT_REMOTE"
 fi
-
-# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-#     lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
-# fi
-
-# if snap list | grep -q lxd; then
-#     sudo snap remove lxd
-#     sleep 2
-# fi
-
-# if zfs list | grep -q sovereign-stack; then
-#     sudo zfs destroy -r sovereign-stack
-# fi
-
-# if zfs list | grep -q "sovereign-stack"; then
-#     sudo zfs destroy -r "rpool/lxd"
-# fi

View File

@@ -1,10 +1,12 @@
 #!/bin/bash

-lxc list
-lxc project list
+echo "LXD REMOTE: $(lxc remote get-default)"
+lxc project list
+lxc storage list
+lxc image list
 lxc network list
 lxc profile list
-lxc image list
-lxc storage list
+lxc list
+lxc storage info ss-base
+lxc project list
+lxc remote list

deployment/update.sh (new executable file, 129 lines)
View File

@@ -0,0 +1,129 @@
#!/bin/bash
set -eu
cd "$(dirname "$0")"
# check if there are any uncommited changes. It's dangerous to
# alter production systems when you have commits to make or changes to stash.
if git update-index --refresh | grep -q "needs update"; then
echo "ERROR: You have uncommited changes! You MUST commit or stash all changes to continue."
exit 1
fi
echo "WARNING: this script backs up your existing remote and saves all data locally in the SSME."
echo " Then, all your VMs are destroyed on the remote resulting is destruction of user data."
echo " But then we re-create everything using the new codebase, then restore user data to the"
echo " newly provisioned VMs."
RESPONSE=
read -r -p "Are you sure you want to continue (y/n): ": RESPONSE
if [ "$RESPONSE" != "y" ]; then
echo "STOPPING."
exit 0
fi
USER_TARGET_PROJECT=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--project=*)
USER_TARGET_PROJECT="${i#*=}"
shift
;;
*)
echo "Unexpected option: $1"
exit 1
;;
esac
done
. ../defaults.sh
. ./remote_env.sh
for PROJECT_CHAIN in ${DEPLOYMENT_STRING//,/ }; do
NO_PARENS="${PROJECT_CHAIN:1:${#PROJECT_CHAIN}-2}"
PROJECT_PREFIX=$(echo "$NO_PARENS" | cut -d'|' -f1)
BITCOIN_CHAIN=$(echo "$NO_PARENS" | cut -d'|' -f2)
export PROJECT_PREFIX="$PROJECT_PREFIX"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
# if the user sets USER_TARGET_PROJECT, let's ensure the project exists.
if [ -n "$USER_TARGET_PROJECT" ]; then
if ! lxc project list | grep -q "$USER_TARGET_PROJECT"; then
echo "ERROR: the project does not exist! Nothing to update."
exit 1
fi
if [ "$PROJECT_NAME" != "$USER_TARGET_PROJECT" ]; then
echo "INFO: Skipping project '$PROJECT_NAME' since the system owner has used the --project switch."
exit
fi
fi
export PROJECT_NAME="$PROJECT_NAME"
export PROJECT_PATH="$PROJECT_PATH"
. ./project_env.sh
# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
LXD_NAME="$VM-${DOMAIN_NAME//./-}"
# if the VM doesn't exist, the we emit an error message and hard quit.
if ! lxc list --format csv | grep -q "$LXD_NAME"; then
echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
exit 1
fi
done
BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"
# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves them to the management machine
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.
# first, let's grab the GIT commit from the remote machine.
export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
# source the site path so we know what features it has.
source ../defaults.sh
source "$SITE_PATH/site.conf"
source ./project/domain_env.sh
# now we want to switch the git HEAD of the project subdirectory to the
# version of code that was last used
GIT_COMMIT_ON_REMOTE_HOST="$(ssh ubuntu@$BTCPAY_FQDN cat /home/ubuntu/.ss-githead)"
cd project/
git checkout "$GIT_COMMIT_ON_REMOTE_HOST"
cd -
sleep 5
# run deploy which backups up everything, but doesnt restart any services.
bash -c "./project/deploy.sh --stop --no-cert-renew --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
# call the destroy script. If user proceed, then user data is DESTROYED!
./destroy.sh
cd project/
git checkout "$TARGET_PROJECT_GIT_COMMIT"
cd -
sleep 5
# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./project/deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
done
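
With the aliases below pointing ss-update at this script, the whole backup/destroy/redeploy/restore cycle reduces to:

    ss-update                        # update every project in DEPLOYMENT_STRING
    ss-update --project=dev-regtest  # or scope the run to one LXD project (name illustrative)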

farscapian.gpg (new file, 14 lines)
View File

@@ -0,0 +1,14 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mFIEAAAAABMIKoZIzj0DAQcCAwQ3hQeRT8HLyQEtKJ5C3dKilfWhSpqkPFtfuE0I
i+MNLavAM7tL9gShij7tEcyZe0Iy2hc38TizSlQJciIdgtHUtCJEZXJlayBTbWl0
aCA8ZGVyZWtAZmFyc2NhcGlhbi5jb20+iIAEExMIABwFAgAAAAACCwkCGwMEFQgJ
CgQWAgMBAheAAh4BABYJELRD5TChThyQCxpUUkVaT1ItR1BHXMcA/2k4QtiV0eNQ
299XW4Wvoac1Be6+WTPRIaC/PYnd0pR7AP4hi5ou6uyKtqkfhLtRQHN/9ny3MBEG
whGxb/bCIzOdILhWBAAAAAASCCqGSM49AwEHAgMEI0VBpCTeIpfdH2UcWiSPYGAJ
Z1Rsp0uKf6HzZnpGRAdCTNgCh+pVBibP0Cz0pNdM7IfHSfS+OP4/Lb1B5N9BSAMB
CAeIbQQYEwgACQUCAAAAAAIbDAAWCRC0Q+UwoU4ckAsaVFJFWk9SLUdQRxM4AQCw
m24svH13uNAebQurOloy/1qZgNdXANBQQ05oi1tEyAD/eGFFVdgs5L6Hpg/GJLvo
X8bd1+1sa2d9TldbgfNfRA0=
=vZGY
-----END PGP PUBLIC KEY BLOCK-----

View File

@@ -24,7 +24,7 @@ fi
 # install snap
 if ! snap list | grep -q lxd; then
-    sudo snap install lxd --channel=5.10/stable
+    sudo snap install lxd --channel=5.11/stable
     sleep 5

 # run lxd init on the remote server./dev/nvme1n1
@@ -60,7 +60,6 @@ profiles:
       type: disk
     name: default
 projects: []
-cluster: null
 EOF
@@ -138,3 +137,14 @@ if [ "$ADDED_COMMAND" = true ]; then
     echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
 fi
+
+# As part of the install script, we pull down any other sovereign-stack git repos
+PROJECTS_SCRIPTS_REPO_URL="https://git.sovereign-stack.org/ss/project"
+PROJECTS_SCRIPTS_PATH="$(pwd)/deployment/project"
+if [ ! -d "$PROJECTS_SCRIPTS_PATH" ]; then
+    git clone "$PROJECTS_SCRIPTS_REPO_URL" "$PROJECTS_SCRIPTS_PATH"
+else
+    cd "$PROJECTS_SCRIPTS_PATH"
+    git pull origin main
+    cd -
+fi
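
install.sh now clones or fast-forwards the project repo, while defaults.sh pins TARGET_PROJECT_GIT_COMMIT and update.sh checks that commit out during migrations. If the checkout should land on the pinned commit at install time as well, the natural follow-up (not part of this changeset) would be:

    # Illustrative only; not in this diff.
    git -C "$PROJECTS_SCRIPTS_PATH" checkout "$TARGET_PROJECT_GIT_COMMIT"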

View File

@@ -1,10 +1,10 @@
 #!/bin/bash

 alias ss-deploy='/home/ubuntu/sovereign-stack/deployment/project/deploy.sh $@'
-alias ss-cluster='/home/ubuntu/sovereign-stack/deployment/cluster.sh $@'
+alias ss-remote='/home/ubuntu/sovereign-stack/deployment/remote.sh $@'
 alias ss-show='/home/ubuntu/sovereign-stack/deployment/show.sh $@'
 alias ss-reset='/home/ubuntu/sovereign-stack/deployment/reset.sh $@'
-alias ss-migrate='/home/ubuntu/sovereign-stack/deployment/migrate.sh $@'
+alias ss-update='/home/ubuntu/sovereign-stack/deployment/update.sh $@'
 alias ss-destroy='/home/ubuntu/sovereign-stack/deployment/destroy.sh $@'
 alias ss-help='cat /home/ubuntu/sovereign-stack/deployment/help.txt'
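
One caveat carried over from the old file: bash aliases are plain text substitution, so the literal $@ in these definitions expands to the (usually empty) positional parameters of the interactive shell; user arguments still work only because they are appended after the expanded text. A function form would forward arguments explicitly, e.g.:

    # Equivalent function form; aliases cannot actually forward "$@".
    ss-update() { /home/ubuntu/sovereign-stack/deployment/update.sh "$@"; }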

View File

@@ -34,7 +34,7 @@ sleep 1
 # install snap
 if ! snap list | grep -q lxd; then
-    sudo snap install lxd --channel=5.10/stable
+    sudo snap install lxd --channel=5.11/stable
     sleep 6

 # We just do an auto initialization. All we are using is the LXD client inside the management environment.