forked from ss/sovereign-stack

Compare commits: 47191ba254 ... 69d5564e44 (2 commits)

Commits in this comparison:
- 69d5564e44
- bd9a76108b
@@ -7,10 +7,6 @@
# put this into /usr/lib/udev/rules.d or /lib/udev/rules.d
# depending on your distribution

# Trezor
SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl"

# Trezor v2
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
@@ -6,7 +6,7 @@ You can update Sovereign Stack scripts on your management machine by running `gi

Once your management machine has checked out a specific version of Sovereign Stack, you will want to run the various scripts against your remotes. But before you can do that, you need to bring a bare-metal Ubuntu 22.04 cluster host under management (i.e., add it as a remote). Generally speaking, you will run `ss-cluster` to bring a new bare-metal host under management of your management machine. This can be run AFTER you have verified SSH access to the bare-metal hosts. The device SHOULD also have a DHCP reservation and DNS records in place.

After you have taken a machine under management, you can run `ss-deploy` against it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site defintions. These files are stubbed out for the user automatically and documentation guides the user through the process.
After you have taken a machine under management, you can run `ss-deploy` against it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site definitions. These files are stubbed out for the user automatically and documentation guides the user through the process.

It is the responsibility of the management machine (i.e., system owner) to run the scripts on a regular and ongoing basis to ensure active deployments stay up-to-date with the Sovereign Stack master branch.
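A minimal sketch of that workflow, assuming you have already cloned the repo and run install.sh (the hostname below is a placeholder, not a value from this repository):

# 1. confirm SSH access to the bare-metal Ubuntu 22.04 host already works
ssh ubuntu@host01.example.com true

# 2. bring the host under management of this machine
ss-cluster

# 3. confirm which lxc remote is active, then deploy against it
lxc remote list
ss-deploy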
23  check_dependencies.sh  (new executable file)
@@ -0,0 +1,23 @@
#!/bin/bash

set -eu
cd "$(dirname "$0")"


check_dependencies () {
    for cmd in "$@"; do
        if ! command -v "$cmd" >/dev/null 2>&1; then
            echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
            exit 1
        fi
    done
}

# Check system's dependencies
check_dependencies wait-for-it dig rsync sshfs lxc

# let's check to ensure the management machine is on the Baseline ubuntu 21.04
if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
    echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
    exit 1
fi
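The point of extracting this into its own file is that every entry-point script can share one copy of the check instead of carrying its own; a minimal sketch of a caller, mirroring how deploy.sh invokes it later in this diff (the caller itself is illustrative):

#!/bin/bash
set -e
cd "$(dirname "$0")"

# fail fast if the management machine is missing tooling or the expected OS
./check_dependencies.sh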
@@ -34,7 +34,7 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then

export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
export SOVEREIGN_STACK_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export PROJECT_NAME="public"
export PROJECT_NAME="regtest"
#export REGISTRY_URL="https://index.docker.io/v1/"

EOL
@@ -146,7 +146,7 @@ if ! command -v lxc >/dev/null 2>&1; then
fi

ssh -t "ubuntu@$FQDN" "
set -ex
set -e

# install ufw and allow SSH.
sudo apt update
13  defaults.sh
@@ -37,10 +37,13 @@ export DUPLICITY_BACKUP_PASSPHRASE=

export SSH_HOME="$HOME/.ssh"
export VLAN_INTERFACE=
export PASS_HOME="$HOME/.password-store"
export VM_NAME="sovereign-stack-base"
export DEV_MEMORY_MB="8096"
export DEV_CPU_COUNT="6"

export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="4"
export WWW_SERVER_MEMORY_MB="4096"

export DOCKER_IMAGE_CACHE_FQDN="registry-1.docker.io"
@@ -67,7 +70,7 @@ DEFAULT_DB_IMAGE="mariadb:10.9.3-jammy"

# run the docker stack.
export GHOST_IMAGE="ghost:5.20.0"
export GHOST_IMAGE="ghost:5.23.0"

# TODO switch to mysql. May require intricate export work for existing sites.
# THIS MUST BE COMPLETED BEFORE v1 RELEASE
@@ -78,7 +81,7 @@ export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NGINX_IMAGE="nginx:1.23.2"

# version of backup is 24.0.3
export NEXTCLOUD_IMAGE="nextcloud:25.0.0"
export NEXTCLOUD_IMAGE="nextcloud:25.0.1"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"

# TODO PIN the gitea version number.
95  deploy.sh
@@ -1,44 +1,28 @@
#!/bin/bash

set -exu
set -e
cd "$(dirname "$0")"

RESPOSITORY_PATH="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
export RESPOSITORY_PATH="$RESPOSITORY_PATH"

check_dependencies () {
    for cmd in "$@"; do
        if ! command -v "$cmd" >/dev/null 2>&1; then
            echo "This script requires \"${cmd}\" to be installed. Please run 'install.sh'."
            exit 1
        fi
    done
}

# Check system's dependencies
check_dependencies wait-for-it dig rsync sshfs lxc

# let's check to ensure the management machine is on the Baseline ubuntu 21.04
if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
    echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
    exit 1
fi
./check_dependencies.sh

DOMAIN_NAME=
RUN_CERT_RENEWAL=false
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
BACKUP_CERTS=false
BACKUP_APPS=false
BACKUP_BTCPAY=false
BACKUP_CERTS=true
BACKUP_APPS=true
BACKUP_BTCPAY=true
BACKUP_BTCPAY_ARCHIVE_PATH=
RESTORE_BTCPAY=false
BTCPAY_RESTORE_ARCHIVE_PATH=
BTCPAY_LOCAL_BACKUP_PATH=
SKIP_BTCPAY=false
UPDATE_BTCPAY=false
RECONFIGURE_BTCPAY_SERVER=false
CLUSTER_NAME="$(lxc remote get-default)"
STOP_SERVICES=false
USER_SAYS_YES=false

# grab any modifications from the command line.
for i in "$@"; do
@@ -59,6 +43,11 @@ for i in "$@"; do
        BACKUP_CERTS=true
        shift
        ;;
    --no-backup-www)
        BACKUP_CERTS=false
        BACKUP_APPS=false
        shift
        ;;
    --stop)
        STOP_SERVICES=true
        shift
@@ -67,6 +56,10 @@ for i in "$@"; do
        DOMAIN_NAME="${i#*=}"
        shift
        ;;
    --backup-archive-path=*)
        BACKUP_BTCPAY_ARCHIVE_PATH="${i#*=}"
        shift
        ;;
    --update-btcpay)
        UPDATE_BTCPAY=true
        shift
@@ -83,22 +76,18 @@ for i in "$@"; do
        BACKUP_APPS=true
        shift
        ;;
    --backup-btcpay)
        BACKUP_BTCPAY=true
        shift
        ;;
    --restore-archive=*)
        BTCPAY_RESTORE_ARCHIVE_PATH="${i#*=}"
        shift
        ;;
    --renew-certs)
        RUN_CERT_RENEWAL=true
    --no-cert-renew)
        RUN_CERT_RENEWAL=false
        shift
        ;;
    --reconfigure-btcpay)
        RECONFIGURE_BTCPAY_SERVER=true
        shift
        ;;
    -y)
        USER_SAYS_YES=true
        shift
        ;;
    *)
        echo "Unexpected option: $1"
        exit 1
@@ -106,20 +95,16 @@ for i in "$@"; do
    esac
done


# do some CLI checking.
if [ "$RESTORE_BTCPAY" = true ] && [ ! -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then
    echo "ERROR: The restoration archive is not specified. Ensure --restore-archive= is set on the command line."
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
    echo "ERROR: BACKUP_BTCPAY_ARCHIVE_PATH was not set event when the RESTORE_BTCPAY = true. "
    exit 1
fi

# set up our default paths.
source ./defaults.sh

export CACHES_DIR="$HOME/ss-cache"
export DOMAIN_NAME="$DOMAIN_NAME"
export REGISTRY_DOCKER_IMAGE="registry:2"
export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH"
export RESTORE_WWW="$RESTORE_WWW"
export STOP_SERVICES="$STOP_SERVICES"
export BACKUP_CERTS="$BACKUP_CERTS"
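With the reworked check, a BTCPay restore has to name the backup archive explicitly on the command line; a hedged usage sketch (the archive path below is a placeholder, and the flag combination mirrors the one migrate.sh uses later in this diff):

# restore from a previously created archive; --backup-archive-path must point at an existing tarball
./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path="$HOME/backups/btcpay-snapshot.tar.gz"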
@@ -129,6 +114,9 @@ export BACKUP_BTCPAY="$BACKUP_BTCPAY"
export RUN_CERT_RENEWAL="$RUN_CERT_RENEWAL"
export CLUSTER_NAME="$CLUSTER_NAME"
export CLUSTER_PATH="$CLUSTERS_DIR/$CLUSTER_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"


# ensure our cluster path is created.
mkdir -p "$CLUSTER_PATH"
@@ -149,7 +137,7 @@ export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"

#########################################
if [ ! -f "$CLUSTER_DEFINITION" ]; then
    echo "ERROR: The cluster defintion could not be found. You may need to re-run 'ss-cluster create'."
    echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
    exit 1
fi
@@ -171,6 +159,7 @@ function instantiate_vms {
    VPS_HOSTNAME=

    for VIRTUAL_MACHINE in www btcpayserver; do
        export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
        FQDN=

        export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
@@ -252,16 +241,13 @@ function instantiate_vms {
        export VIRTUAL_MACHINE="$VIRTUAL_MACHINE"
        export REMOTE_CERT_DIR="$REMOTE_CERT_BASE_DIR/$FQDN"
        export MAC_ADDRESS_TO_PROVISION="$MAC_ADDRESS_TO_PROVISION"
        export BTCPAY_LOCAL_BACKUP_PATH="$SITE_PATH/backups/btcpayserver/$BACKUP_TIMESTAMP"
        export BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$UNIX_BACKUP_TIMESTAMP.tar.gz"

        ./deployment/deploy_vms.sh

        # if the local docker client isn't logged in, do so;
        # this helps prevent docker pull errors since they throttle.
        if [ ! -f "$HOME/.docker/config.json" ]; then
            echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
        fi
        # if [ ! -f "$HOME/.docker/config.json" ]; then
        #     echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
        # fi

        # this tells our local docker client to target the remote endpoint via SSH
        export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
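Setting DOCKER_HOST this way makes every docker command that follows in the script talk to the daemon on the remote www VM instead of the management machine; a minimal sketch, assuming a primary www FQDN under the stubbed domain0.tld (the hostname is illustrative):

# after this export, plain "docker ..." operates on the remote engine over SSH
export DOCKER_HOST="ssh://ubuntu@www.domain0.tld"
docker info        # reports the remote daemon, not the local one
docker stack ls    # lists stacks deployed on the remote host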
@@ -313,7 +299,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL

    chmod 0744 "$SITE_DEFINITION_PATH"
    echo "INFO: we stubbed a new site_defintion for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
    echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
    exit 1

fi
@@ -359,6 +345,10 @@ export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
export BTC_CHAIN="regtest|testnet|mainnet"
export PRIMARY_DOMAIN="domain0.tld"
export OTHER_SITES_LIST="domain1.tld,domain2.tld,domain3.tld"
export BTCPAY_SERVER_CPU_COUNT="4"
export BTCPAY_SERVER_MEMORY_MB="4096"
export WWW_SERVER_CPU_COUNT="6"
export WWW_SERVER_MEMORY_MB="4096"

EOL
@@ -373,7 +363,12 @@ fi
source "$PROJECT_DEFINITION_PATH"

# the DOMAIN_LIST is a complete list of all our domains. We often iterate over this list.
export DOMAIN_LIST="${PRIMARY_DOMAIN},${OTHER_SITES_LIST}"
DOMAIN_LIST="${PRIMARY_DOMAIN}"
if [ -n "$OTHER_SITES_LIST" ]; then
    DOMAIN_LIST="${DOMAIN_LIST},${OTHER_SITES_LIST}"
fi

export DOMAIN_LIST="$DOMAIN_LIST"
export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))

# let's provision our primary domain first.
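A quick sketch of what the guarded construction buys, using the illustrative domains from the stubbed project definition above (values are examples, not requirements):

# with OTHER_SITES_LIST set:
#   PRIMARY_DOMAIN="domain0.tld"  OTHER_SITES_LIST="domain1.tld,domain2.tld"
#   -> DOMAIN_LIST="domain0.tld,domain1.tld,domain2.tld"  and DOMAIN_COUNT=3 (two commas + 1)
#
# with OTHER_SITES_LIST empty, the old single-line export produced "domain0.tld,"
# (a trailing comma, so DOMAIN_COUNT would have been 2); the guarded version yields:
#   -> DOMAIN_LIST="domain0.tld"  and DOMAIN_COUNT=1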
@@ -405,6 +400,8 @@ if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
    bash -c "./deployment/www/go.sh"
fi

export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
    bash -c "./deployment/btcpayserver/go.sh"
fi
@@ -9,6 +9,8 @@ cd "$(dirname "$0")"

echo "INFO: Starting BTCPAY Backup script for host '$BTCPAY_FQDN'."

sleep 5

ssh "$BTCPAY_FQDN" "mkdir -p $REMOTE_HOME/backups; cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"

# TODO; not sure if this is necessary, but we want to give the VM additional time to take down all services
@@ -25,8 +27,13 @@ ssh "$BTCPAY_FQDN" "cd $REMOTE_HOME/; sudo BTCPAY_BASE_DIRECTORY=$REMOTE_HOME BT
ssh "$BTCPAY_FQDN" "sudo cp /var/lib/docker/volumes/backup_datadir/_data/backup.tar.gz $REMOTE_HOME/backups/btcpay.tar.gz"
ssh "$BTCPAY_FQDN" "sudo chown ubuntu:ubuntu $REMOTE_HOME/backups/btcpay.tar.gz"

# if the backup archive path is not set, then we set it. It is usually set only when we are running a migration script.
BTCPAY_LOCAL_BACKUP_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver"
if [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
    BACKUP_BTCPAY_ARCHIVE_PATH="$BTCPAY_LOCAL_BACKUP_PATH/$(date +%s).tar.gz"
fi

mkdir -p "$BTCPAY_LOCAL_BACKUP_PATH"
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH"
scp "$BTCPAY_FQDN:$REMOTE_HOME/backups/btcpay.tar.gz" "$BACKUP_BTCPAY_ARCHIVE_PATH"

echo "INFO: Created backup archive '$BTCPAY_LOCAL_BACKUP_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
echo "INFO: Created backup archive '$BACKUP_BTCPAY_ARCHIVE_PATH' for host '$BTCPAY_FQDN'."
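In the common (non-migration) case the caller leaves BACKUP_BTCPAY_ARCHIVE_PATH empty, so the archive lands in a predictable, epoch-timestamped spot under the site's backup tree; an illustrative sketch (the SITES_PATH value and epoch number are placeholders):

# illustrative values only; SITES_PATH and PRIMARY_DOMAIN come from your own definitions
#   SITES_PATH="$HOME/ss-sites"   PRIMARY_DOMAIN="domain0.tld"
# with BACKUP_BTCPAY_ARCHIVE_PATH unset, the derived archive path looks like:
#   $HOME/ss-sites/domain0.tld/backups/btcpayserver/1668000000.tar.gz
# a migration passes an explicit path from the command line instead, e.g.:
#   ./deploy.sh --stop --backup-archive-path="$HOME/migrations/btcpay-snapshot.tar.gz"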
@@ -28,6 +28,7 @@ elif [ "$RESTORE_BTCPAY" = true ]; then

    RUN_SERVICES=true
    OPEN_URL=true
    BACKUP_BTCPAY=false

elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
    # the administrator may have indicated a reconfig;
@@ -44,7 +45,7 @@ if [ "$BACKUP_BTCPAY" = true ]; then
    ./backup_btcpay.sh
fi

if [ "$RUN_SERVICES" = true ]; then
if [ "$RUN_SERVICES" = true ] && [ "$STOP_SERVICES" = false ]; then
    # The default is to resume services, though admin may want to keep services off (eg., for a migration)
    # we bring the services back up by default.
    ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
@@ -3,14 +3,18 @@
set -e
cd "$(dirname "$0")"

if [ -f "$BTCPAY_RESTORE_ARCHIVE_PATH" ]; then
if [ "$RESTORE_BTCPAY" = false ]; then
    exit 0
fi

if [ -f "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
    # push the restoration archive to the remote server
    echo "INFO: Restoring BTCPAY Server: $BTCPAY_RESTORE_ARCHIVE_PATH"
    echo "INFO: Restoring BTCPAY Server: $BACKUP_BTCPAY_ARCHIVE_PATH"

    REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/btcpayserver"
    ssh "$FQDN" mkdir -p "$REMOTE_BACKUP_PATH"
    REMOTE_BTCPAY_ARCHIVE_PATH="$REMOTE_BACKUP_PATH/btcpay.tar.gz"
    scp "$BTCPAY_RESTORE_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"
    scp "$BACKUP_BTCPAY_ARCHIVE_PATH" "$FQDN:$REMOTE_BTCPAY_ARCHIVE_PATH"

    # we clean up any old containers first before restoring.
    ssh "$FQDN" docker system prune -f
@@ -70,3 +70,6 @@ scp "$SITE_PATH/btcpay.sh" "ubuntu@$FQDN:$REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "chmod 0744 $REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "sudo bash -c $REMOTE_HOME/btcpay_setup.sh"
ssh "$BTCPAY_FQDN" "touch $REMOTE_HOME/btcpay.complete"

# lets give time for the containers to spin up
sleep 10
@@ -33,7 +33,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then

    # create a base image if needed and instantiate a VM.
    if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
        echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site defintion."
        echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
        echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
        exit 1
    fi
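A hedged sketch of the site_definition entries this check expects; the MAC values are placeholders and must match the DHCP reservations and DNS records you have already set up on the LAN:

# in "$SITES_PATH/<your-domain>/site_definition"  (location shown for illustration)
export WWW_SERVER_MAC_ADDRESS="aa:bb:cc:dd:ee:01"        # placeholder value
export BTCPAYSERVER_MAC_ADDRESS="aa:bb:cc:dd:ee:02"      # placeholder value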
@@ -17,11 +17,25 @@ YAML_PATH="$PROJECT_PATH/cloud-init/$FILENAME"

# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat > "$YAML_PATH" <<EOF
config:
  limits.cpu: "${DEV_CPU_COUNT}"
  limits.memory: "${DEV_MEMORY_MB}MB"
EOF


if [ "$VIRTUAL_MACHINE" = www ]; then
    cat >> "$YAML_PATH" <<EOF
  limits.cpu: "${WWW_SERVER_CPU_COUNT}"
  limits.memory: "${WWW_SERVER_MEMORY_MB}MB"

EOF

else [ "$VIRTUAL_MACHINE" = btcpayserver ];
    cat >> "$YAML_PATH" <<EOF
  limits.cpu: "${BTCPAY_SERVER_CPU_COUNT}"
  limits.memory: "${BTCPAY_SERVER_MEMORY_MB}MB"

EOF

fi

# if VIRTUAL_MACHINE=sovereign-stack then we are building the base image.
if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
    # this is for the base image only...
@@ -161,7 +175,7 @@ if [ "$LXD_HOSTNAME" = "sovereign-stack" ]; then
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io
- echo "alias ll='ls -lah'" >> /home/ubuntu/.bash_profile
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- sudo curl -s -L "https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
- sudo chmod +x /usr/local/bin/docker-compose
- sudo apt-get install -y openssh-server
@@ -259,7 +273,8 @@ fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$LXD_HOSTNAME"; then
    lxc profile create "$LXD_HOSTNAME"
fi

# configure the profile with our generated cloud-init.yml file.
cat "$YAML_PATH" | lxc profile edit "$LXD_HOSTNAME"

fi
@@ -1,6 +1,6 @@
#!/bin/bash

set -ex
set -e


# let's do a refresh of the certificates. Let's Encrypt will not run if it's not time.
@@ -1,6 +1,6 @@
#!/bin/bash

set -exu
set -eu
cd "$(dirname "$0")"

# Create the nginx config file which covers all domains.
@@ -84,6 +84,18 @@ done

./stop_docker_stacks.sh




# if [ "$DEPLOY_ONION_SITE" = true ]; then
#     # ensure the tor image is built
#     docker build -t tor:latest ./tor
@@ -1,6 +1,6 @@
#!/bin/bash

set -eu
set -eux
cd "$(dirname "$0")"

FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
@@ -10,6 +10,9 @@ if [ "$FILE_COUNT" = 0 ]; then
    exit 0
fi

# if the user said -y at the cli, we can skip this.
if [ "$USER_SAYS_YES" = false ]; then

    RESPONSE=
    read -r -p "Are you sure you want to restore the local path '$LOCAL_BACKUP_PATH' to the remote server at '$PRIMARY_WWW_FQDN' (y/n)": RESPONSE
    if [ "$RESPONSE" != "y" ]; then
@@ -17,6 +20,8 @@ if [ "$RESPONSE" != "y" ]; then
        exit 0
    fi

fi

# delete the target backup path so we can push restoration files from the management machine.
ssh "$PRIMARY_WWW_FQDN" sudo rm -rf "$REMOTE_SOURCE_BACKUP_PATH"
@@ -27,5 +32,6 @@ ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
scp -r "$LOCAL_BACKUP_PATH" "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH"

# now we run duplicity to restore the archive.
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$BACKUP_TIMESTAMP" "$REMOTE_SOURCE_BACKUP_PATH/"
ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity --force restore "file://$REMOTE_BACKUP_PATH/$APP" "$REMOTE_SOURCE_BACKUP_PATH/"

ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_BACKUP_PATH"
@@ -30,7 +30,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
    export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

    # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP"
    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"

    # ensure our local backup path exists.
    if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
@@ -40,7 +40,7 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
    if [ "$RESTORE_WWW" = true ]; then
        ./restore_path.sh
        #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
    elif [ "$BACKUP_APPS" = true ]; then
    else
        # if we're not restoring, then we may or may not back up.
        ./backup_path.sh
    fi
@@ -57,14 +57,13 @@ if docker stack list --format "{{.Name}}" | grep -q reverse-proxy; then
    # wait for all docker containers to stop.
    # TODO see if there's a way to check for this.
    sleep 15

fi

#
if [ "$STOP_SERVICES" = true ]; then
    echo "STOPPING as indicated by the --stop flag."

    exit 1
    exit 0
fi

# generate the certs and grab a backup
@@ -72,19 +71,23 @@ if [ "$RUN_CERT_RENEWAL" = true ]; then
    ./generate_certs.sh
fi

# Back each domain's certificates under /home/ubuntu/letsencrypt/domain
# let's backup all our letsencrypt certs
export APP="letsencrypt"
for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
    export DOMAIN_NAME="$DOMAIN_NAME"
    export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"

    # source the site path so we know what features it has.
    source "$RESPOSITORY_PATH/reset_env.sh"
    source "$SITE_PATH/site_definition"
    source "$RESPOSITORY_PATH/domain_env.sh"

    # these variable are used by both backup/restore scripts.
    export APP="letsencrypt"
    export REMOTE_BACKUP_PATH="$REMOTE_HOME/backups/www/$APP/$DOMAIN_IDENTIFIER"
    export REMOTE_SOURCE_BACKUP_PATH="$REMOTE_HOME/$APP/$DOMAIN_NAME"

    # ensure our local backup path exists so we can pull down the duplicity archive to the management machine.
    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP/$BACKUP_TIMESTAMP"
    export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
    mkdir -p "$LOCAL_BACKUP_PATH"

    if [ "$RESTORE_WWW" = true ]; then
@@ -95,6 +98,5 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do
    elif [ "$BACKUP_APPS" = true ]; then
        # if we're not restoring, then we may or may not back up.
        ./backup_path.sh

    fi
done
@@ -103,9 +103,10 @@ EOL
EOL
    fi

    if [ "$STOP_SERVICES" = false ]; then
        docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-ghost-$LANGUAGE_CODE"

        sleep 2
    fi

done # language code
@@ -80,9 +80,10 @@ EOL
${DBNET_NAME}:
EOL

    if [ "$STOP_SERVICES" = false ]; then
        docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-gitea-$LANGUAGE_CODE"
        sleep 1

    fi
fi

done
@@ -74,7 +74,9 @@ networks:

EOL

    if [ "$STOP_SERVICES" = false ]; then
        docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nextcloud-en"

        sleep 1
    fi
fi
done
@@ -1,6 +1,6 @@
#!/bin/bash

set -exu
set -eu
cd "$(dirname "$0")"

@@ -129,5 +129,9 @@ EOL
    done
done

if [ "$STOP_SERVICES" = false ]; then
    docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
    # iterate over all our domains and create the nginx config file.
    sleep 1
fi
@@ -1,6 +1,6 @@
#!/bin/bash

set -exu
set -eu
cd "$(dirname "$0")"

docker pull "$NOSTR_RELAY_IMAGE"
@@ -75,8 +75,10 @@ messages_per_sec = 3
#max_event_bytes = 131072
EOL

    if [ "$STOP_SERVICES" = false ]; then
        docker stack deploy -c "$DOCKER_YAML_PATH" "$DOMAIN_IDENTIFIER-nostr-$LANGUAGE_CODE"
        sleep 1
    fi

fi

@@ -15,16 +15,11 @@ export REMOTE_GITEA_PATH="$REMOTE_HOME/gitea"
export BTC_CHAIN="$BTC_CHAIN"
export BTCPAY_ADDITIONAL_HOSTNAMES="$BTCPAY_ADDITIONAL_HOSTNAMES"

BACKUP_TIMESTAMP="$(date +"%Y-%m")"
UNIX_BACKUP_TIMESTAMP="$(date +%s)"

SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}' )"
export DOMAIN_IDENTIFIER="${SHASUM_OF_DOMAIN: -6}"
echo "$DOMAIN_IDENTIFIER" > "$SITE_PATH/domain_id"

export BACKUP_TIMESTAMP="$BACKUP_TIMESTAMP"
export UNIX_BACKUP_TIMESTAMP="$UNIX_BACKUP_TIMESTAMP"

export LANGUAGE_CODE_COUNT=$(("$(echo "$SITE_LANGUAGE_CODES" | tr -cd , | wc -c)"+1))

STACK_NAME="$DOMAIN_IDENTIFIER-en"
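The six-character identifier is just the tail of the domain's sha256 digest, which keeps docker stack names short and stable per domain; a quick sketch using the illustrative domain from earlier in this diff (the digest shown is not computed here, only its shape):

# derive the identifier for an example domain; stack names then look like "<id>-en"
DOMAIN_NAME="domain0.tld"
SHASUM_OF_DOMAIN="$(echo -n "$DOMAIN_NAME" | sha256sum | awk '{print $1;}')"
echo "${SHASUM_OF_DOMAIN: -6}"   # last six hex characters of the digest, e.g. something like "3f9a1c"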
43  install.sh
@@ -17,14 +17,14 @@ fi
sudo apt-get update

# TODO REVIEW management machine software requirements
# is docker-ce actually needed here? prefer to move docker registry
# to a host on SERVERS LAN so that it can operate
# TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
# virt-manager allows us to run type-1 vms desktop version. We use remote viewer to get a GUI for the VM
sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
    apt-transport-https ca-certificates lsb-release \
    docker-ce-cli docker-ce containerd.io docker-compose-plugin \
    apt-transport-https ca-certificates lsb-release docker-ce-cli \
    python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses \
    libcanberra-gtk-module
    libcanberra-gtk-module virt-manager pass


# for trezor installation
pip3 install setuptools wheel
@@ -34,36 +34,31 @@ if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
    sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
fi

# TODO initialize pass here; need to first initialize Trezor-T certificates.


# install lxd as a snap if it's not installed. We only really use the client part of this package
# on the management machine.
if ! snap list | grep -q lxd; then
    sudo snap install lxd --candidate

    # initialize the daemon for auto use. Most of the time on the management machine,
    # we only use the LXC client -- not the daemon. HOWEVER, there are circustances where
    # you might want to run the management machine in a LXD-based VM. We we init the lxd daemon
    # after havning installed it so it'll be available for use.
    # see https://www.sovereign-stack.org/management/
    sudo lxd init --auto --storage-pool=default --storage-create-loop=50 --storage-backend=zfs
fi

# make ss-deploy available to the user
if ! groups | grep -q docker; then
    sudo groupadd docker
fi

sudo usermod -aG docker "$USER"

# make the Sovereign Stack commands available to the user.
# make the Sovereign Stack commands available to the user via ~/.bashrc
# we use ~/.bashrc
ADDED_COMMAND=false
if ! < "$HOME/.bashrc" grep -q "ss-deploy"; then
    echo "alias ss-deploy='/home/$USER/sovereign-stack/deploy.sh \$@'" >> "$HOME/.bashrc"
    ADDED_COMMAND=true
fi

if ! < "$HOME/.bashrc" grep -q "ss-cluster"; then
    echo "alias ss-cluster='/home/$USER/sovereign-stack/cluster.sh \$@'" >> "$HOME/.bashrc"
    ADDED_COMMAND=true
fi

if ! < "$HOME/.bashrc" grep -q "ss-projects"; then
    echo "alias ss-projects='/home/$USER/sovereign-stack/projects.sh \$@'" >> "$HOME/.bashrc"
for SS_COMMAND in deploy cluster; do
    if ! < "$HOME/.bashrc" grep -q "ss-$SS_COMMAND"; then
        echo "alias ss-${SS_COMMAND}='$(pwd)/${SS_COMMAND}.sh \$@'" >> "$HOME/.bashrc"
        ADDED_COMMAND=true
    fi
done

if [ "$ADDED_COMMAND" = true ]; then
    echo "WARNING! You need to run 'source ~/.bashrc' before continuing."
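Under the new loop the aliases are anchored to wherever the repository was cloned (the `$(pwd)` captured at install time) rather than a hard-coded home-directory path; assuming the repo sits at /home/ubuntu/sovereign-stack when install.sh runs, ~/.bashrc ends up with lines roughly like:

alias ss-deploy='/home/ubuntu/sovereign-stack/deploy.sh $@'
alias ss-cluster='/home/ubuntu/sovereign-stack/cluster.sh $@'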
91  migrate.sh  (Normal file → Executable file)
@@ -1,12 +1,89 @@
# move all migration logic into this script.
#!/bin/bash

if machine exists, then
set -eu
cd "$(dirname "$0")"

get backup.
don't restart services.
CURRENT_CLUSTER="$(lxc remote get-default)"

Then
if echo "$CURRENT_CLUSTER" | grep -q "production"; then
    echo "ERROR: YOU MUST COMMENT THIS OUT BEFORE YOU CAN RUN MIGRATE ON PROUDCTION/."
    exit 1
fi

delete machine.
source ./defaults.sh

Then re-run script with --restor option.
export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"

# ensure the cluster definition exists.
if [ ! -f "$CLUSTER_DEFINITION" ]; then
    echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
    exit 1
fi

source "$CLUSTER_DEFINITION"

# source project defition.
# Now let's load the project definition.
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"
source "$PROJECT_DEFINITION_PATH"

export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"

# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
    LXD_NAME="$VM-${DOMAIN_NAME//./-}"

    # if the VM doesn't exist, the we emit an error message and hard quit.
    if ! lxc list --format csv | grep -q "$LXD_NAME"; then
        echo "ERROR: there is no VM named '$LXD_NAME'. You probably need to run ss-deploy again."
        exit 1
    fi
done

BTCPAY_RESTORE_ARCHIVE_PATH="$SITES_PATH/$PRIMARY_DOMAIN/backups/btcpayserver/$(date +%s).tar.gz"
echo "INFO: The BTCPAY_RESTORE_ARCHIVE_PATH for this migration will be: $BTCPAY_RESTORE_ARCHIVE_PATH"

# first we run ss-deploy --stop
# this grabs a backup of all data (backups are on by default) and saves them to the management machine
# the --stop flag ensures that services do NOT come back online.
# by default, we grab a backup.

bash -c "./deploy.sh --stop --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"

RESPONSE=
read -r -p "Are you sure you want to continue the migration? We have a backup TODO.": RESPONSE
if [ "$RESPONSE" != "y" ]; then
    echo "STOPPING."
    exit 0
fi


for VM in www btcpayserver; do
    LXD_NAME="$VM-${DOMAIN_NAME//./-}"
    lxc delete -f "$LXD_NAME"

    lxc profile delete "$LXD_NAME"
done


# delete the base image so it can be created.
if lxc list | grep -q sovereign-stack-base; then
    lxc delete -f sovereign-stack-base
fi

# these only get initialzed upon creation, so we MUST delete here so they get recreated.
if lxc profile list | grep -q sovereign-stack; then
    lxc profile delete sovereign-stack
fi

if lxc image list | grep -q "sovereign-stack-base"; then
    lxc image rm sovereign-stack-base
fi

# Then we can run a restore operation and specify the backup archive at the CLI.
bash -c "./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path=$BTCPAY_RESTORE_ARCHIVE_PATH"
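Read end to end, the new migrate.sh boils down to two deploy.sh invocations with VM and image teardown in between; a condensed sketch of that sequence (the archive path is whatever the script computed above):

# 1. back everything up and leave services stopped
./deploy.sh --stop --backup-archive-path="$BTCPAY_RESTORE_ARCHIVE_PATH"

# 2. delete the per-site VMs, their profiles, and the stale base image (see the lxc delete calls above)

# 3. redeploy from scratch and restore the archives without prompting
./deploy.sh -y --restore-www --restore-btcpay --backup-archive-path="$BTCPAY_RESTORE_ARCHIVE_PATH"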