1
1

Compare commits

..

No commits in common. "35a8bfc2c9543fa0dcfca774c6dcae392409b2a3" and "9a4f90d27bb5f3b2094d69d36e3c06c1329620f5" have entirely different histories.

15 changed files with 108 additions and 98 deletions

View File

@ -13,8 +13,8 @@
"shellcheck.customArgs": [],
"shellcheck.ignorePatterns": {},
"shellcheck.exclude": [
// "SC1090",
// "SC1091",
"SC1090",
"SC1091",
"SC2029"
],
"terminal.integrated.fontFamily": "monospace",

51
archive/umbrel/go.sh Executable file
View File

@ -0,0 +1,51 @@
#!/bin/bash
# Deploy or update an Umbrel node on a remote Ubuntu host over SSH.
#
# Required environment variables:
#   FQDN      - SSH-reachable hostname of the target machine.
#   BTC_CHAIN - bitcoin network passed to Umbrel (e.g. mainnet, testnet, regtest).
set -eu

# Fail fast with a clear message instead of erroring mid-ssh under 'set -u'.
: "${FQDN:?FQDN must be set}"
: "${BTC_CHAIN:?BTC_CHAIN must be set}"

# First, make sure we have the latest code. We use git over HTTPS and store
# it in ~/umbrel -- the only folder we need to back up. On subsequent runs,
# stop the running services so the update below is safe.
ssh "$FQDN" "
set -x
cd /home/ubuntu
if [ ! -d ./umbrel ]; then
    git clone https://github.com/getumbrel/umbrel.git ./umbrel
else
    if [ -f ./umbrel/scripts/stop ]; then
        sudo ./umbrel/scripts/stop
    fi
fi
"

# # DO SOME BACKUP OPERATION
# ssh "$FQDN" "
# set -x
# mkdir -p /home/ubuntu/backup
# sudo PASSPHRASE=${DUPLICITY_BACKUP_PASSPHRASE:-} duplicity --exclude ${REMOTE_HOME:-}/umbrel/bitcoin/blocks ${REMOTE_HOME:-}/umbrel file://${REMOTE_BACKUP_PATH:-}
# sudo chown -R ubuntu:ubuntu ${REMOTE_BACKUP_PATH:-}
# "

# Update to the pinned release tag, then start services back up.
# NOTE: $BTC_CHAIN is expanded locally before the command string is sent.
ssh "$FQDN" "
set -e
cd /home/ubuntu/umbrel
git config pull.rebase true
git fetch --all --tags
git checkout master
git pull
git checkout tags/v0.4.18
sudo NETWORK=$BTC_CHAIN /home/ubuntu/umbrel/scripts/start
"

# Wait for the web UI (and lightning) to come online.
# BUG FIX: '-t -60' passed a negative timeout to wait-for-it; use 60 seconds.
wait-for-it -t 60 "$FQDN:80"
xdg-open "http://$FQDN" > /dev/null 2>&1

View File

@ -3,9 +3,9 @@
set -eux
cd "$(dirname "$0")"
# This script is meant to be executed on the management machine.
# it reaches out to an SSH endpoint and provisions that machine
# to use LXD.
# NOTE This script is meant to be executed on your LXD bare metal servers. This script
# ensures that the LXD daemon is installed via snap package, then initialize the daemon
# to operate in clustered mode
COMMAND="${1:-}"
DATA_PLANE_MACVLAN_INTERFACE=
@ -15,7 +15,6 @@ if [ "$COMMAND" = create ]; then
# override the cluster name.
CLUSTER_NAME="${2:-}"
if [ -z "$CLUSTER_NAME" ]; then
echo "ERROR: The cluster name was not provided."
@ -62,7 +61,6 @@ EOL
if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
FQDN="${3:-}"
shift
if [ -z "$FQDN" ]; then
echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."
@ -87,7 +85,7 @@ EOL
shift
;;
*)
# unknown option
;;
esac
done
@ -153,16 +151,14 @@ EOL
exit 1
fi
if ! command -v lxc >/dev/null 2>&1; then
if lxc profile list --format csv | grep -q sovereign-stack; then
lxc profile delete sovereign-stack
sleep 1
fi
if lxc profile list --format csv | grep -q sovereign-stack; then
lxc profile delete sovereign-stack
sleep 1
fi
if lxc network list --format csv | grep -q lxdbr0; then
lxc network delete lxdbr0
sleep 1
fi
if lxc network list --format csv | grep -q lxdfanSS; then
lxc network delete lxdfanSS
sleep 1
fi
ssh -t "ubuntu@$FQDN" "
@ -188,7 +184,7 @@ fi
fi
# stub out the lxd init file for the remote SSH endpoint.
CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/$CLUSTER_NAME-lxd_profile.yml"
CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/$CLUSTER_NAME-primary.yml"
cat >"$CLUSTER_MASTER_LXD_INIT" <<EOF
config:
core.https_address: ${MGMT_PLANE_IP}:8443
@ -196,16 +192,26 @@ config:
images.auto_update_interval: 15
networks:
- name: lxdbr0
type: bridge
config:
ipv4.nat: "true"
ipv6.nat: "true"
managed: true
- config:
bridge.mode: fan
fan.underlay_subnet: auto
description: ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-},${DISK_TO_USE:-}
name: lxdfanSS
type: ""
project: default
storage_pools: []
profiles:
- config: {}
description: "inter-vm communication across lxd hosts."
devices:
eth0:
name: eth0
network: lxdfanSS
type: nic
name: sovereign-stack
projects: []
cluster:
server_name: ${CLUSTER_NAME}

View File

@ -3,7 +3,6 @@
set -eu
export DEPLOY_WWW_SERVER=false
export WWW_SERVER_MAC_ADDRESS=
export DEPLOY_BTCPPAY_SERVER=false
export DEPLOY_GHOST=true
@ -14,7 +13,6 @@ export DEPLOY_GITEA=false
export WWW_HOSTNAME="www"
export BTCPAY_HOSTNAME="btcpay"
export BTCPAY_HOSTNAME_IN_CERT="pay"
export NEXTCLOUD_HOSTNAME="nextcloud"
export GITEA_HOSTNAME="git"
export NOSTR_HOSTNAME="relay"
@ -108,18 +106,18 @@ DEFAULT_DB_IMAGE="mariadb:10.8.3-jammy"
export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"
# run the docker stack.
export GHOST_IMAGE="ghost:5.8.2"
export GHOST_IMAGE="ghost:5.7.0"
export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
export NGINX_IMAGE="nginx:1.23.1"
export NEXTCLOUD_IMAGE="nextcloud:24.0.3"
export NGINX_IMAGE="nginx:1.23.0"
export NEXTCLOUD_IMAGE="nextcloud:24.0.2"
export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"
export GITEA_IMAGE="gitea/gitea:latest"
export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"
export SOVEREIGN_STACK_MAC_ADDRESS=
export WWW_MAC_ADDRESS=
export BTCPAY_MAC_ADDRESS=
export SOVEREIGN_STACK_MAC_ADDRESS="aa:bb:cc:00:00:03"
export WWW_MAC_ADDRESS="aa:bb:cc:00:00:00"
export BTCPAY_MAC_ADDRESS="aa:bb:cc:00:00:01"
export CLUSTERS_DIR="$HOME/ss-clusters"
export SITES_PATH="$HOME/ss-sites"

View File

@ -18,7 +18,7 @@ check_dependencies wait-for-it dig rsync sshfs lxc docker-machine
# TODO remove dependency on Docker-machine. That's what we use to provision VM on 3rd party vendors. Looking for LXD endpoint.
# let's check to ensure the management machine is on the baseline Ubuntu 22.04
if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then
echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
exit 1
fi
@ -30,7 +30,6 @@ VPS_HOSTING_TARGET=lxd
RUN_CERT_RENEWAL=true
USER_NO_BACKUP=false
USER_RUN_RESTORE=false
RESTORE_WWW_USERDATA=true
RESTORE_BTCPAY=false
USER_SKIP_WWW=false
USER_SKIP_BTCPAY=false
@ -95,8 +94,7 @@ for i in "$@"; do
shift
;;
*)
echo "Unexpected option: $1"
exit 1
# unknown option
;;
esac
done
@ -256,7 +254,6 @@ function run_domain {
if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
mkdir -p "$LOCAL_BACKUP_PATH"
BACKUP_PATH_CREATED=true
RESTORE_WWW_USERDATA=false
fi
DDNS_HOST=
@ -295,7 +292,6 @@ function run_domain {
export FQDN="$DDNS_HOST.$DOMAIN_NAME"
export LXD_VM_NAME="${FQDN//./-}"
export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH"
export RESTORE_WWW_USERDATA="$RESTORE_WWW_USERDATA"
# This next section of if statements is our sanity checking area.
if [ "$VPS_HOSTING_TARGET" = aws ]; then
@ -374,6 +370,8 @@ function run_domain {
exit 1
fi
bash -c ./deployment/stub_nginxconf.sh
MACHINE_EXISTS=false
if [ "$VPS_HOSTING_TARGET" = aws ] && docker-machine ls -q | grep -q "$FQDN"; then
MACHINE_EXISTS=true
@ -384,7 +382,7 @@ function run_domain {
fi
if [ "$USER_NO_BACKUP" = true ]; then
RUN_BACKUP=false
RUN_BACKUP=true
fi
if [ "$MACHINE_EXISTS" = true ]; then
@ -465,7 +463,6 @@ export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
## WWW
export DEPLOY_WWW_SERVER=true
export WWW_SERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
# Deploy APPS to www
export DEPLOY_GHOST=true
@ -493,7 +490,6 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
## BTCPAY SERVER; if true, then a BTCPay server is deployed.
export DEPLOY_BTCPAY_SERVER=false
export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"
# CHAIN to DEPLOY; valid are 'regtest', 'testnet', and 'mainnet'
export BTC_CHAIN=regtest

View File

@ -14,13 +14,10 @@ if [ "$UPDATE_BTCPAY" = true ]; then
# btcpay-update.sh brings services back up, but does not take them down.
ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"
sleep 20
elif [ "$RESTORE_BTCPAY" = true ]; then
# run the update.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
sleep 10
./restore.sh
RUN_BACKUP=false
@ -52,20 +49,12 @@ if [ "$RUN_SERVICES" = true ]; then
# The default is to resume services, though admin may want to keep services off (eg., for a migration)
# we bring the services back up by default.
ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"
OPEN_URL=true
fi
if [ "$OPEN_URL" = true ]; then
if [ "$VPS_HOSTING_TARGET" = lxd ]; then
if wait-for-it -t 5 "$WWW_FQDN:443"; then
xdg-open "https://$WWW_FQDN" > /dev/null 2>&1
fi
else
if wait-for-it -t 5 "$FQDN:443"; then
xdg-open "https://$FQDN" > /dev/null 2>&1
fi
if wait-for-it -t 5 "$FQDN:443"; then
xdg-open "https://$FQDN" > /dev/null 2>&1
fi
fi

View File

@ -42,22 +42,12 @@ export LETSENCRYPT_EMAIL="${CERTIFICATE_EMAIL_ADDRESS}"
export BTCPAYGEN_LIGHTNING="clightning"
export BTCPAYGEN_CRYPTO1="btc"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-nostr-relay;"
export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-nostr-relay;opt-add-tor-relay"
#export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
export BTCPAY_ADDITIONAL_HOSTS="${BTCPAY_ADDITIONAL_HOSTNAMES}"
export BTCPAYGEN_REVERSEPROXY="nginx"
export BTCPAY_ENABLE_SSH=false
export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
EOL
# can add opt-add-tor-relay; in BTCPAYGEN_ADDITIONAL_FRAGMENTS
if [ "$VPS_HOSTING_TARGET" = lxd ]; then
cat >> "$SITE_PATH/btcpay.sh" <<EOL
export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
EOL
fi
cat >> "$SITE_PATH/btcpay.sh" <<EOL
if [ "\$NBITCOIN_NETWORK" != regtest ]; then
# run fast_sync if it's not been done before.

View File

@ -49,7 +49,9 @@ if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$SOVEREIGN_STACK_MAC_ADDRESS"
lxc start "$VM_NAME"
sleep 10
# let's wait a minimum of 15 seconds before we start checking for an IP address.
sleep 15
# let's wait for the LXC vm remote machine to get an IP address.
./wait_for_lxc_ip.sh "$VM_NAME"
@ -57,5 +59,5 @@ if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
# stop the VM and get a snapshot.
lxc stop "$VM_NAME"
lxc publish "$CLUSTER_NAME:$VM_NAME" --alias "$VM_NAME" --public
lxc delete "$VM_NAME"
fi

View File

@ -3,6 +3,7 @@
set -eux
cd "$(dirname "$0")"
# let's make sure we have an ssh keypair. We just use ~/.ssh/id_rsa
# TODO convert this to an SSH private key held on a Trezor. Thus a Trezor-T is required for
# login operations. This should be configurable of course.
@ -82,4 +83,4 @@ export DOCKER_HOST="ssh://ubuntu@$FQDN"
# the following scripts take responsibility for the rest of the provisioning depending on the app you're deploying.
bash -c "./$VIRTUAL_MACHINE/go.sh"
echo "Successfully deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET;"
echo "Successfull deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET;"

View File

@ -199,10 +199,6 @@ devices:
type: disk
EOF
# TODO get the sovereign-stack lxc profile OFF the lxdbr0 bridge network.
echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
if [ "$VIRTUAL_MACHINE" = sovereign-stack ] ; then
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat >> "$YAML_PATH" <<EOF
@ -210,22 +206,13 @@ cat >> "$YAML_PATH" <<EOF
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
type: nic
name: ${FILENAME}
EOF
else
# If we are deploying the www, we attach the vm to the underlay via macvlan.
cat >> "$YAML_PATH" <<EOF
enp5s0:
nictype: macvlan
parent: ${DATA_PLANE_MACVLAN_INTERFACE}
enp6s0:
nictype: bridged
parent: lxdfanSS
type: nic
name: ${FILENAME}
EOF
fi
# let's create a profile for the BCM TYPE-1 VMs. This is per VM.
if ! lxc profile list --format csv | grep -q "$VIRTUAL_MACHINE"; then
lxc profile create "$VIRTUAL_MACHINE"

View File

@ -22,7 +22,5 @@ done
# we are using IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu

View File

@ -27,6 +27,6 @@ elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
-v "$REMOTE_HOME/letsencrypt":/etc/letsencrypt \
-v /var/lib/letsencrypt:/var/lib/letsencrypt \
-v "$REMOTE_HOME/letsencrypt_logs":/var/log/letsencrypt \
certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$BTCPAY_USER_FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" -d "$NOSTR_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"
certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" -d "$NOSTR_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"
fi

View File

@ -3,8 +3,6 @@
set -exu
cd "$(dirname "$0")"
bash -c ./stub_nginxconf.sh
TOR_CONFIG_PATH=
ssh "$FQDN" mkdir -p "$REMOTE_HOME/ghost_site" "$REMOTE_HOME/ghost_db"
@ -45,11 +43,7 @@ if [ "$RUN_BACKUP" = true ]; then
fi
if [ "$RUN_RESTORE" = true ]; then
# Generally speaking we try to restore data. But if the BACKUP directory was
# just created, we know that we'll deploy fresh.
if [ "$RESTORE_WWW_USERDATA" = true ]; then
./restore.sh
fi
./restore.sh
fi
if [ "$DEPLOY_ONION_SITE" = true ]; then

View File

@ -24,8 +24,6 @@ fi
# TODO, ensure VPS_HOSTING_TARGET is in range.
export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"