Compare commits
8 Commits
9a4f90d27b ... 35a8bfc2c9

Commits in this comparison:

- 35a8bfc2c9
- a962ab6050
- 3bf42bba2a
- 03f80ade26
- 6ffc51eb02
- 25f5561848
- e38172dd2d
- 1ecbfe4442
.vscode/settings.json (vendored, 4 changes)

@@ -13,8 +13,8 @@
     "shellcheck.customArgs": [],
     "shellcheck.ignorePatterns": {},
     "shellcheck.exclude": [
-        "SC1090",
-        "SC1091",
+        // "SC1090",
+        // "SC1091",
         "SC2029"
     ],
     "terminal.integrated.fontFamily": "monospace",
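
Note: SC1090 and SC1091 are ShellCheck's warnings about sourced files it cannot follow; with those exclusions commented out, the warnings will surface in the editor again. A minimal sketch of the usual per-script alternative (the sourced path and variable here are only examples taken from defaults.sh):

    #!/bin/bash
    # Point ShellCheck at the file being sourced instead of excluding SC1090/SC1091 editor-wide.
    # shellcheck source=./defaults.sh
    source ./defaults.sh
    echo "$WWW_HOSTNAME"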
(deleted file; name not shown in this view)

@@ -1,51 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-ssh "$FQDN" "
-set -x
-
-cd /home/ubuntu
-
-# first, lets make sure we have the latest code. We use git over HTTPS and store it in ~/umbrel
-# ~/umbrel is the only folder we need to backup
-if [ ! -d ./umbrel ]; then
-    git clone https://github.com/getumbrel/umbrel.git ./umbrel
-else
-
-    if [ -f ./umbrel/scripts/stop ]; then
-        sudo ./umbrel/scripts/stop
-    fi
-fi
-"
-
-# # DO SOME BACKUP OPERATION
-
-# ssh "$FQDN" "
-# set -x
-
-# mkdir -p /home/ubuntu/backup
-
-# sudo PASSPHRASE=${DUPLICITY_BACKUP_PASSPHRASE} duplicity --exclude ${REMOTE_HOME}/umbrel/bitcoin/blocks ${REMOTE_HOME}/umbrel file://${REMOTE_BACKUP_PATH}
-# sudo chown -R ubuntu:ubuntu ${REMOTE_BACKUP_PATH}
-# "
-
-# Start services back up.
-ssh "$FQDN" "
-set -e
-cd /home/ubuntu/umbrel
-
-git config pull.rebase true
-git fetch --all --tags
-git checkout master
-git pull
-git checkout tags/v0.4.18
-
-# To use Umbrel on mainnet, run:
-sudo NETWORK=$BTC_CHAIN /home/ubuntu/umbrel/scripts/start
-"
-
-# we wait for lightning to comone line too.
-wait-for-it -t -60 "$FQDN:80"
-
-xdg-open "http://$FQDN" > /dev/null 2>&1
cluster.sh (40 changes)

@@ -3,9 +3,9 @@
 set -eux
 cd "$(dirname "$0")"

-# NOTE This script is meant to be executed on your LXD bare metal servers. This script
-# ensures that the LXD daemon is installed via snap package, then initialize the daemon
-# to operate in clustered mode
+# This script is meant to be executed on the management machine.
+# it reaches out to an SSH endpoint and provisions that machine
+# to use LXD.

 COMMAND="${1:-}"
 DATA_PLANE_MACVLAN_INTERFACE=

@@ -16,6 +16,7 @@ if [ "$COMMAND" = create ]; then
     # override the cluster name.
     CLUSTER_NAME="${2:-}"
+

     if [ -z "$CLUSTER_NAME" ]; then
         echo "ERROR: The cluster name was not provided."
         exit 1

@@ -61,6 +62,7 @@ EOL

     if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
         FQDN="${3:-}"
+        shift

         if [ -z "$FQDN" ]; then
             echo "ERROR: The Fully Qualified Domain Name of the new cluster member was not set."

@@ -85,7 +87,7 @@ EOL
                 shift
                 ;;
             *)
-                # unknown option
+
                 ;;
         esac
     done

@@ -151,15 +153,17 @@ EOL
         exit 1
     fi

+    if ! command -v lxc >/dev/null 2>&1; then
     if lxc profile list --format csv | grep -q sovereign-stack; then
         lxc profile delete sovereign-stack
         sleep 1
     fi

-    if lxc network list --format csv | grep -q lxdfanSS; then
-        lxc network delete lxdfanSS
+    if lxc network list --format csv | grep -q lxdbr0; then
+        lxc network delete lxdbr0
         sleep 1
     fi
+    fi

     ssh -t "ubuntu@$FQDN" "
         # set host firewall policy.

@@ -184,7 +188,7 @@ fi
 fi

 # stub out the lxd init file for the remote SSH endpoint.
-CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/$CLUSTER_NAME-primary.yml"
+CLUSTER_MASTER_LXD_INIT="$CLUSTER_PATH/$CLUSTER_NAME-lxd_profile.yml"
 cat >"$CLUSTER_MASTER_LXD_INIT" <<EOF
 config:
   core.https_address: ${MGMT_PLANE_IP}:8443

@@ -192,26 +196,16 @@ config:
   images.auto_update_interval: 15

 networks:
-- config:
-    bridge.mode: fan
-    fan.underlay_subnet: auto
+- name: lxdbr0
+  type: bridge
+  config:
+    ipv4.nat: "true"
+    ipv6.nat: "true"
+  managed: true
   description: ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-},${DISK_TO_USE:-}
-  name: lxdfanSS
-  type: ""
-  project: default

 storage_pools: []

-profiles:
-- config: {}
-  description: "inter-vm communication across lxd hosts."
-  devices:
-    eth0:
-      name: eth0
-      network: lxdfanSS
-      type: nic
-  name: sovereign-stack
-
 projects: []
 cluster:
   server_name: ${CLUSTER_NAME}
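
Note: the preseed stubbed out above now defines a standard NAT bridge (lxdbr0) instead of the old fan network (lxdfanSS), and the sovereign-stack profile is no longer part of the preseed. A sketch of how such a preseed is normally applied and checked; piping it into `lxd init --preseed` is standard LXD usage, though that exact invocation is not shown in this comparison:

    # Apply the generated preseed on the new cluster member over SSH.
    ssh "ubuntu@$FQDN" "cat | sudo lxd init --preseed" < "$CLUSTER_MASTER_LXD_INIT"

    # Confirm the NAT bridge exists on that remote afterwards.
    lxc network show "$CLUSTER_NAME:lxdbr0"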
defaults.sh (14 changes)

@@ -3,6 +3,7 @@
 set -eu

 export DEPLOY_WWW_SERVER=false
+export WWW_SERVER_MAC_ADDRESS=
 export DEPLOY_BTCPPAY_SERVER=false

 export DEPLOY_GHOST=true

@@ -13,6 +14,7 @@ export DEPLOY_GITEA=false

 export WWW_HOSTNAME="www"
 export BTCPAY_HOSTNAME="btcpay"
+export BTCPAY_HOSTNAME_IN_CERT="pay"
 export NEXTCLOUD_HOSTNAME="nextcloud"
 export GITEA_HOSTNAME="git"
 export NOSTR_HOSTNAME="relay"

@@ -106,18 +108,18 @@ DEFAULT_DB_IMAGE="mariadb:10.8.3-jammy"
 export ENABLE_NGINX_CACHING="$ENABLE_NGINX_CACHING"

 # run the docker stack.
-export GHOST_IMAGE="ghost:5.7.0"
+export GHOST_IMAGE="ghost:5.8.2"
 export GHOST_DB_IMAGE="$DEFAULT_DB_IMAGE"
-export NGINX_IMAGE="nginx:1.23.0"
+export NGINX_IMAGE="nginx:1.23.1"
-export NEXTCLOUD_IMAGE="nextcloud:24.0.2"
+export NEXTCLOUD_IMAGE="nextcloud:24.0.3"
 export NEXTCLOUD_DB_IMAGE="$DEFAULT_DB_IMAGE"

 export GITEA_IMAGE="gitea/gitea:latest"
 export GITEA_DB_IMAGE="$DEFAULT_DB_IMAGE"

-export SOVEREIGN_STACK_MAC_ADDRESS="aa:bb:cc:00:00:03"
+export SOVEREIGN_STACK_MAC_ADDRESS=
-export WWW_MAC_ADDRESS="aa:bb:cc:00:00:00"
+export WWW_MAC_ADDRESS=
-export BTCPAY_MAC_ADDRESS="aa:bb:cc:00:00:01"
+export BTCPAY_MAC_ADDRESS=

 export CLUSTERS_DIR="$HOME/ss-clusters"
 export SITES_PATH="$HOME/ss-sites"
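
Note: the MAC address defaults are now empty, so concrete values have to come from the per-site configuration (deploy.sh below stubs them as CHANGE_ME_REQUIRED). A hypothetical guard, not part of this change set, that a deployment script could use to fail fast on unset values:

    for var in SOVEREIGN_STACK_MAC_ADDRESS WWW_MAC_ADDRESS BTCPAY_MAC_ADDRESS; do
        if [ -z "${!var:-}" ]; then
            echo "ERROR: $var is empty; set it in your site definition."
            exit 1
        fi
    done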
deploy.sh (14 changes)

@@ -18,7 +18,7 @@ check_dependencies wait-for-it dig rsync sshfs lxc docker-machine
 # TODO remove dependency on Docker-machine. That's what we use to provision VM on 3rd party vendors. Looking for LXD endpoint.

 # let's check to ensure the management machine is on the Baseline ubuntu 21.04
-if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then
+if ! lsb_release -d | grep -q "Ubuntu 22.04"; then
     echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
     exit 1
 fi

@@ -30,6 +30,7 @@ VPS_HOSTING_TARGET=lxd
 RUN_CERT_RENEWAL=true
 USER_NO_BACKUP=false
 USER_RUN_RESTORE=false
+RESTORE_WWW_USERDATA=true
 RESTORE_BTCPAY=false
 USER_SKIP_WWW=false
 USER_SKIP_BTCPAY=false

@@ -94,7 +95,8 @@ for i in "$@"; do
             shift
             ;;
         *)
-            # unknown option
+            echo "Unexpected option: $1"
+            exit 1
             ;;
     esac
 done

@@ -254,6 +256,7 @@ function run_domain {
     if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
         mkdir -p "$LOCAL_BACKUP_PATH"
         BACKUP_PATH_CREATED=true
+        RESTORE_WWW_USERDATA=false
     fi

     DDNS_HOST=

@@ -292,6 +295,7 @@ function run_domain {
     export FQDN="$DDNS_HOST.$DOMAIN_NAME"
     export LXD_VM_NAME="${FQDN//./-}"
     export REMOTE_BACKUP_PATH="$REMOTE_BACKUP_PATH"
+    export RESTORE_WWW_USERDATA="$RESTORE_WWW_USERDATA"

     # This next section of if statements is our sanity checking area.
     if [ "$VPS_HOSTING_TARGET" = aws ]; then

@@ -370,8 +374,6 @@ function run_domain {
         exit 1
     fi

-    bash -c ./deployment/stub_nginxconf.sh
-
     MACHINE_EXISTS=false
     if [ "$VPS_HOSTING_TARGET" = aws ] && docker-machine ls -q | grep -q "$FQDN"; then
         MACHINE_EXISTS=true

@@ -382,7 +384,7 @@ function run_domain {
     fi

     if [ "$USER_NO_BACKUP" = true ]; then
-        RUN_BACKUP=true
+        RUN_BACKUP=false
     fi

     if [ "$MACHINE_EXISTS" = true ]; then

@@ -463,6 +465,7 @@ export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"

 ## WWW
 export DEPLOY_WWW_SERVER=true
+export WWW_SERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"

 # Deploy APPS to www
 export DEPLOY_GHOST=true

@@ -490,6 +493,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"

 ## BTCPAY SERVER; if true, then a BTCPay server is deployed.
 export DEPLOY_BTCPAY_SERVER=false
+export BTCPAYSERVER_MAC_ADDRESS="CHANGE_ME_REQUIRED"

 # CHAIN to DEPLOY; valid are 'regtest', 'testnet', and 'mainnet'
 export BTC_CHAIN=regtest
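
Note: relaxing the lsb_release match from "Ubuntu 22.04 LTS" to "Ubuntu 22.04" keeps the OS check working on point releases, whose description string includes the patch version. This hunk set also inverts a backup flag that previously ran backups even when the user opted out (RUN_BACKUP now becomes false when USER_NO_BACKUP=true). A quick illustration of the version check, with a typical description string hard-coded for the example:

    DESC="Description:  Ubuntu 22.04.1 LTS"   # roughly what lsb_release -d prints on a point release
    grep -q "Ubuntu 22.04 LTS" <<< "$DESC" || echo "old strict match fails"
    grep -q "Ubuntu 22.04" <<< "$DESC" && echo "new looser match passes"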
(file name not shown in this view)

@@ -14,9 +14,12 @@ if [ "$UPDATE_BTCPAY" = true ]; then
     # btcpay-update.sh brings services back up, but does not take them down.
     ssh "$FQDN" "sudo bash -c $BTCPAY_SERVER_APPPATH/btcpay-update.sh"

+    sleep 20
+
 elif [ "$RESTORE_BTCPAY" = true ]; then
     # run the update.
     ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-down.sh"
+    sleep 10

     ./restore.sh


@@ -49,12 +52,20 @@ if [ "$RUN_SERVICES" = true ]; then
     # The default is to resume services, though admin may want to keep services off (eg., for a migration)
     # we bring the services back up by default.
     ssh "$FQDN" "bash -c $BTCPAY_SERVER_APPPATH/btcpay-up.sh"

     OPEN_URL=true

 fi

 if [ "$OPEN_URL" = true ]; then
+
+    if [ "$VPS_HOSTING_TARGET" = lxd ]; then
+        if wait-for-it -t 5 "$WWW_FQDN:443"; then
+            xdg-open "https://$WWW_FQDN" > /dev/null 2>&1
+        fi
+    else
     if wait-for-it -t 5 "$FQDN:443"; then
         xdg-open "https://$FQDN" > /dev/null 2>&1
     fi
     fi
+fi
(file name not shown in this view)

@@ -42,12 +42,22 @@ export LETSENCRYPT_EMAIL="${CERTIFICATE_EMAIL_ADDRESS}"
 export BTCPAYGEN_LIGHTNING="clightning"
 export BTCPAYGEN_CRYPTO1="btc"

-export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-nostr-relay;opt-add-tor-relay"
+export BTCPAYGEN_ADDITIONAL_FRAGMENTS="opt-save-storage;opt-add-btctransmuter;opt-add-nostr-relay;"
-#export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
 export BTCPAY_ADDITIONAL_HOSTS="${BTCPAY_ADDITIONAL_HOSTNAMES}"
 export BTCPAYGEN_REVERSEPROXY="nginx"
 export BTCPAY_ENABLE_SSH=false
 export BTCPAY_BASE_DIRECTORY=${REMOTE_HOME}
+EOL
+
+# can add opt-add-tor-relay; in BTCPAYGEN_ADDITIONAL_FRAGMENTS
+if [ "$VPS_HOSTING_TARGET" = lxd ]; then
+    cat >> "$SITE_PATH/btcpay.sh" <<EOL
+export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"
+EOL
+fi
+
+cat >> "$SITE_PATH/btcpay.sh" <<EOL

 if [ "\$NBITCOIN_NETWORK" != regtest ]; then
     # run fast_sync if it's not been done before.
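
Note: the nginx-https exclusion is no longer left as a commented-out line in the generated btcpay.sh; it is appended only when VPS_HOSTING_TARGET is lxd, presumably because HTTPS is terminated elsewhere in that topology. A minimal sketch of the conditional-append pattern with illustrative paths (heredocs replaced by echo for brevity):

    OUT="/tmp/btcpay.sh"   # stand-in for "$SITE_PATH/btcpay.sh"
    echo 'export BTCPAYGEN_REVERSEPROXY="nginx"' > "$OUT"

    if [ "${VPS_HOSTING_TARGET:-}" = lxd ]; then
        echo 'export BTCPAYGEN_EXCLUDE_FRAGMENTS="nginx-https"' >> "$OUT"
    fi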
(file name not shown in this view)

@@ -49,9 +49,7 @@ if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
     lxc config set "$VM_NAME" "volatile.enp5s0.hwaddr=$SOVEREIGN_STACK_MAC_ADDRESS"

     lxc start "$VM_NAME"
+    sleep 10
-    # let's wait a minimum of 15 seconds before we start checking for an IP address.
-    sleep 15
-

     # let's wait for the LXC vm remote machine to get an IP address.
     ./wait_for_lxc_ip.sh "$VM_NAME"

@@ -59,5 +57,5 @@ if ! lxc image list --format csv "$VM_NAME" | grep -q "$VM_NAME"; then
     # stop the VM and get a snapshot.
     lxc stop "$VM_NAME"
     lxc publish "$CLUSTER_NAME:$VM_NAME" --alias "$VM_NAME" --public
-    lxc delete "$VM_NAME"
+
 fi
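
Note: the base VM is now left in place after `lxc publish` rather than being deleted. For context, the stop/publish sequence shown is standard LXD image handling; a generic sketch with illustrative names:

    lxc stop demo-vm
    lxc publish demo-vm --alias demo-vm-base --public
    lxc image list --format csv | grep demo-vm-base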
(file name not shown in this view)

@@ -3,7 +3,6 @@
 set -eux
 cd "$(dirname "$0")"

-
 # let's make sure we have an ssh keypair. We just use ~/.ssh/id_rsa
 # TODO convert this to SSH private key held on Trezor. THus trezor-T required for
 # login operations. This should be configurable of course.

@@ -83,4 +82,4 @@ export DOCKER_HOST="ssh://ubuntu@$FQDN"
 # the following scripts take responsibility for the rest of the provisioning depending on the app you're deploying.
 bash -c "./$VIRTUAL_MACHINE/go.sh"

-echo "Successfull deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET;"
+echo "Successfully deployed '$DOMAIN_NAME' with git commit '$(cat ./.git/refs/heads/master)' VPS_HOSTING_TARGET=$VPS_HOSTING_TARGET;"
(file name not shown in this view)

@@ -199,6 +199,10 @@ devices:
     type: disk
 EOF

+# TODO get the sovereign-stack lxc profile OFF the lxdbr0 bridge network.
+echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
+
+if [ "$VIRTUAL_MACHINE" = sovereign-stack ] ; then

 # If we are deploying the www, we attach the vm to the underlay via macvlan.
 cat >> "$YAML_PATH" <<EOF

@@ -206,13 +210,22 @@ cat >> "$YAML_PATH" <<EOF
     nictype: macvlan
     parent: ${DATA_PLANE_MACVLAN_INTERFACE}
     type: nic
-  enp6s0:
-    nictype: bridged
-    parent: lxdfanSS
-    type: nic
 name: ${FILENAME}
 EOF
+
+else
+    # If we are deploying the www, we attach the vm to the underlay via macvlan.
+    cat >> "$YAML_PATH" <<EOF
+  enp5s0:
+    nictype: macvlan
+    parent: ${DATA_PLANE_MACVLAN_INTERFACE}
+    type: nic
+
+name: ${FILENAME}
+EOF
+
+fi

 # let's create a profile for the BCM TYPE-1 VMs. This is per VM.
 if ! lxc profile list --format csv | grep -q "$VIRTUAL_MACHINE"; then
     lxc profile create "$VIRTUAL_MACHINE"
(file name not shown in this view)

@@ -22,5 +22,7 @@ done
 # we are using IP address here so we don't have to rely on external DNS
 # configuration for the base image preparataion.
 ssh-keygen -R "$IP_V4_ADDRESS"
+
 ssh-keyscan -H -t ecdsa "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
+
 ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
(file name not shown in this view)

@@ -27,6 +27,6 @@ elif [ "$VPS_HOSTING_TARGET" = lxd ]; then
     -v "$REMOTE_HOME/letsencrypt":/etc/letsencrypt \
     -v /var/lib/letsencrypt:/var/lib/letsencrypt \
     -v "$REMOTE_HOME/letsencrypt_logs":/var/log/letsencrypt \
-    certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" -d "$NOSTR_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"
+    certbot/certbot certonly -v --noninteractive --agree-tos --key-type ecdsa --standalone --expand -d "$DOMAIN_NAME" -d "$FQDN" -d "$BTCPAY_USER_FQDN" -d "$NEXTCLOUD_FQDN" -d "$GITEA_FQDN" -d "$NOSTR_FQDN" --email "$CERTIFICATE_EMAIL_ADDRESS"

 fi
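
Note: the certificate request now also covers the BTCPay user-facing hostname. With the BTCPAY_HOSTNAME_IN_CERT default ("pay") added in defaults.sh and the export added in the last hunk of this comparison, the extra -d argument expands like this (domain is illustrative):

    DOMAIN_NAME="example.com"
    BTCPAY_HOSTNAME_IN_CERT="pay"
    BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
    echo "$BTCPAY_USER_FQDN"   # pay.example.com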
(file name not shown in this view)

@@ -3,6 +3,8 @@
 set -exu
 cd "$(dirname "$0")"

+bash -c ./stub_nginxconf.sh
+
 TOR_CONFIG_PATH=

 ssh "$FQDN" mkdir -p "$REMOTE_HOME/ghost_site" "$REMOTE_HOME/ghost_db"

@@ -43,8 +45,12 @@ if [ "$RUN_BACKUP" = true ]; then
 fi

 if [ "$RUN_RESTORE" = true ]; then
+    # Generally speaking we try to restore data. But if the BACKUP directory was
+    # just created, we know that we'll deploy fresh.
+    if [ "$RESTORE_WWW_USERDATA" = true ]; then
     ./restore.sh
     fi
+fi

 if [ "$DEPLOY_ONION_SITE" = true ]; then
     # ensure the tor image is built
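
Note: restores of www user data are now gated on RESTORE_WWW_USERDATA, which deploy.sh sets to false whenever the local backup directory had to be created from scratch. A condensed illustration of how the two changes interact (not a literal excerpt from either script):

    RESTORE_WWW_USERDATA=true
    if [ ! -d "$LOCAL_BACKUP_PATH" ]; then
        mkdir -p "$LOCAL_BACKUP_PATH"
        RESTORE_WWW_USERDATA=false   # a brand-new backup directory has nothing to restore
    fi

    if [ "$RUN_RESTORE" = true ] && [ "$RESTORE_WWW_USERDATA" = true ]; then
        ./restore.sh
    fi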
(file name not shown in this view)

@@ -24,6 +24,8 @@ fi

 # TODO, ensure VPS_HOSTING_TARGET is in range.
 export NEXTCLOUD_FQDN="$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME"
+export BTCPAY_USER_FQDN="$BTCPAY_HOSTNAME_IN_CERT.$DOMAIN_NAME"
+export WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
 export GITEA_FQDN="$GITEA_HOSTNAME.$DOMAIN_NAME"
 export NOSTR_FQDN="$NOSTR_HOSTNAME.$DOMAIN_NAME"