1
1

Compare commits

...

10 Commits

Author SHA1 Message Date
cb93e58591
Update project head. 2024-02-09 11:33:58 -05:00
e34a765c69
DISABLE reset.sh 2024-02-09 11:33:44 -05:00
091e34462e
Update remote.sh for incus. 2024-02-09 11:33:24 -05:00
10801851c7
Check for prod. 2024-02-09 11:33:01 -05:00
ea7bd1d42c
Add some snapshot logic there. 2024-02-09 11:32:33 -05:00
c457118aec
Fix down script bug. 2024-02-09 11:31:35 -05:00
5828fd1a38
Run incus init in a pseudo shell 2024-02-09 11:31:09 -05:00
1973ee54f8
Wire up lnplayserver deployment correctly. 2024-02-09 11:30:42 -05:00
17e9398588
Nitpicks. 2024-02-09 11:28:29 -05:00
0edb94cdea
Remove target.sh 2024-02-09 11:26:10 -05:00
14 changed files with 59 additions and 50 deletions

View File

@@ -28,10 +28,9 @@ else
if ! incus list --project default | grep -q "$BASE_IMAGE_VM_NAME"; then
# the base image is ubuntu:22.04.
incus init -q --profile="$BASE_IMAGE_VM_NAME" "$UBUNTU_BASE_IMAGE_NAME" "$BASE_IMAGE_VM_NAME" --vm --project default
script -q -c "incus init -q --profile=$BASE_IMAGE_VM_NAME $UBUNTU_BASE_IMAGE_NAME $BASE_IMAGE_VM_NAME --vm --project default" /dev/null
fi
if incus info "$BASE_IMAGE_VM_NAME" --project default | grep -q "Status: STOPPED"; then
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
incus config set "$BASE_IMAGE_VM_NAME" --project default
@@ -85,7 +84,9 @@ else
fi
echo "INFO: Publishing '$BASE_IMAGE_VM_NAME' as image '$DOCKER_BASE_IMAGE_NAME'. Please wait."
incus publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project default --alias="$DOCKER_BASE_IMAGE_NAME" --compression none
incus publish -q --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" \
--project default --alias="$DOCKER_BASE_IMAGE_NAME" \
--compression none
echo "INFO: Success creating the base image. Deleting artifacts from the build process."
incus delete -f "$BASE_IMAGE_VM_NAME" --project default

View File

@@ -81,9 +81,9 @@ if ! incus list --format csv | grep -q "$INCUS_VM_NAME"; then
bash -c "./stub_profile.sh --vm=$VIRTUAL_MACHINE --incus-hostname=$INCUS_VM_NAME --ss-volume-name=$SSDATA_VOLUME_NAME --backup-volume-name=$BACKUP_VOLUME_NAME"
if ! incus image list -q --format csv | grep -q "$INCUS_VM_NAME"; then
incus init -q "$DOCKER_BASE_IMAGE_NAME" "$INCUS_VM_NAME" --vm --profile="$INCUS_VM_NAME"
script -q -c "incus init -q $DOCKER_BASE_IMAGE_NAME $INCUS_VM_NAME --vm --profile=$INCUS_VM_NAME" /dev/null
elif [ "$VIRTUAL_MACHINE" = lnplayserver ]; then
incus init -q "$INCUS_VM_NAME" "$INCUS_VM_NAME" --vm --profile="$INCUS_VM_NAME"
script -q -c "incus init -q $INCUS_VM_NAME $INCUS_VM_NAME --vm --profile=$INCUS_VM_NAME" /dev/null
fi
# let's PIN the HW address for now so we don't exhaust IP

View File

@@ -12,8 +12,6 @@ export SITES_PATH="$SS_ROOT_PATH/sites"
export INCUS_CONFIG_PATH="$SS_ROOT_PATH/incus"
export SS_CACHE_PATH="$SS_ROOT_PATH/cache"
export REMOTE_HOME="/home/ubuntu"
export REMOTE_DATA_PATH="$REMOTE_HOME/ss-data"
export REMOTE_DATA_PATH_LETSENCRYPT="$REMOTE_DATA_PATH/letsencrypt"

View File

@@ -63,23 +63,19 @@ source ./project/domain_env.sh
source ./domain_list.sh
SERVERS=
if [ "$SKIP_WWW_SERVER" = false ] && [ -n "$WWW_SERVER_MAC_ADDRESS" ]; then
SERVERS="www $SERVERS"
fi
if [ "$SKIP_BTCPAY_SERVER" = false ] && [ -n "$BTCPAY_SERVER_MAC_ADDRESS" ]; then
SERVERS="btcpayserver"
SERVERS="$SERVERS btcpayserver"
fi
if [ "$SKIP_LNPLAY_SERVER" = false ] && [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
SERVERS="lnplayserver $SERVERS"
SERVERS="$SERVERS lnplayserver"
fi
for VIRTUAL_MACHINE in $SERVERS; do
INCUS_VM_NAME="$VIRTUAL_MACHINE-${PRIMARY_DOMAIN//./-}"
@@ -126,15 +122,16 @@ echo "BACKUP_WWW_APPS: $BACKUP_WWW_APPS"
# let's grab a snapshot of the
if [ "$BACKUP_WWW_APPS" = true ]; then
#SNAPSHOT_ID=$(cat /dev/urandom | tr -dc 'a-aA-Z' | fold -w 6 | head -n 1)
#incus storage volume snapshot create ss-base www-ss-data "$SNAPSHOT_ID"
SNAPSHOT_ID=$(cat /dev/urandom | tr -dc 'a-aA-Z' | fold -w 6 | head -n 1)
incus storage volume snapshot create ss-base www-ss-data "$SNAPSHOT_ID"
BACKUP_LOCATION="$HOME/ss/backups"
mkdir -p "$BACKUP_LOCATION"
incus storage volume export ss-base "www-ss-data" "$BACKUP_LOCATION/project-$(incus project list --format csv | grep "(current)" | awk '{print $1}')_www-ss-data_""$(date +%s)"".tar.gz"
#incus storage volume export ss-base "www-ss-data" "$BACKUP_LOCATION/project-$(incus project list --format csv | grep "(current)" | awk '{print $1}')_www-ss-data_""$(date +%s)"".tar.gz"
#incus storage volume snapshot delete ss-base "www-ss-data" "$SNAPSHOT_ID"
fi
if incus network list -q | grep -q ss-ovn; then
incus network delete ss-ovn
fi
if [[ "$SERVERS" == *"www"* && "$SERVERS" == *"btcpay"* ]]; then
if incus network list -q | grep -q ss-ovn; then
incus network delete ss-ovn
fi
fi

@@ -1 +1 @@
Subproject commit ba3cf9425948d156d205758b965b2fc70e63c90b
Subproject commit c07682def000091c3bf30704d07f4c77ff36755d

View File

@@ -137,12 +137,13 @@ if ! command -v incus >/dev/null 2>&1; then
fi
# install dependencies.
ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano -y'
ssh -t "ubuntu@$FQDN" 'sudo apt update && sudo apt upgrade -y && sudo apt install htop dnsutils nano zfsutils-linux -y'
REMOTE_SCRIPT_PATH="$REMOTE_HOME/install_incus.sh"
scp ../install_incus.sh "ubuntu@$FQDN:$REMOTE_SCRIPT_PATH"
ssh -t "ubuntu@$FQDN" "chmod +x $REMOTE_SCRIPT_PATH"
ssh -t "ubuntu@$FQDN" "sudo bash -c $REMOTE_SCRIPT_PATH"
ssh -t "ubuntu@$FQDN" "sudo adduser ubuntu incus-admin"
# install OVN for the project-specific bridge networks
ssh -t "ubuntu@$FQDN" "sudo apt-get install -y ovn-host ovn-central && sudo ovs-vsctl set open_vswitch . external_ids:ovn-remote=unix:/var/run/ovn/ovnsb_db.sock external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip=127.0.0.1"
@@ -237,3 +238,5 @@ if ! incus storage list --format csv | grep -q ss-base; then
# done
fi
echo "INFO: completed remote.sh."

View File

@@ -5,7 +5,6 @@ set -eu
CURRENT_REMOTE="$(incus remote get-default)"
DEPLOYMENT_STRING=
SS_ROOT_PATH="$HOME/ss"
REMOTES_PATH="$SS_ROOT_PATH/remotes"
PROJECTS_PATH="$SS_ROOT_PATH/projects"
@@ -15,7 +14,7 @@ SS_CACHE_PATH="$SS_ROOT_PATH/cache"
if echo "$CURRENT_REMOTE" | grep -q "production"; then
if echo "$CURRENT_REMOTE" | grep -q "prod"; then
echo "WARNING: You are running a migration procedure on a production system."
echo ""

View File

@@ -1,9 +1,12 @@
#!/bin/bash
set -e
set -exu
cd "$(dirname "$0")"
echo "WARNING: THIS SCRIPT NEEDS WORK"
exit 1
PURGE_INCUS=false
# grab any modifications from the command line.
@@ -20,7 +23,6 @@ for i in "$@"; do
esac
done
source ../defaults.env
./down.sh

View File

@@ -82,8 +82,6 @@ EOF
fi
. ./target.sh
# if VIRTUAL_MACHINE=base, then we doing the base image.
if [ "$VIRTUAL_MACHINE" = base ]; then
# this is for the base image only...
@@ -142,9 +140,6 @@ EOF
{
"registry-mirrors": [
"${REGISTRY_URL}"
],
"labels": [
"PROJECT_COMMIT=${TARGET_PROJECT_GIT_COMMIT}"
]
}
@@ -160,7 +155,7 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
- echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu jammy stable" | sudo tee /etc/apt/sources.list.d/docker.list
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server

View File

@@ -1,3 +0,0 @@
#!/bin/bash
export TARGET_PROJECT_GIT_COMMIT=ca069c7decdc74d2719a7f34927bda49159da2ae

View File

@@ -3,8 +3,6 @@
set -exu
cd "$(dirname "$0")"
. ./target.sh
# check to ensure dependencies are met.
for cmd in wait-for-it dig rsync sshfs incus; do
if ! command -v "$cmd" >/dev/null 2>&1; then
@@ -54,6 +52,9 @@ USER_SAYS_YES=false
WWW_SERVER_MAC_ADDRESS=
BTCPAY_SERVER_MAC_ADDRESS=
LNPLAY_SERVER_MAC_ADDRESS=
LNPLAY_ENV_PATH=
LNPLAY_VM_EXPIRATION_DATE=
LNPLAY_ORDER_ID=
# grab any modifications from the command line.
for i in "$@"; do
@@ -102,6 +103,18 @@ for i in "$@"; do
RUN_CERT_RENEWAL=false
shift
;;
--lnplay-env-path=*)
LNPLAY_ENV_PATH="${i#*=}"
shift
;;
--vm-expiration-date=*)
LNPLAY_VM_EXPIRATION_DATE="${i#*=}"
shift
;;
--order-id=*)
LNPLAY_ORDER_ID="${i#*=}"
shift
;;
-y)
USER_SAYS_YES=true
shift
@@ -113,6 +126,7 @@ for i in "$@"; do
esac
done
if [ "$RESTORE_BTCPAY" = true ] && [ -z "$BACKUP_BTCPAY_ARCHIVE_PATH" ]; then
echo "ERROR: Use the '--backup-archive-path=/path/to/btcpay/archive.tar.gz' option when restoring btcpay server."
exit 1
@@ -155,6 +169,7 @@ source "$REMOTE_DEFINITION"
# this is our password generation mechanism. Relying on GPG for secure password generation
# TODO see if this is a secure way to do it.
function new_pass {
gpg --gen-random --armor 1 25
}
@@ -276,6 +291,7 @@ export DOMAIN_NAME="$PRIMARY_DOMAIN"
export PRIMARY_DOMAIN="$PRIMARY_DOMAIN"
export BITCOIN_CHAIN="$BITCOIN_CHAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_SITE_PATH="$SITES_PATH/$PRIMARY_DOMAIN"
stub_site_definition
@@ -419,32 +435,27 @@ if [ "$SKIP_LNPLAY_SERVER" = false ]; then
if [ -n "$LNPLAY_SERVER_MAC_ADDRESS" ]; then
export DOCKER_HOST="ssh://ubuntu@$LNPLAY_SERVER_FQDN"
# set the active env to our LNPLAY_SERVER_FQDN
cat > ./project/lnplay/active_env.txt <<EOL
${LNPLAY_SERVER_FQDN}
EOL
LNPLAY_ENV_FILE=./project/lnplay/environments/"$LNPLAY_SERVER_FQDN"
LNPLAY_ENV_FILE="$PRIMARY_SITE_PATH/$LNPLAY_SERVER_FQDN/lnplay.conf"
if [ ! -f "$LNPLAY_ENV_FILE" ]; then
# and we have to set our environment file as well.
cat > "$LNPLAY_ENV_FILE" <<EOL
DOCKER_HOST=ssh://ubuntu@${LNPLAY_SERVER_FQDN}
DOMAIN_NAME=${PRIMARY_DOMAIN}
BACKEND_FQDN=lnplay.${PRIMARY_DOMAIN}
FRONTEND_FQDN=remote.${PRIMARY_DOMAIN}
ENABLE_TLS=true
BTC_CHAIN=${BITCOIN_CHAIN}
CHANNEL_SETUP=none
LNPLAY_SERVER_PATH=${SITES_PATH}/${PRIMARY_DOMAIN}/lnplayserver
DEPLOY_PRISM_PLUGIN=true
NAMES_FILE_PATH
EOL
fi
INCUS_VM_NAME="${LNPLAY_SERVER_FQDN//./-}"
if ! incus image list -q --format csv | grep -q "$INCUS_VM_NAME"; then
# do all the docker image creation steps, but don't run services.
bash -c "./project/lnplay/up.sh -y --no-services"
bash -c "./project/lnplay/up.sh -y --no-services --lnplay-conf-path=$LNPLAY_ENV_FILE"
# stop the instance so we can get an image yo
incus stop "$INCUS_VM_NAME"
@@ -456,9 +467,12 @@ EOL
sleep 10
bash -c "./wait_for_ip.sh --incus-name=$INCUS_VM_NAME"
sleep 3
fi
# bring up lnplay services.
bash -c "./project/lnplay/up.sh -y"
bash -c "./project/lnplay/up.sh -y --lnplay-conf-path=$LNPLAY_ENV_FILE"
fi
fi

View File

@@ -221,7 +221,6 @@ ssh-keyscan -H "$IP_V4_ADDRESS" >> "$SSH_HOME/known_hosts"
ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu
if [ "$FROM_BUILT_IMAGE" = false ]; then
ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh

View File

@@ -72,3 +72,4 @@ apt-get update
# TODO see if this can be fixed by installing JUST the incus client.
# none of the systemd/daemon stuff is needed necessarily.
apt-get install incus -y

View File

@@ -2,6 +2,9 @@
set -exu
# this script uninstalls incus from the MANAGEMENT MACHINE
# if you want to remove incus from remote cluster hosts, run ss-reset.
PURGE_INCUS=false
# grab any modifications from the command line.