Compare commits

...

8 Commits

10 changed files with 97 additions and 67 deletions

View File

@ -10,7 +10,14 @@ if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
if [ -d "$SS_JAMMY_PATH" ]; then
lxc image import "$SS_JAMMY_PATH/meta-bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.tar.xz" \
"$SS_JAMMY_PATH/bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.qcow2" \
--alias "$UBUNTU_BASE_IMAGE_NAME"
else
# copy the image down from Canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
fi
# If the LXC VM already exists, we delete it (so we can start fresh)
@ -27,11 +34,11 @@ else
# TODO move this sovereign-stack-base construction VM to a separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME"
# for CHAIN in mainnet testnet; do
# for DATA in blocks chainstate; do
# lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/$CHAIN/$DATA"
# done
# done
for CHAIN in mainnet testnet; do
for DATA in blocks chainstate; do
lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
done
done
lxc start "$BASE_IMAGE_VM_NAME"
@ -43,14 +50,23 @@ else
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22
sleep 3
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# lxc file push --recursive --project=default "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/" "$BASE_IMAGE_VM_NAME/home/ubuntu/$CHAIN/$DATA/"
# done
# done
# If we have any chainstate or blocks in our SSME, let's push them to the
# remote host as a zfs volume so that deployments can share a common history
# of chainstate/blocks.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
if [ -d "$DATA_PATH" ]; then
COMPLETE_FILE_PATH="$DATA_PATH/complete"
if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/"
lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
else
echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
fi
fi
done
done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME"
@ -63,3 +79,10 @@ lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=def
echo "INFO: Success! We can now delete the base image."
lxc delete -f "$BASE_IMAGE_VM_NAME"
# now let's get a snapshot of each of the blocks/chainstate directories.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
done
done
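If a deployment ever needs to roll a cached volume back to one of these snapshots, LXD can restore it in place; a minimal sketch, assuming LXD's default snapshot naming (snap0, snap1, ...) and using the mainnet-blocks volume produced by the loop above:

# inspect the volume and its snapshots
lxc storage volume show ss-base mainnet-blocks --project=default
# roll the volume back to its first snapshot (snap0 is LXD's default name)
lxc storage volume restore ss-base mainnet-blocks snap0 --project=default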

View File

@ -44,6 +44,7 @@ DOMAIN_NAME=
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=true
BACKUP_APPS=true
BACKUP_BTCPAY=true
@ -61,6 +62,10 @@ USER_TARGET_PROJECT=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-certs)
RESTORE_CERTS=true
shift
;;
--restore-www)
RESTORE_WWW=true
BACKUP_APPS=false
@ -149,6 +154,8 @@ export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
export RESTORE_CERTS="$RESTORE_CERTS"
# TODO convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
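With RESTORE_CERTS exported, the new flag can be combined with the existing restore options; a hypothetical invocation (the entry-point name is an assumption for illustration, but both flags are parsed in the loop above):

# hypothetical entry-point name; --restore-certs and --restore-www come from "$@"
./deploy.sh --restore-certs --restore-www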
@ -195,21 +202,21 @@ function stub_site_definition {
cat >"$SITE_DEFINITION_PATH" <<EOL
# https://www.sovereign-stack.org/ss-deploy/#siteconf
export DOMAIN_NAME="${DOMAIN_NAME}"
#export BTCPAY_ALT_NAMES="tip,store,pay,send"
export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
export GITEA_MYSQL_PASSWORD="$(new_pass)"
export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
DOMAIN_NAME="${DOMAIN_NAME}"
# BTCPAY_ALT_NAMES="tip,store,pay,send"
SITE_LANGUAGE_CODES="en"
DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
DEPLOY_GHOST=true
DEPLOY_CLAMS=true
DEPLOY_NEXTCLOUD=false
NOSTR_ACCOUNT_PUBKEY=
DEPLOY_GITEA=false
GHOST_MYSQL_PASSWORD="$(new_pass)"
GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
GITEA_MYSQL_PASSWORD="$(new_pass)"
GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
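Note that the stubbed site definition no longer exports anything, so the consumer has to put these values into the environment itself; a minimal sketch of one way to do that, assuming the file is loaded with source:

# a sketch, assuming the site definition is consumed roughly like this elsewhere:
set -a                          # auto-export every assignment that follows
source "$SITE_DEFINITION_PATH"  # plain VAR=value lines now land in the environment
set +a                          # stop auto-exporting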
@ -375,7 +382,6 @@ EOL
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
# ipv4.nat=false
fi
export MAC_ADDRESS_TO_PROVISION=

View File

@ -42,6 +42,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
lxc start "$LXD_VM_NAME"
sleep 10
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
fi

View File

@ -111,12 +111,28 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
- path: /etc/docker/daemon.json
content: |
{
"registry-mirrors": ["${REGISTRY_URL}"]
}
EOF
if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
cat >> "$YAML_PATH" <<EOF
write_files:
- path: /etc/docker/daemon.json
permissions: 0644
owner: root
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
]
}
EOF
fi
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
@ -124,6 +140,7 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
- sudo chown -R ubuntu:ubuntu /home/ubuntu/
EOF
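After first boot, the mirror configuration can be verified from inside the guest; a quick check, assuming the docker daemon is already up:

# prints the configured mirror list, e.g. [https://registry.example.com/]
docker info --format '{{.RegistryConfig.Mirrors}}'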

View File

@ -42,3 +42,8 @@ while true; do
printf '.'
fi
done
# wait for cloud-init to complete before returning.
while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
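Polling for the boot-finished marker works on any image; if the guest's cloud-init is 17.1 or newer, the same wait can be expressed with the built-in blocking command:

# equivalent wait, assuming cloud-init >= 17.1 inside the guest
lxc exec "$LXC_INSTANCE_NAME" -- cloud-init status --wait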

View File

@ -98,7 +98,7 @@ if [ "$RESTART_FRONT_END" = true ]; then
fi
# generate the certs and grab a backup
if [ "$RUN_CERT_RENEWAL" = true ]; then
if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
./generate_certs.sh
fi
@ -121,13 +121,10 @@ if [ "$RESTART_FRONT_END" = true ]; then
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
mkdir -p "$LOCAL_BACKUP_PATH"
if [ "$RESTORE_WWW" = true ]; then
sleep 5
echo "STARTING restore_path.sh for letsencrypt."
# we grab a backup of the certs unless we're restoring.
if [ "$RESTORE_CERTS" = true ]; then
./restore_path.sh
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, then we may or may not back up.
else
./backup_path.sh
fi
done

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -eux
set -eu
cd "$(dirname "$0")"
FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
@ -8,6 +8,11 @@ if [ "$FILE_COUNT" = 0 ]; then
exit 0
fi
# if the user specified --restore-certs, we proceed with the letsencrypt restore without prompting.
if [ "$APP" = letsencrypt ] && [ "$RESTORE_CERTS" = true ]; then
USER_SAYS_YES=true
fi
# if the user said -y at the cli, we can skip this.
if [ "$USER_SAYS_YES" = false ]; then

View File

@ -1,11 +0,0 @@
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y tor
#COPY ./torrc /etc/tor/torrc
#RUN chown root:root /etc/tor/torrc
#RUN chmod 0644 /etc/tor/torrc
#RUN mkdir /data
#VOLUME /data
# RUN chown 1000:1000 -R /data
#USER 1000:1000
CMD tor -f /etc/tor/torrc

View File

@ -1,8 +0,0 @@
# we configure a hidden service that listens on onion:443 and forwards to nginx:443 at the torv3 onion address
SocksPort 0
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 nginx:443
Log info file /var/log/tor/tor.log

View File

@ -1,5 +0,0 @@
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 127.0.0.1:443
Log info file /var/log/tor/tor.log