Compare commits


No commits in common. "bde59ef71718fd8810762dbbbd4877a7968a8cd1" and "c661ac0be91276593f718debe1266e8bab3a3c65" have entirely different histories.

10 changed files with 65 additions and 95 deletions

View File

@@ -10,14 +10,7 @@ if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"
# if the image doesn't exist, download it from Ubuntu's image server
# TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
# we don't really need to cache this locally since it gets continually updated upstream.
if [ -d "$SS_JAMMY_PATH" ]; then
lxc image import "$SS_JAMMY_PATH/meta-bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.tar.xz" \
"$SS_JAMMY_PATH/bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.qcow2" \
--alias "$UBUNTU_BASE_IMAGE_NAME"
else
# copy the image down from canonical.
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
fi
# If the lxc VM does exist, then we will delete it (so we can start fresh)
@@ -34,11 +27,11 @@ else
# TODO move this sovereign-stack-base construction VM to separate dedicated IP
lxc config set "$BASE_IMAGE_VM_NAME"
for CHAIN in mainnet testnet; do
for DATA in blocks chainstate; do
lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
done
done
# for CHAIN in mainnet testnet; do
# for DATA in blocks chainstate; do
# lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/$CHAIN/$DATA"
# done
# done
lxc start "$BASE_IMAGE_VM_NAME"
@@ -50,23 +43,14 @@ else
# ensure the ssh service is listening at localhost
lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22
# If we have any chainstate or blocks in our SSME, let's push them to the
# remote host as a zfs volume that way deployments can share a common history
# of chainstate/blocks.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
if [ -d "$DATA_PATH" ]; then
COMPLETE_FILE_PATH="$DATA_PATH/complete"
if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/"
lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
else
echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
fi
fi
done
done
sleep 3
# for CHAIN in testnet mainnet; do
# for DATA in blocks chainstate; do
# lxc file push --recursive --project=default "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/" "$BASE_IMAGE_VM_NAME/home/ubuntu/$CHAIN/$DATA/"
# done
# done
# stop the VM and get a snapshot.
lxc stop "$BASE_IMAGE_VM_NAME"
@@ -79,10 +63,3 @@ lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=def
echo "INFO: Success! We can now delete the base image."
lxc delete -f "$BASE_IMAGE_VM_NAME"
# now let's get a snapshot of each of the blocks/chainstate directories.
for CHAIN in testnet mainnet; do
for DATA in blocks chainstate; do
lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
done
done
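The net effect of this first file's changes is that the base-image script stops special-casing a locally cached jammy image and stops seeding bitcoin chainstate/blocks into the VM; it now always copies the VM image from the public images: remote and lets --auto-update keep it fresh. A minimal sketch of that remaining pattern, with the variable values assumed for illustration rather than taken from this diff:

    # assumed values; the real ones come from the project's environment files
    UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-jammy"
    BASE_LXC_IMAGE="ubuntu/22.04/cloud"
    REMOTE_NAME="local"

    if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
        # pull the VM image from the images: remote and keep it refreshed upstream
        lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": \
            --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
    fi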

View File

@@ -44,7 +44,6 @@ DOMAIN_NAME=
RUN_CERT_RENEWAL=true
SKIP_WWW=false
RESTORE_WWW=false
RESTORE_CERTS=false
BACKUP_CERTS=true
BACKUP_APPS=true
BACKUP_BTCPAY=true
@@ -62,10 +61,6 @@ USER_TARGET_PROJECT=
# grab any modifications from the command line.
for i in "$@"; do
case $i in
--restore-certs)
RESTORE_CERTS=true
shift
;;
--restore-www)
RESTORE_WWW=true
BACKUP_APPS=false
@@ -154,8 +149,6 @@ export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
export USER_SAYS_YES="$USER_SAYS_YES"
export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
export RESTART_FRONT_END="$RESTART_FRONT_END"
export RESTORE_CERTS="$RESTORE_CERTS"
# todo convert this to Trezor-T
SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
@@ -202,21 +195,21 @@ function stub_site_definition {
cat >"$SITE_DEFINITION_PATH" <<EOL
# https://www.sovereign-stack.org/ss-deploy/#siteconf
DOMAIN_NAME="${DOMAIN_NAME}"
# BTCPAY_ALT_NAMES="tip,store,pay,send"
SITE_LANGUAGE_CODES="en"
DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
DEPLOY_GHOST=true
DEPLOY_CLAMS=true
DEPLOY_NEXTCLOUD=false
NOSTR_ACCOUNT_PUBKEY=
DEPLOY_GITEA=false
GHOST_MYSQL_PASSWORD="$(new_pass)"
GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
GITEA_MYSQL_PASSWORD="$(new_pass)"
GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
export DOMAIN_NAME="${DOMAIN_NAME}"
#export BTCPAY_ALT_NAMES="tip,store,pay,send"
export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
export GHOST_MYSQL_PASSWORD="$(new_pass)"
export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
export GITEA_MYSQL_PASSWORD="$(new_pass)"
export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
EOL
@@ -382,6 +375,7 @@ EOL
# check if the OVN network exists in this project.
if ! lxc network list | grep -q "ss-ovn"; then
lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
# ipv4.nat=false
fi
export MAC_ADDRESS_TO_PROVISION=
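Aside from dropping RESTORE_CERTS, the main change in this file is that every value stub_site_definition writes into the site definition is now exported, so anything that sources the file passes the values on to child processes (sub-scripts, compose invocations, and so on). A small sketch of the difference that makes, using a hypothetical path and placeholder values:

    SITE_DEFINITION_PATH="/tmp/site_definition"   # hypothetical path for illustration

    cat > "$SITE_DEFINITION_PATH" <<EOL
    export DOMAIN_NAME="example.com"
    export SITE_LANGUAGE_CODES="en"
    EOL

    source "$SITE_DEFINITION_PATH"

    # because the assignments are exported, a child process can see them too
    bash -c 'echo "deploying $DOMAIN_NAME ($SITE_LANGUAGE_CODES)"'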

View File

@@ -42,7 +42,6 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"
lxc start "$LXD_VM_NAME"
sleep 10
bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
fi
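With the call to wait_for_lxc_ip.sh removed, the script now relies on the fixed sleep 10 alone before continuing. If that turns out to be flaky, a polling loop along these lines would restore the old behaviour; the helper itself is not part of this diff, so the names and logic below are assumptions:

    LXD_VM_NAME="${LXD_VM_NAME:-ss-mgmt}"   # assumed name for illustration

    while true; do
        # column 4 of `lxc list` is the IPv4 address; it stays empty until the guest is up
        IP="$(lxc list "$LXD_VM_NAME" --format csv --columns 4 | head -n1 | cut -d' ' -f1)"
        if [ -n "$IP" ]; then
            echo "INFO: $LXD_VM_NAME is up at $IP"
            break
        fi
        printf '.'
        sleep 2
    done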

View File

@@ -111,28 +111,12 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
ssh_authorized_keys:
- ${SSH_AUTHORIZED_KEY}
EOF
if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
cat >> "$YAML_PATH" <<EOF
write_files:
- path: /etc/docker/daemon.json
permissions: 0644
owner: root
content: |
{
"registry-mirrors": [
"${REGISTRY_URL}"
]
}
{
"registry-mirrors": ["${REGISTRY_URL}"]
}
EOF
fi
cat >> "$YAML_PATH" <<EOF
runcmd:
- sudo mkdir -m 0755 -p /etc/apt/keyrings
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
@@ -140,7 +124,6 @@ EOF
- sudo apt-get update
- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
- sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
- sudo chown -R ubuntu:ubuntu /home/ubuntu/
EOF
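For reference, the write_files stanza above simply lands a registry mirror in /etc/docker/daemon.json on first boot; the same configuration can be applied or checked by hand on an already-provisioned host. The mirror URL below is a placeholder, not a value from this diff:

    REGISTRY_URL="http://registry-mirror.example.local:5000"   # placeholder mirror

    sudo tee /etc/docker/daemon.json >/dev/null <<EOF
    {
      "registry-mirrors": ["${REGISTRY_URL}"]
    }
    EOF

    sudo systemctl restart docker
    # the configured mirror should show up in the daemon's registry config
    docker info --format '{{.RegistryConfig.Mirrors}}'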

View File

@@ -42,8 +42,3 @@ while true; do
printf '.'
fi
done
# wait for cloud-init to complete before returning.
while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
sleep 1
done
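The removed loop above polled for /var/lib/cloud/instance/boot-finished. If that guarantee is ever needed again, cloud-init ships a built-in blocking wait that can be driven over lxc exec; the instance name below is an assumption:

    LXC_INSTANCE_NAME="${LXC_INSTANCE_NAME:-ss-base}"   # assumed name

    # blocks until cloud-init reports that first boot has completed (or failed)
    lxc exec "$LXC_INSTANCE_NAME" -- cloud-init status --wait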

View File

@@ -98,7 +98,7 @@ if [ "$RESTART_FRONT_END" = true ]; then
fi
# generate the certs and grab a backup
if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
if [ "$RUN_CERT_RENEWAL" = true ]; then
./generate_certs.sh
fi
@@ -121,10 +121,13 @@ if [ "$RESTART_FRONT_END" = true ]; then
export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
mkdir -p "$LOCAL_BACKUP_PATH"
# we grab a backup of the certs unless we're restoring.
if [ "$RESTORE_CERTS" = true ]; then
if [ "$RESTORE_WWW" = true ]; then
sleep 5
echo "STARTING restore_path.sh for letsencrypt."
./restore_path.sh
else
#ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
elif [ "$BACKUP_APPS" = true ]; then
# if we're not restoring, then we may or may not back up.
./backup_path.sh
fi
done
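Taken together, these hunks reduce the per-app www logic to a three-way choice: restore from the local backup, take a fresh backup, or do neither. A condensed sketch of that decision, with defaults assumed for illustration:

    RESTORE_WWW="${RESTORE_WWW:-false}"
    BACKUP_APPS="${BACKUP_APPS:-true}"

    if [ "$RESTORE_WWW" = true ]; then
        echo "INFO: restoring the letsencrypt path from the local backup."
        # ./restore_path.sh
    elif [ "$BACKUP_APPS" = true ]; then
        echo "INFO: backing up the letsencrypt path before deploying."
        # ./backup_path.sh
    else
        echo "INFO: neither restoring nor backing up; leaving the remote data untouched."
    fi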

View File

@@ -1,6 +1,6 @@
#!/bin/bash
set -eu
set -eux
cd "$(dirname "$0")"
FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
@@ -8,11 +8,6 @@ if [ "$FILE_COUNT" = 0 ]; then
exit 0
fi
# if the user specified --restore-certs then we'll go forward on letsencrypt
if [ "$APP" = letsencrypt ] && [ "$RESTORE_CERTS" = true ]; then
USER_SAYS_YES=true
fi
# if the user said -y at the cli, we can skip this.
if [ "$USER_SAYS_YES" = false ]; then
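The hunk ends just inside the USER_SAYS_YES gate; a gate like this usually wraps an interactive confirmation so that -y on the command line can skip it. A hedged sketch of that pattern, with the prompt wording and the APP default assumed rather than taken from the script:

    APP="${APP:-letsencrypt}"              # set by the caller in the real script
    USER_SAYS_YES="${USER_SAYS_YES:-false}"

    if [ "$USER_SAYS_YES" = false ]; then
        read -r -p "About to restore '$APP' from backup; continue? (y/N) " RESPONSE
        if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "Y" ]; then
            echo "INFO: aborting at the user's request."
            exit 0
        fi
    fi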

www/tor/Dockerfile (new file, 11 lines)
View File

@@ -0,0 +1,11 @@
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y tor
#COPY ./torrc /etc/tor/torrc
#RUN chown root:root /etc/tor/torrc
#RUN chmod 0644 /etc/tor/torrc
#RUN mkdir /data
#VOLUME /data
# RUN chown 1000:1000 -R /data
#USER 1000:1000
CMD tor -f /etc/tor/torrc
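Because the COPY, VOLUME and USER lines are commented out, the torrc and the tor state have to be supplied at run time. A hedged sketch of building and running the image with mounts (image, container and volume names are assumptions); running as the debian-tor user that the tor package creates keeps tor happy about the ownership of /var/lib/tor:

    docker build -t ss-tor ./www/tor

    # the torrc forwards to nginx:443, so in a real deployment this container would
    # join the same docker network as the nginx container
    docker run -d --name ss-tor --user debian-tor \
        -v "$PWD/www/tor/torrc:/etc/tor/torrc:ro" \
        -v tor-data:/var/lib/tor \
        ss-tor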

www/tor/torrc (new file, 8 lines)
View File

@@ -0,0 +1,8 @@
# we configure a hidden service that listens on onion:443 and redirects to nginx:443 at the torv3 onion address
SocksPort 0
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 nginx:443
Log info file /var/log/tor/tor.log
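Once tor has started with this torrc it generates the service keys and writes the public onion address into HiddenServiceDir; with the hypothetical ss-tor container from the sketch above, the address and the configured log can be read back like this:

    docker exec ss-tor cat /var/lib/tor/www/hostname

    # tor's own log (configured above) is useful when the service does not come up
    docker exec ss-tor tail -n 20 /var/log/tor/tor.log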

www/tor/torrc-init (new file, 5 lines)
View File

@@ -0,0 +1,5 @@
HiddenServiceDir /var/lib/tor/www
HiddenServiceVersion 3
HiddenServicePort 443 127.0.0.1:443
Log info file /var/log/tor/tor.log
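torrc-init drops the SocksPort 0 line and points the hidden service at 127.0.0.1:443 rather than the nginx container, which suggests it is meant for an initial run before nginx is reachable (for example, to pre-generate the onion keys); that reading is an inference, not something this diff states. Comparing the two variants side by side makes the difference easy to see:

    diff -u www/tor/torrc www/tor/torrc-init || true   # diff exits non-zero when the files differ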