Compare commits


No commits in common. "bde59ef71718fd8810762dbbbd4877a7968a8cd1" and "c661ac0be91276593f718debe1266e8bab3a3c65" have entirely different histories.

10 changed files with 65 additions and 95 deletions


@@ -10,14 +10,7 @@ if ! lxc image list --format csv --columns l | grep -q "$UBUNTU_BASE_IMAGE_NAME"
     # if the image doesn't exist, download it from Ubuntu's image server
     # TODO see if we can fetch this file from a more censorship-resistant source, e.g., ipfs
     # we don't really need to cache this locally since it gets continually updated upstream.
-    if [ -d "$SS_JAMMY_PATH" ]; then
-        lxc image import "$SS_JAMMY_PATH/meta-bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.tar.xz" \
-            "$SS_JAMMY_PATH/bf1a2627bdddbfb0a9bf1f8ae146fa794800c6c91281d3db88c8d762f58bd057.qcow2" \
-            --alias "$UBUNTU_BASE_IMAGE_NAME"
-    else
-        # copy the image down from canonical.
-        lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
-    fi
+    lxc image copy "images:$BASE_LXC_IMAGE" "$REMOTE_NAME": --alias "$UBUNTU_BASE_IMAGE_NAME" --public --vm --auto-update
 fi

 # If the lxc VM does exist, then we will delete it (so we can start fresh)
@@ -34,11 +27,11 @@ else
     # TODO move this sovereign-stack-base construction VM to separate dedicated IP
     lxc config set "$BASE_IMAGE_VM_NAME"

-    for CHAIN in mainnet testnet; do
-        for DATA in blocks chainstate; do
-            lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
-        done
-    done
+    # for CHAIN in mainnet testnet; do
+    #     for DATA in blocks chainstate; do
+    #         lxc storage volume attach ss-base "$CHAIN-$DATA" "$BASE_IMAGE_VM_NAME" "/home/ubuntu/$CHAIN/$DATA"
+    #     done
+    # done

     lxc start "$BASE_IMAGE_VM_NAME"
@@ -50,23 +43,14 @@ else
     # ensure the ssh service is listening at localhost
     lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it -t 100 127.0.0.1:22

-    # If we have any chainstate or blocks in our SSME, let's push them to the
-    # remote host as a zfs volume so that deployments can share a common history
-    # of chainstate/blocks.
-    for CHAIN in testnet mainnet; do
-        for DATA in blocks chainstate; do
-            DATA_PATH="/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA"
-            if [ -d "$DATA_PATH" ]; then
-                COMPLETE_FILE_PATH="$DATA_PATH/complete"
-                if lxc exec "$BASE_IMAGE_VM_NAME" -- [ ! -f "$COMPLETE_FILE_PATH" ]; then
-                    lxc file push --recursive --project=default "$DATA_PATH/" "$BASE_IMAGE_VM_NAME/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/"
-                    lxc exec "$BASE_IMAGE_VM_NAME" -- su ubuntu - bash -c "echo $(date) > $COMPLETE_FILE_PATH"
-                else
-                    echo "INFO: it appears as though $CHAIN/$DATA has already been initialized. Continuing."
-                fi
-            fi
-        done
-    done
+    sleep 3
+
+    # for CHAIN in testnet mainnet; do
+    #     for DATA in blocks chainstate; do
+    #         lxc file push --recursive --project=default "/home/ubuntu/.ss/cache/bitcoin/$CHAIN/$DATA/" "$BASE_IMAGE_VM_NAME/home/ubuntu/$CHAIN/$DATA/"
+    #     done
+    # done

     # stop the VM and get a snapshot.
     lxc stop "$BASE_IMAGE_VM_NAME"
@@ -79,10 +63,3 @@ lxc publish --public "$BASE_IMAGE_VM_NAME/$UBUNTU_BASE_IMAGE_NAME" --project=def
 echo "INFO: Success! We can now delete the base image."
 lxc delete -f "$BASE_IMAGE_VM_NAME"
-
-# now let's get a snapshot of each of the blocks/chainstate directories.
-for CHAIN in testnet mainnet; do
-    for DATA in blocks chainstate; do
-        lxc storage volume snapshot ss-base --project=default "$CHAIN-$DATA"
-    done
-done


@@ -44,7 +44,6 @@ DOMAIN_NAME=
 RUN_CERT_RENEWAL=true
 SKIP_WWW=false
 RESTORE_WWW=false
-RESTORE_CERTS=false
 BACKUP_CERTS=true
 BACKUP_APPS=true
 BACKUP_BTCPAY=true
@@ -62,10 +61,6 @@ USER_TARGET_PROJECT=
 # grab any modifications from the command line.
 for i in "$@"; do
     case $i in
-        --restore-certs)
-            RESTORE_CERTS=true
-            shift
-            ;;
         --restore-www)
             RESTORE_WWW=true
             BACKUP_APPS=false
@@ -154,8 +149,6 @@ export REMOTE_PATH="$REMOTES_DIR/$REMOTE_NAME"
 export USER_SAYS_YES="$USER_SAYS_YES"
 export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
 export RESTART_FRONT_END="$RESTART_FRONT_END"
-export RESTORE_CERTS="$RESTORE_CERTS"

 # todo convert this to Trezor-T
 SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
@@ -202,21 +195,21 @@ function stub_site_definition {
         cat >"$SITE_DEFINITION_PATH" <<EOL
 # https://www.sovereign-stack.org/ss-deploy/#siteconf

-DOMAIN_NAME="${DOMAIN_NAME}"
-# BTCPAY_ALT_NAMES="tip,store,pay,send"
-SITE_LANGUAGE_CODES="en"
-DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
-DEPLOY_GHOST=true
-DEPLOY_CLAMS=true
-DEPLOY_NEXTCLOUD=false
-NOSTR_ACCOUNT_PUBKEY=
-DEPLOY_GITEA=false
-GHOST_MYSQL_PASSWORD="$(new_pass)"
-GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
-NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
-NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
-GITEA_MYSQL_PASSWORD="$(new_pass)"
-GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
+export DOMAIN_NAME="${DOMAIN_NAME}"
+#export BTCPAY_ALT_NAMES="tip,store,pay,send"
+export SITE_LANGUAGE_CODES="en"
+export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
+export DEPLOY_GHOST=true
+export DEPLOY_CLAMS=true
+export DEPLOY_NEXTCLOUD=false
+export NOSTR_ACCOUNT_PUBKEY=
+export DEPLOY_GITEA=false
+export GHOST_MYSQL_PASSWORD="$(new_pass)"
+export GHOST_MYSQL_ROOT_PASSWORD="$(new_pass)"
+export NEXTCLOUD_MYSQL_PASSWORD="$(new_pass)"
+export NEXTCLOUD_MYSQL_ROOT_PASSWORD="$(new_pass)"
+export GITEA_MYSQL_PASSWORD="$(new_pass)"
+export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
 EOL
@@ -382,6 +375,7 @@ EOL
     # check if the OVN network exists in this project.
     if ! lxc network list | grep -q "ss-ovn"; then
         lxc network create ss-ovn --type=ovn network=lxdbr1 ipv6.address=none
+        # ipv4.nat=false
     fi

     export MAC_ADDRESS_TO_PROVISION=
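
Worth noting for the stub_site_definition change above: every variable in the generated site definition is now prefixed with export. A minimal bash illustration of why that matters, using a made-up path and values rather than anything from this commit: a variable that is merely assigned when the file is sourced is not visible to the child processes the deploy scripts spawn, while an exported one is.

# hypothetical stand-in for a generated site definition (not part of this commit)
cat > /tmp/site_definition <<'EOL'
export DOMAIN_NAME="example.com"
SITE_LANGUAGE_CODES="en"
EOL

source /tmp/site_definition
bash -c 'echo "${DOMAIN_NAME:-unset} ${SITE_LANGUAGE_CODES:-unset}"'
# prints "example.com unset": only the exported variable reaches the child shell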


@@ -42,7 +42,6 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
     lxc config device override "$LXD_VM_NAME" root size="${ROOT_DISK_SIZE_GB}GB"

     lxc start "$LXD_VM_NAME"
-    sleep 10

     bash -c "./wait_for_lxc_ip.sh --lxd-name=$LXD_VM_NAME"
 fi


@@ -111,28 +111,12 @@ if [ "$VIRTUAL_MACHINE" = base ]; then
 ssh_authorized_keys:
   - ${SSH_AUTHORIZED_KEY}
-EOF
-
-    if [ "$REGISTRY_URL" != "https://index.docker.io/v1" ]; then
-        cat >> "$YAML_PATH" <<EOF
-write_files:
   - path: /etc/docker/daemon.json
-    permissions: 0644
-    owner: root
     content: |
       {
-        "registry-mirrors": [
-          "${REGISTRY_URL}"
-        ]
+        "registry-mirrors": ["${REGISTRY_URL}"]
       }
-EOF
-    fi
-
-    cat >> "$YAML_PATH" <<EOF
 runcmd:
   - sudo mkdir -m 0755 -p /etc/apt/keyrings
   - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
@@ -140,7 +124,6 @@ EOF
   - sudo apt-get update
   - sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
   - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-server
-  - sudo chown -R ubuntu:ubuntu /home/ubuntu/
 EOF
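
The consolidated write_files entry above bakes the registry mirror into /etc/docker/daemon.json at first boot. A rough way to sanity-check the result on the provisioned VM, assuming cloud-init has been given time to finish and the docker daemon has started:

# run on the provisioned VM; standard cloud-init and docker CLI commands
cloud-init status --wait                      # block until first-boot provisioning is done
cat /etc/docker/daemon.json                   # should contain the "registry-mirrors" entry
docker info | grep -iA1 "registry mirrors"    # docker lists configured mirrors here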


@@ -42,8 +42,3 @@ while true; do
         printf '.'
     fi
 done
-
-# wait for cloud-init to complete before returning.
-while lxc exec "$LXC_INSTANCE_NAME" -- [ ! -f /var/lib/cloud/instance/boot-finished ]; do
-    sleep 1
-done


@@ -98,7 +98,7 @@ if [ "$RESTART_FRONT_END" = true ]; then
 fi

 # generate the certs and grab a backup
-if [ "$RUN_CERT_RENEWAL" = true ] && [ "$RESTORE_CERTS" = false ]; then
+if [ "$RUN_CERT_RENEWAL" = true ]; then
     ./generate_certs.sh
 fi
@@ -121,10 +121,13 @@ if [ "$RESTART_FRONT_END" = true ]; then
         export LOCAL_BACKUP_PATH="$SITE_PATH/backups/www/$APP"
         mkdir -p "$LOCAL_BACKUP_PATH"

-        # we grab a backup of the certs unless we're restoring.
-        if [ "$RESTORE_CERTS" = true ]; then
-            echo "STARTING restore_path.sh for letsencrypt."
+        if [ "$RESTORE_WWW" = true ]; then
+            sleep 5
             ./restore_path.sh
-        else
+            #ssh "$PRIMARY_WWW_FQDN" sudo chown ubuntu:ubuntu "$REMOTE_HOME/$APP"
+        elif [ "$BACKUP_APPS" = true ]; then
+            # if we're not restoring, then we may or may not back up.
             ./backup_path.sh
         fi
     done


@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -eux
 cd "$(dirname "$0")"

 FILE_COUNT="$(find "$LOCAL_BACKUP_PATH" -type f | wc -l)"
@@ -8,11 +8,6 @@ if [ "$FILE_COUNT" = 0 ]; then
     exit 0
 fi

-# if the user specified --restore-certs then we'll go forward on letsencrypt
-if [ "$APP" = letsencrypt ] && [ "$RESTORE_CERTS" = true ]; then
-    USER_SAYS_YES=true
-fi
-
 # if the user said -y at the cli, we can skip this.
 if [ "$USER_SAYS_YES" = false ]; then

www/tor/Dockerfile (new file)

@@ -0,0 +1,11 @@
+FROM ubuntu:22.04
+RUN apt-get update && apt-get install -y tor
+#COPY ./torrc /etc/tor/torrc
+#RUN chown root:root /etc/tor/torrc
+#RUN chmod 0644 /etc/tor/torrc
+#RUN mkdir /data
+#VOLUME /data
+# RUN chown 1000:1000 -R /data
+#USER 1000:1000
+CMD tor -f /etc/tor/torrc

www/tor/torrc (new file)

@@ -0,0 +1,8 @@
+# we configure a hidden service that listens on onion:443 and redirects to nginx:443 at the torv3 onion address
+SocksPort 0
+HiddenServiceDir /var/lib/tor/www
+HiddenServiceVersion 3
+HiddenServicePort 443 nginx:443
+Log info file /var/log/tor/tor.log

www/tor/torrc-init (new file)

@@ -0,0 +1,5 @@
+HiddenServiceDir /var/lib/tor/www
+HiddenServiceVersion 3
+HiddenServicePort 443 127.0.0.1:443
+Log info file /var/log/tor/tor.log
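
The three www/tor files above add a Tor hidden-service container whose torrc forwards the onion service's port 443 to an upstream called nginx. As a rough sketch only, with the network and container names being assumptions rather than anything defined in this change, the image could be exercised by hand like this:

# build the image from the new Dockerfile
docker build -t ss-tor ./www/tor

# torrc points HiddenServicePort at "nginx:443", so the tor container must share
# a user-defined docker network with a container named "nginx"
docker network create ss-www 2>/dev/null || true
docker run -d --name tor --network ss-www \
    -v "$PWD/www/tor/torrc:/etc/tor/torrc:ro" \
    ss-tor

# the generated onion address is written inside the HiddenServiceDir
docker exec tor cat /var/lib/tor/www/hostname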