forked from ss/sovereign-stack

Compare commits: b6e0e5ac4d ... 8caae387b6 (5 commits)

Commits (SHA1):
8caae387b6
610946e357
ddb0fbef57
4a84fd24e5
16f88d964d
@@ -2,8 +2,9 @@

set -e

export DEPLOY_WWW_SERVER=false

export DEPLOY_GHOST=false
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=true

export DEPLOY_NEXTCLOUD=false
export DEPLOY_GITEA=false
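Note: these DEPLOY_* defaults gate which services the later hunks provision (the certbot and docker-compose stubs below test them). A minimal illustrative override, assuming a site or cluster definition is sourced as a plain shell fragment after these defaults — the file layout here is an assumption, only the variable names come from this diff:

# hypothetical per-deployment override, sourced after the defaults above
export DEPLOY_CLAMS=true
export DEPLOY_GITEA=true    # flip a default for this deployment only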
@@ -80,7 +81,7 @@ export PROJECTS_DIR="$HOME/ss-projects"

export SITES_PATH="$HOME/ss-sites"

# The base VM image.
export LXD_UBUNTU_BASE_VERSION="22.04"
export LXD_UBUNTU_BASE_VERSION="jammy"
export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
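A quick note on the rename above: the `${LXD_UBUNTU_BASE_VERSION//./-}` expansion replaces every dot with a dash, which only matters for the numeric form. Plain bash illustration, nothing repo-specific:

LXD_UBUNTU_BASE_VERSION="22.04"; echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"   # prints ss-base-22-04
LXD_UBUNTU_BASE_VERSION="jammy"; echo "ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"   # prints ss-base-jammy (no dots to replace)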
@@ -87,6 +87,9 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then

    esac
done

# first let's copy our ssh pubkey to the remote server so we don't have to login constantly.
ssh-copy-id -i "$HOME/.ssh/id_rsa.pub" "ubuntu@$FQDN"

if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
    echo "INFO: It looks like you didn't provide input on the command line for the data plane macvlan interface."
    echo "      We need to know which interface that is! Enter it here now."
@@ -151,18 +154,12 @@ if ! command -v lxc >/dev/null 2>&1; then

fi

ssh -t "ubuntu@$FQDN" "
set -e

# install tools/dependencies
sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y

# install lxd as a snap if it's not installed.
if ! snap list | grep -q lxd; then
    sudo snap install lxd
# install dependencies.
ssh "ubuntu@$FQDN" sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils nano -y
if ! ssh "ubuntu@$FQDN" snap list | grep -q lxd; then
    ssh "ubuntu@$FQDN" sudo snap install lxd --channel=5.10/stable
    sleep 10
fi
"

# if the DATA_PLANE_MACVLAN_INTERFACE is not specified, then we'll
# just attach VMs to the network interface used for the default route.
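With the install now issued as individual ssh commands rather than one quoted remote script, a simple follow-up check (standard snap/ssh usage; a sketch, not part of this changeset) confirms the remote snap landed on the pinned channel:

ssh "ubuntu@$FQDN" snap list lxd   # the Tracking column should show 5.10/stable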
@@ -40,24 +40,3 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then

fi

source "$CLUSTER_DEFINITION"

# Source the project definition.
# Now let's load the project definition.
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
export PROJECT_NAME="$PROJECT_NAME"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"

if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
    echo "ERROR: 'project_definition' not found at $PROJECT_DEFINITION_PATH."
    exit 1
fi

source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"

if [ -z "$PRIMARY_DOMAIN" ]; then
    echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
    exit 1
fi
@@ -17,7 +17,7 @@ fi

# If the lxc VM does exist, then we will delete it (so we can start fresh)
if lxc list -q --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
    # if there's no snapshot, we dispense with the old image and try again.
    if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$(date +%Y-%m)"; then
    if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$LXD_UBUNTU_BASE_VERSION"; then
        lxc delete "$BASE_IMAGE_VM_NAME" --force
        ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
    fi
@@ -31,12 +31,13 @@ else

    lxc start "$BASE_IMAGE_VM_NAME"

    sleep 70
    sleep 30

    # ensure the ssh service is listening at localhost
    lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it 127.0.0.1:22 -t 120

    # stop the VM and get a snapshot.
    lxc stop "$BASE_IMAGE_VM_NAME"
    lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$(date +%Y-%m)"
    lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$LXD_UBUNTU_BASE_VERSION"
fi
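The fixed sleep before wait-for-it is a guess at boot time; a polling alternative is possible with the same lxc CLI (a sketch only, not part of this changeset):

# wait for the VM agent to respond instead of sleeping a fixed interval
for _ in $(seq 1 30); do
    lxc exec "$BASE_IMAGE_VM_NAME" -- true >/dev/null 2>&1 && break
    sleep 2
done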
@@ -292,6 +292,7 @@ export DOMAIN_NAME="${DOMAIN_NAME}"

export SITE_LANGUAGE_CODES="en"
export DUPLICITY_BACKUP_PASSPHRASE="$(new_pass)"
export DEPLOY_GHOST=true
export DEPLOY_CLAMS=true
export DEPLOY_NEXTCLOUD=false
export NOSTR_ACCOUNT_PUBKEY=
export DEPLOY_GITEA=false
@@ -396,10 +397,6 @@ export DOMAIN_COUNT=$(("$(echo "$DOMAIN_LIST" | tr -cd , | wc -c)"+1))

# let's provision our primary domain first.
export DOMAIN_NAME="$PRIMARY_DOMAIN"

# we deploy the WWW and btcpay server under the PRIMARY_DOMAIN.
export DEPLOY_WWW_SERVER=true
export DEPLOY_BTCPAY_SERVER=true

export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
export PRIMARY_WWW_FQDN="$WWW_HOSTNAME.$DOMAIN_NAME"
@@ -408,7 +405,6 @@ stub_site_definition

# bring the VMs up under the primary domain name.
instantiate_vms

# let's stub out the rest of our site definitions, if any.
for DOMAIN_NAME in ${OTHER_SITES_LIST//,/ }; do
    export DOMAIN_NAME="$DOMAIN_NAME"
@@ -420,14 +416,14 @@ done

# now let's run the www and btcpay-specific provisioning scripts.
if [ "$SKIP_WWW" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
if [ "$SKIP_WWW" = false ]; then
    bash -c "./www/go.sh"
    ssh ubuntu@"$PRIMARY_WWW_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
fi

export DOMAIN_NAME="$PRIMARY_DOMAIN"
export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
if [ "$SKIP_BTCPAY" = false ]; then
    ./btcpayserver/go.sh

    ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
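The .ss-githead marker written above records which commit provisioned each host. A hedged sketch of how it could be compared against the local HEAD (the path and variables come from this diff; the comparison itself is illustrative, not part of the changeset):

REMOTE_HEAD="$(ssh ubuntu@"$PRIMARY_WWW_FQDN" cat /home/ubuntu/.ss-githead 2>/dev/null || true)"
if [ "$REMOTE_HEAD" != "$LATEST_GIT_COMMIT" ]; then
    echo "INFO: www host was provisioned from '$REMOTE_HEAD'; local HEAD is '$LATEST_GIT_COMMIT'."
fi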
@@ -33,7 +33,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then

    ./stub_lxc_profile.sh "$LXD_VM_NAME"

    lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$(date +%Y-%m)" "$LXD_VM_NAME"
    lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$LXD_UBUNTU_BASE_VERSION" "$LXD_VM_NAME"

    # now let's create a new VM to work with.
    #@lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
@@ -22,6 +22,8 @@ fi

. ./cluster_env.sh

. ./project_env.sh

for VM in www btcpayserver; do
    LXD_NAME="$VM-${DOMAIN_NAME//./-}"
@@ -21,6 +21,8 @@ done

. ./cluster_env.sh

. ./project_env.sh

# Check to see if any of the VMs actually don't exist.
# (we only migrate instantiated vms)
for VM in www btcpayserver; do
deployment/project_env.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@

#!/bin/bash

set -eu
cd "$(dirname "$0")"

# Source the project definition.
# Now let's load the project definition.
PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
export PROJECT_NAME="$PROJECT_NAME"
PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"

if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
    echo "ERROR: 'project_definition' not found at $PROJECT_DEFINITION_PATH."
    exit 1
fi

source "$PROJECT_DEFINITION_PATH"
export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
source "$PRIMARY_SITE_DEFINITION_PATH"

if [ -z "$PRIMARY_DOMAIN" ]; then
    echo "ERROR: The PRIMARY_DOMAIN is not specified. Check your cluster definition."
    exit 1
fi
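Elsewhere in this changeset this new helper replaces the per-script project loading removed above; the call pattern, taken directly from the other hunks, is just two sourced lines near the top of each deployment script:

. ./cluster_env.sh
. ./project_env.sh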
@@ -50,12 +50,6 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then

    preserve_hostname: false
    fqdn: ${BASE_IMAGE_VM_NAME}

    apt:
      sources:
        docker.list:
          source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable"
          keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88

    packages:
      - curl
      - ssh-askpass
@@ -76,10 +70,6 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then

      - wait-for-it
      - dnsutils
      - wget
      - docker-ce
      - docker-ce-cli
      - containerd.io
      - docker-compose-plugin

    groups:
      - docker
@@ -104,18 +94,33 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then

        UsePAM no
        LogLevel INFO

    - path: /etc/docker/daemon.json
      content: |
        {
          "registry-mirrors": ["${REGISTRY_URL}"]
        }

    runcmd:
      - sudo mkdir -m 0755 -p /etc/apt/keyrings
      - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
      - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
      - sudo apt-get update
      - sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
      - sudo apt-get install -y openssh-server

EOF
#,
#"labels": [ "githead=${LATEST_GIT_COMMIT}" ]

#    apt:
#      sources:
#        docker.list:
#          source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
#          keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88

#    - path: /etc/docker/daemon.json
#      content: |
#        {
#          "registry-mirrors": ["${REGISTRY_URL}"],
#          "labels": [ "githead=${LATEST_GIT_COMMIT}" ]
#        }

# - sudo apt-get update
#- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

else
    # all other machines.
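After the base VM boots with this cloud-init payload, the runcmd-driven Docker install can be verified from the host with standard cloud-init and docker commands (a sketch, not part of the changeset):

lxc exec "$BASE_IMAGE_VM_NAME" -- cloud-init status --wait
lxc exec "$BASE_IMAGE_VM_NAME" -- docker --version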
@@ -1,23 +0,0 @@

#!/bin/bash

set -ex
cd "$(dirname "$0")"

# deploy clams wallet.
LOCAL_CLAMS_REPO_PATH="$(pwd)/www/clams"
if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
    if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
        git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_REPO_PATH"
    else
        cd "$LOCAL_CLAMS_REPO_PATH"
        git pull
        cd -
    fi
fi

lxc file push -r -p ./clams "${PRIMARY_WWW_FQDN//./-}"/home/ubuntu/code

# run the primary script and output the files to --output-path
ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/clams/browser-app"
ssh "$PRIMARY_WWW_FQDN" "$REMOTE_HOME/code/clams/browser-app/run.sh --output-path=$REMOTE_HOME/clams/browser-app"
ssh "$PRIMARY_WWW_FQDN" rm -rf "$REMOTE_HOME/code"
@@ -25,7 +25,9 @@ for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do

    if [ "$DOMAIN_NAME" = "$PRIMARY_DOMAIN" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
    if [ "$DEPLOY_NEXTCLOUD" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NEXTCLOUD_FQDN"; fi
    if [ "$DEPLOY_GITEA" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $GITEA_FQDN"; fi
    if [ "$DEPLOY_CLAMS" = true ]; then DOMAIN_STRING="$DOMAIN_STRING -d $CLAMS_FQDN"; fi
    if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then DOMAIN_STRING="$DOMAIN_STRING -d $NOSTR_FQDN"; fi

    # if BTCPAY_ALT_NAMES has been set by the admin, iterate over the list
    # and append the domain names to the certbot request
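For context, DOMAIN_STRING simply accumulates one "-d <fqdn>" flag per enabled service for the certbot request; an illustrative value with placeholder domains (the real FQDNs come from $CLAMS_FQDN, $NEXTCLOUD_FQDN, and friends):

# e.g. with DEPLOY_CLAMS=true and a nostr pubkey set, the flags appended here look like:
DOMAIN_STRING="$DOMAIN_STRING -d clams.example.com -d nostr.example.com"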
@@ -3,6 +3,9 @@

set -eu
cd "$(dirname "$0")"

# prepare clams images and such
./prepare_clams.sh

# Create the nginx config file which covers all domains.
bash -c ./stub/nginx_config.sh
@@ -137,7 +140,6 @@ fi

./stub/nextcloud_yml.sh
./stub/gitea_yml.sh
./stub/nostr_yml.sh
./deploy_clams.sh

# # start a browser session; point it to port 80 to ensure HTTPS redirect.
# # WWW_FQDN is in our certificate, so we resolve to that.
@@ -157,6 +159,3 @@ fi

#     xdg-open "http://$GITEA_FQDN" > /dev/null 2>&1
# fi

# if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
#     xdg-open "http://$BTCPAY_USER_FQDN" > /dev/null 2>&1
# fi
deployment/www/prepare_clams.sh (new executable file, 46 lines)
@@ -0,0 +1,46 @@

#!/bin/bash

set -ex
cd "$(dirname "$0")"

# deploy clams wallet.
LOCAL_CLAMS_REPO_PATH="$(pwd)/clams"

if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
    git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_REPO_PATH"
else
    cd "$LOCAL_CLAMS_REPO_PATH"
    git config --global pull.rebase false
    git pull
    cd -
fi

# # overwrite the clams/.env file with Sovereign Stack specific parameters.
# CLAMS_CONFIG_PATH="$LOCAL_CLAMS_REPO_PATH/.env"
# cat > "$CLAMS_CONFIG_PATH" <<EOF
# CLAMS_FQDN=${CLAMS_FQDN}
# BTC_CHAIN=${BITCOIN_CHAIN}
# DEPLOY_BTC_BACKEND=false
# EOF

# lxc file push -r -p "$LOCAL_CLAMS_REPO_PATH" "${PRIMARY_WWW_FQDN//./-}$REMOTE_HOME"

BROWSER_APP_GIT_TAG="1.5.0"
BROWSER_APP_GIT_REPO_URL="https://github.com/clams-tech/browser-app"
BROWSER_APP_IMAGE_NAME="browser-app:$BROWSER_APP_GIT_TAG"

# build the browser-app image.
if ! docker image list --format "{{.Repository}}:{{.Tag}}" | grep -q "$BROWSER_APP_IMAGE_NAME"; then
    docker build --build-arg GIT_REPO_URL="$BROWSER_APP_GIT_REPO_URL" \
        --build-arg VERSION="$BROWSER_APP_GIT_TAG" \
        -t "$BROWSER_APP_IMAGE_NAME" \
        ./clams/frontend/browser-app/
fi

# If the clams-root volume doesn't exist, we create and seed it.
if ! docker volume list | grep -q clams-root; then
    docker volume create clams-root
    docker run -t --rm -v clams-root:/output --name browser-app "$BROWSER_APP_IMAGE_NAME"
fi
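The clams-root volume seeded here is what the reverse proxy later mounts read-only at /browser-app (see the stub_docker_yml hunks below). A quick hedged way to inspect its contents, assuming a small utility image such as alpine is available locally:

docker run --rm -v clams-root:/output alpine ls -la /output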
@@ -141,9 +141,6 @@ EOL

    add_header Strict-Transport-Security "max-age=63072000" always;
    ssl_stapling on;
    ssl_stapling_verify on;
e
    # TODO change resolver to local DNS resolver, or inherit from system.

    # default server if hostname not specified.
    server {
@@ -518,6 +515,31 @@ EOL

EOL
    fi

    # deploy Clams browser app under the primary domain.
    if [ $iteration = 0 ]; then

        cat >> "$NGINX_CONF_PATH" <<EOF

    # server block for the clams browser-app; just a static website
    server {
        listen 443 ssl;

        server_name ${CLAMS_FQDN};

        autoindex off;
        server_tokens off;

        gzip_static on;

        root /browser-app;
        index 200.html;
    }

EOF

    fi

    iteration=$((iteration+1))
done
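Once the reverse-proxy stack is redeployed, the new vhost can be smoke-tested with a plain HTTPS request (curl only; a sketch, not part of the changeset):

curl -sS -o /dev/null -w "%{http_code}\n" "https://$CLAMS_FQDN/"   # expect 200 once the static app is served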
@@ -31,25 +31,25 @@ EOL

for LANGUAGE_CODE in ${SITE_LANGUAGE_CODES//,/ }; do
    # We create another ghost instance under /
    cat >> "$DOCKER_YAML_PATH" <<EOL
      - ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
      - ghostnet-$DOMAIN_IDENTIFIER-$LANGUAGE_CODE
EOL

    if [ "$LANGUAGE_CODE" = en ]; then
        if [ "$DEPLOY_GITEA" = "true" ]; then
            cat >> "$DOCKER_YAML_PATH" <<EOL
      - giteanet-$DOMAIN_IDENTIFIER-en
      - giteanet-$DOMAIN_IDENTIFIER-en
EOL
        fi

        if [ "$DEPLOY_NEXTCLOUD" = "true" ]; then
            cat >> "$DOCKER_YAML_PATH" <<EOL
      - nextcloudnet-$DOMAIN_IDENTIFIER-en
      - nextcloudnet-$DOMAIN_IDENTIFIER-en
EOL
        fi

        if [ -n "$NOSTR_ACCOUNT_PUBKEY" ]; then
            cat >> "$DOCKER_YAML_PATH" <<EOL
      - nostrnet-$DOMAIN_IDENTIFIER-en
      - nostrnet-$DOMAIN_IDENTIFIER-en
EOL
        fi
    fi
@@ -58,16 +58,24 @@ EOL

done

cat >> "$DOCKER_YAML_PATH" <<EOL
cat >> "$DOCKER_YAML_PATH" <<EOL
    volumes:
      - ${REMOTE_HOME}/letsencrypt:/etc/letsencrypt:ro
EOL

if [ "$DEPLOY_CLAMS" = true ]; then
    cat >> "$DOCKER_YAML_PATH" <<EOL
      - clams-browser-app:/browser-app:ro
EOL
fi

cat >> "$DOCKER_YAML_PATH" <<EOL
    configs:
      - source: nginx-config
        target: /etc/nginx/nginx.conf
    deploy:
      restart_policy:
        condition: on-failure

configs:
  nginx-config:
    file: ${PROJECT_PATH}/nginx.conf
@@ -129,9 +137,18 @@ EOL

    done
done

if [ "$DEPLOY_CLAMS" = true ]; then
    cat >> "$DOCKER_YAML_PATH" <<EOL
volumes:
  clams-browser-app:
    external: true
    name: clams-root
EOL
fi

if [ "$STOP_SERVICES" = false ]; then
    docker stack deploy -c "$DOCKER_YAML_PATH" "reverse-proxy"
    # iterate over all our domains and create the nginx config file.
    sleep 1
fi
fi
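Because the compose fragment above declares clams-browser-app as an external volume named clams-root, the stack deploy fails unless that volume already exists (prepare_clams.sh normally creates and seeds it). A hedged pre-flight check, using only standard docker CLI calls:

docker volume inspect clams-root >/dev/null 2>&1 || docker volume create clams-root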
@@ -1,6 +1,6 @@

#!/bin/bash

set -exu
set -eu
cd "$(dirname "$0")"

# see https://www.sovereign-stack.org/management/
@@ -24,8 +24,8 @@ fi

# install snap
if ! snap list | grep -q lxd; then
    sudo snap install lxd --channel=5.11/candidate
    sleep 3
    sudo snap install lxd --channel=5.10/stable
    sleep 5

# run lxd init on the remote server. /dev/nvme1n1
#
@@ -1,6 +1,6 @@

#!/bin/bash

set -ex
set -e
cd "$(dirname "$0")"

# NOTE! This script MUST be executed as root.
@@ -34,7 +34,7 @@ sleep 1

# install snap
if ! snap list | grep -q lxd; then
    sudo snap install lxd
    sudo snap install lxd --channel=5.10/stable
    sleep 6

# We just do an auto initialization. All we are using is the LXD client inside the management environment.