Compare commits
No commits in common. "16f88d964d5edb9ad5ac223d3c07303629dae418" and "c3bab8c844520d33424e0ed6fa79b7cfebb28d0d" have entirely different histories.
16f88d964d...c3bab8c844
@@ -1,6 +1,6 @@
 #!/bin/bash

-set -e
+set -ex

 export DEPLOY_WWW_SERVER=false
 export DEPLOY_GHOST=false
@@ -80,7 +80,7 @@ export PROJECTS_DIR="$HOME/ss-projects"
 export SITES_PATH="$HOME/ss-sites"

 # The base VM image.
-export LXD_UBUNTU_BASE_VERSION="jammy"
+export LXD_UBUNTU_BASE_VERSION="22.04"
 export BASE_IMAGE_VM_NAME="ss-base-${LXD_UBUNTU_BASE_VERSION//./-}"
 export BASE_LXC_IMAGE="ubuntu/$LXD_UBUNTU_BASE_VERSION/cloud"
 export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
@@ -89,7 +89,7 @@ export UBUNTU_BASE_IMAGE_NAME="ss-ubuntu-${LXD_UBUNTU_BASE_VERSION//./-}"
 export DEPLOY_MGMT_REGISTRY=false
 export OTHER_SITES_LIST=
 export BTCPAY_ALT_NAMES=
-export BITCOIN_CHAIN=regtest
 export REMOTE_HOME="/home/ubuntu"

 export BTCPAY_SERVER_APPPATH="$REMOTE_HOME/btcpayserver-docker"
deployment/btcpayserver/.gitignore (vendored, 1 line changed)
@@ -1 +0,0 @@
-core-lightning
@@ -1,6 +1,6 @@
 #!/bin/bash

-set -ex
+set -e
 cd "$(dirname "$0")"

 # take the services down, create a backup archive, then pull it down.
@@ -5,15 +5,6 @@ cd "$(dirname "$0")"

 export DOCKER_HOST="ssh://ubuntu@$BTCPAY_FQDN"

-docker pull btcpayserver/lightning:v22.11.1
-docker build -t clightning:latest ./core-lightning
-
-# run the btcpay setup script if it hasn't been done before.
-if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
-./stub_btcpay_setup.sh
-BACKUP_BTCPAY=false
-fi
-
 RUN_SERVICES=true

 # we will re-run the btcpayserver provisioning scripts if directed to do so.
@@ -43,7 +34,6 @@ elif [ "$RECONFIGURE_BTCPAY_SERVER" == true ]; then
 ./stub_btcpay_setup.sh

 RUN_SERVICES=true
-BACKUP_BTCPAY=false
 fi

 # if the script gets this far, then we grab a regular backup.
@@ -3,26 +3,12 @@
 set -e
 cd "$(dirname "$0")"

-# default is for regtest
-CLIGHTNING_WEBSOCKET_PORT=9736
-if [ "$BITCOIN_CHAIN" = testnet ]; then
-CLIGHTNING_WEBSOCKET_PORT=9737
-elif [ "$BITCOIN_CHAIN" = mainnet ]; then
-CLIGHTNING_WEBSOCKET_PORT=9738
-fi
-
-export CLIGHTNING_WEBSOCKET_PORT="$CLIGHTNING_WEBSOCKET_PORT"
-
 # export BTCPAY_FASTSYNC_ARCHIVE_FILENAME="utxo-snapshot-bitcoin-testnet-1445586.tar"
 # BTCPAY_REMOTE_RESTORE_PATH="/var/lib/docker/volumes/generated_bitcoin_datadir/_data"

 # This is the config for a basic proxy to the listening port 127.0.0.1:2368
 # It also supports modern TLS, so SSL certs must be available.
 #opt-add-nostr-relay;

-export BTCPAYSERVER_GITREPO="https://github.com/farscapian/btcpayserver-docker"
-#https://github.com/btcpayserver/btcpayserver-docker
-
 cat > "$SITE_PATH/btcpay.sh" <<EOL
 #!/bin/bash
-
@@ -36,7 +22,7 @@ done

 if [ ! -d "btcpayserver-docker" ]; then
 echo "cloning btcpayserver-docker";
-git clone -b master ${BTCPAYSERVER_GITREPO} btcpayserver-docker;
+git clone -b master https://github.com/btcpayserver/btcpayserver-docker btcpayserver-docker;
 git config --global --add safe.directory /home/ubuntu/btcpayserver-docker
 else
 cd ./btcpayserver-docker
@@ -81,11 +67,6 @@ services:
 environment:
 LIGHTNINGD_OPT: |
 announce-addr-dns=true
-experimental-websocket-port=9736
-ports:
-- "${CLIGHTNING_WEBSOCKET_PORT}:9736"
-expose:
-- "9736"

 EOF

@@ -8,13 +8,12 @@ cd "$(dirname "$0")"
 # to use LXD.

 DATA_PLANE_MACVLAN_INTERFACE=
-DISK_TO_USE=
+DISK_TO_USE=loop

 # override the cluster name.
 CLUSTER_NAME="${1:-}"
 if [ -z "$CLUSTER_NAME" ]; then
-echo "ERROR: The cluster name was not provided. Syntax is: 'ss-cluster CLUSTER_NAME SSH_HOST_FQDN'"
-echo "  for example: 'ss-cluster dev clusterhost01.domain.tld"
+echo "ERROR: The cluster name was not provided."
 exit 1
 fi

@@ -35,7 +34,6 @@ if [ ! -f "$CLUSTER_DEFINITION" ]; then

 export LXD_CLUSTER_PASSWORD="$(gpg --gen-random --armor 1 14)"
 export BITCOIN_CHAIN="regtest"
-export PROJECT_PREFIX="dev"
 #export REGISTRY_URL="https://index.docker.io/v1/"

 EOL
@@ -87,9 +85,6 @@ if ! lxc remote list | grep -q "$CLUSTER_NAME"; then
 esac
 done

-# first let's copy our ssh pubkey to the remote server so we don't have to login constantly.
-ssh-copy-id -i "$HOME/.ssh/id_rsa.pub" "ubuntu@$FQDN"
-
 if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
 echo "INFO: It looks like you didn't provide input on the command line for the data plane macvlan interface."
 echo "  We need to know which interface that is! Enter it here now."
@@ -152,6 +147,10 @@ if ! command -v lxc >/dev/null 2>&1; then
 sleep 1
 fi

+if lxc network list --format csv | grep -q lxdbr1; then
+lxc network delete lxdbr1
+sleep 1
+fi
 fi

 ssh -t "ubuntu@$FQDN" "
@@ -162,7 +161,7 @@ sudo apt-get update && sudo apt-get upgrade -y && sudo apt install htop dnsutils

 # install lxd as a snap if it's not installed.
 if ! snap list | grep -q lxd; then
-sudo snap install lxd --channel=5.10/stable
+sudo snap install lxd
 sleep 10
 fi
 "
@@ -173,9 +172,6 @@ if [ -z "$DATA_PLANE_MACVLAN_INTERFACE" ]; then
 DATA_PLANE_MACVLAN_INTERFACE="$(ssh -t ubuntu@"$FQDN" ip route | grep default | cut -d " " -f 5)"
 fi

-export DATA_PLANE_MACVLAN_INTERFACE="$DATA_PLANE_MACVLAN_INTERFACE"
-
-echo "DATA_PLANE_MACVLAN_INTERFACE: $DATA_PLANE_MACVLAN_INTERFACE"
 # run lxd init on the remote server.
 cat <<EOF | ssh ubuntu@"$FQDN" lxd init --preseed
 config:
@@ -186,10 +182,19 @@ config:

 networks:
 - name: lxdbr0
-description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-error}"
+description: "ss-config,${DATA_PLANE_MACVLAN_INTERFACE:-}"
 type: bridge
 config:
-ipv4.nat: true
+ipv4.nat: "true"
+ipv4.dhcp: "true"
+ipv6.address: "none"
+dns.mode: "managed"
+- name: lxdbr1
+description: "For regtest"
+type: bridge
+config:
+ipv4.address: 10.139.144.1/24
+ipv4.nat: false
 ipv4.dhcp: true
 ipv6.address: none
 dns.mode: managed
@@ -214,6 +219,8 @@ cluster:
 cluster_token: ""
 EOF

+# #
+
 # ensure the lxd service is available over the network, then add a lxc remote, then switch the active remote to it.
 if wait-for-it -t 20 "$FQDN:8443"; then
 # now create a remote on your local LXC client and switch to it.
@@ -1,6 +1,6 @@
 #!/bin/bash

-set -eu
+set -exu
 cd "$(dirname "$0")"

 CURRENT_CLUSTER="$(lxc remote get-default)"
@@ -30,8 +30,6 @@ export CLUSTER_PATH="$CLUSTERS_DIR/$CURRENT_CLUSTER"
 CLUSTER_DEFINITION="$CLUSTER_PATH/cluster_definition"
 export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"

-echo "CLUSTER_DEFINITION: $CLUSTER_DEFINITION"
-
 # ensure the cluster definition exists.
 if [ ! -f "$CLUSTER_DEFINITION" ]; then
 echo "ERROR: The cluster definition could not be found. You may need to run 'ss-cluster'."
@@ -43,17 +41,10 @@ source "$CLUSTER_DEFINITION"

 # source project defition.
 # Now let's load the project definition.
-PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
-export PROJECT_NAME="$PROJECT_NAME"
 PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
 PROJECT_DEFINITION_PATH="$PROJECT_PATH/project_definition"

-if [ ! -f "$PROJECT_DEFINITION_PATH" ]; then
-echo "ERROR: 'project_definition' not found $PROJECT_DEFINITION_PATH not found."
-exit 1
-fi
-
 source "$PROJECT_DEFINITION_PATH"

 export PRIMARY_SITE_DEFINITION_PATH="$SITES_PATH/$PRIMARY_DOMAIN/site_definition"
 source "$PRIMARY_SITE_DEFINITION_PATH"

@@ -17,7 +17,7 @@ fi
 # If the lxc VM does exist, then we will delete it (so we can start fresh)
 if lxc list -q --format csv | grep -q "$BASE_IMAGE_VM_NAME"; then
 # if there's no snapshot, we dispense with the old image and try again.
-if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$LXD_UBUNTU_BASE_VERSION"; then
+if ! lxc info "$BASE_IMAGE_VM_NAME" | grep -q "ss-docker-$(date +%Y-%m)"; then
 lxc delete "$BASE_IMAGE_VM_NAME" --force
 ssh-keygen -f "$SSH_HOME/known_hosts" -R "$BASE_IMAGE_VM_NAME"
 fi
@@ -31,13 +31,12 @@ else

 lxc start "$BASE_IMAGE_VM_NAME"

-sleep 30
+sleep 70

 # ensure the ssh service is listening at localhost
 lxc exec "$BASE_IMAGE_VM_NAME" -- wait-for-it 127.0.0.1:22 -t 120
-

 # stop the VM and get a snapshot.
 lxc stop "$BASE_IMAGE_VM_NAME"
-lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$LXD_UBUNTU_BASE_VERSION"
+lxc snapshot "$BASE_IMAGE_VM_NAME" "ss-docker-$(date +%Y-%m)"
 fi
@@ -13,8 +13,8 @@ DOMAIN_NAME=
 RUN_CERT_RENEWAL=true
 SKIP_WWW=false
 RESTORE_WWW=false
-BACKUP_CERTS=false
-BACKUP_APPS=false
+BACKUP_CERTS=true
+BACKUP_APPS=true
 BACKUP_BTCPAY=true
 BACKUP_BTCPAY_ARCHIVE_PATH=
 RESTORE_BTCPAY=false
@@ -126,6 +126,7 @@ export USER_SAYS_YES="$USER_SAYS_YES"
 export BACKUP_BTCPAY_ARCHIVE_PATH="$BACKUP_BTCPAY_ARCHIVE_PATH"
 export RESTART_FRONT_END="$RESTART_FRONT_END"

+
 # todo convert this to Trezor-T
 SSH_PUBKEY_PATH="$SSH_HOME/id_rsa.pub"
 export SSH_PUBKEY_PATH="$SSH_PUBKEY_PATH"
@@ -184,8 +185,8 @@ function instantiate_vms {

 # Goal is to get the macvlan interface.
 LXD_SS_CONFIG_LINE=
-if lxc network list --format csv | grep lxdbr0 | grep -q ss-config; then
-LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbr0 | grep ss-config)"
+if lxc network list --format csv | grep lxdbrSS | grep -q ss-config; then
+LXD_SS_CONFIG_LINE="$(lxc network list --format csv | grep lxdbrSS | grep ss-config)"
 fi

 if [ -z "$LXD_SS_CONFIG_LINE" ]; then
@@ -204,7 +205,7 @@ function instantiate_vms {
 # fi

 # create the lxd base image.
-./create_lxc_base.sh
+#./create_lxc_base.sh

 # # now switch to the current chain project.
 # if ! lxc project list --format csv | grep -a "$BITCOIN_CHAIN"; then
@@ -256,15 +257,15 @@ function instantiate_vms {

 ./deploy_vms.sh

-if [ "$VIRTUAL_MACHINE" = www ]; then
-# this tells our local docker client to target the remote endpoint via SSH
-export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
-
-# enable docker swarm mode so we can support docker stacks.
-if docker info | grep -q "Swarm: inactive"; then
-docker swarm init --advertise-addr enp6s0
-fi
+# this tells our local docker client to target the remote endpoint via SSH
+export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
+
+# enable docker swarm mode so we can support docker stacks.
+if docker info | grep -q "Swarm: inactive"; then
+docker swarm init --advertise-addr enp6s0
+fi
 fi

 done

 }
@@ -313,10 +314,8 @@ EOL

 }

-PROJECT_NAME="$PROJECT_PREFIX-$BITCOIN_CHAIN"
 PROJECT_PATH="$PROJECTS_DIR/$PROJECT_NAME"
 mkdir -p "$PROJECT_PATH" "$CLUSTER_PATH/projects"
-export PROJECT_NAME="$PROJECT_NAME"
 export PROJECT_PATH="$PROJECT_PATH"

 # create a symlink from ./clusterpath/projects/project
@@ -328,13 +327,12 @@ fi
 if ! lxc project list | grep -q "$PROJECT_NAME"; then
 echo "INFO: The lxd project specified in the cluster_definition did not exist. We'll create one!"
 lxc project create "$PROJECT_NAME"
-lxc project set "$PROJECT_NAME" features.networks=true
 fi

 # # check if we need to provision a new lxc project.
-# if ! lxc info | grep "project:" | grep -q "$PROJECT_NAME"; then
-# echo "INFO: switch to lxd project '$PROJECT_NAME'."
-# lxc project switch "$PROJECT_NAME"
+# if [ "$BITCOIN_CHAIN" != "$CURRENT_PROJECT" ]; then
+# echo "INFO: switch to lxd project '$BITCOIN_CHAIN'."
+# lxc project switch "$BITCOIN_CHAIN"
 # fi

 # check to see if the enf file exists. exist if not.
@@ -428,7 +426,21 @@ fi
 export DOMAIN_NAME="$PRIMARY_DOMAIN"
 export SITE_PATH="$SITES_PATH/$DOMAIN_NAME"
 if [ "$SKIP_BTCPAY" = false ] && [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
-./btcpayserver/go.sh
+bash -c "./btcpayserver/go.sh"

 ssh ubuntu@"$BTCPAY_FQDN" "echo $LATEST_GIT_COMMIT > /home/ubuntu/.ss-githead"
 fi

+# deploy clams wallet.
+LOCAL_CLAMS_PATH="$(pwd)/www/clams"
+if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
+if [ ! -d "$LOCAL_CLAMS_PATH" ]; then
+git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_PATH"
+else
+cd "$LOCAL_CLAMS_PATH"
+git pull
+cd -
+fi
+fi

@@ -1,6 +1,6 @@
 #!/bin/bash

-set -exu
+set -ex
 cd "$(dirname "$0")"

@@ -33,10 +33,9 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then

 ./stub_lxc_profile.sh "$LXD_VM_NAME"

-lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$LXD_UBUNTU_BASE_VERSION" "$LXD_VM_NAME"
+lxc copy --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME"/"ss-docker-$(date +%Y-%m)" "$LXD_VM_NAME"

 # now let's create a new VM to work with.
-#@lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm
+#lxc init --profile="$LXD_VM_NAME" "$BASE_IMAGE_VM_NAME" "$LXD_VM_NAME" --vm

 # let's PIN the HW address for now so we don't exhaust IP
 # and so we can set DNS internally.
@@ -53,3 +52,11 @@ ssh-keyscan -H -t ecdsa "$FQDN" >> "$SSH_HOME/known_hosts"

 # create a directory to store backup archives. This is on all new vms.
 ssh "$FQDN" mkdir -p "$REMOTE_HOME/backups"
+
+# if this execution is for btcpayserver, then we run the stub/btcpay setup script
+# but only if it hasn't been executed before.
+if [ "$VIRTUAL_MACHINE" = btcpayserver ]; then
+if [ "$(ssh "$BTCPAY_FQDN" [[ ! -f "$REMOTE_HOME/btcpay.complete" ]]; echo $?)" -eq 0 ]; then
+./btcpayserver/stub_btcpay_setup.sh
+fi
+fi
@@ -1,15 +1,9 @@
 #!/bin/bash

-set -ex
+set -exu
 cd "$(dirname "$0")"
 # this script takes down all resources in the cluster. This script is DESTRUCTIVE of data, so make sure it's backed up first.

-
-if lxc remote get-default | grep -q "local"; then
-echo "ERROR: you are on the local lxc remote. Nothing to destroy"
-exit 1
-fi
-
 RESPONSE=
 read -r -p "Are you sure you want to continue? Responding 'y' here results in destruction of user data!": RESPONSE
 if [ "$RESPONSE" != "y" ]; then
@@ -18,8 +12,6 @@ if [ "$RESPONSE" != "y" ]; then
 fi

 . ../defaults.sh
-
-
 . ./cluster_env.sh

 for VM in www btcpayserver; do
@@ -6,48 +6,54 @@ cd "$(dirname "$0")"

 source ../defaults.sh

-./destroy.sh
+echo "Need to uncomment"
+exit 1
+# ./destroy.sh

-# these only get initialzed upon creation, so we MUST delete here so they get recreated.
-if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
-lxc profile delete "$BASE_IMAGE_VM_NAME"
-fi
+# # these only get initialzed upon creation, so we MUST delete here so they get recreated.
+# if lxc profile list | grep -q "$BASE_IMAGE_VM_NAME"; then
+# lxc profile delete "$BASE_IMAGE_VM_NAME"
+# fi

-if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
-lxc image rm "$BASE_IMAGE_VM_NAME"
-fi
+# if lxc image list | grep -q "$BASE_IMAGE_VM_NAME"; then
+# lxc image rm "$BASE_IMAGE_VM_NAME"
+# fi

-if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
-fi
+# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+# lxc image rm "$UBUNTU_BASE_IMAGE_NAME"
+# fi

-CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
-if ! lxc info | grep -q "project: default"; then
-lxc project switch default
-lxc project delete "$CURRENT_PROJECT"
-fi
+# CURRENT_PROJECT="$(lxc info | grep "project:" | awk '{print $2}')"
+# if ! lxc info | grep -q "project: default"; then
+# lxc project switch default
+# lxc project delete "$CURRENT_PROJECT"
+# fi

-if lxc profile show default | grep -q "root:"; then
-lxc profile device remove default root
-fi
+# if lxc profile show default | grep -q "root:"; then
+# lxc profile device remove default root
+# fi

-if lxc profile show default| grep -q "eth0:"; then
-lxc profile device remove default eth0
-fi
+# if lxc profile show default| grep -q "eth0:"; then
+# lxc profile device remove default eth0
+# fi

-if lxc network list --format csv | grep -q lxdbr0; then
-lxc network delete lxdbr0
-fi
+# if lxc network list --format csv | grep -q lxdbr0; then
+# lxc network delete lxdbr0
+# fi

-if lxc storage list --format csv | grep -q ss-base; then
-lxc storage delete ss-base
-fi
+# if lxc network list --format csv | grep -q lxdbr1; then
+# lxc network delete lxdbr1
+# fi

-CURRENT_REMOTE="$(lxc remote get-default)"
-if ! lxc remote get-default | grep -q "local"; then
-lxc remote switch local
-lxc remote remove "$CURRENT_REMOTE"
-fi
+# if lxc storage list --format csv | grep -q ss-base; then
+# lxc storage delete ss-base
+# fi
+# CURRENT_REMOTE="$(lxc remote get-default)"
+# if ! lxc remote get-default | grep -q "local"; then
+# lxc remote switch local
+# lxc remote remove "$CURRENT_REMOTE"
+# fi


@@ -50,6 +50,12 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
 preserve_hostname: false
 fqdn: ${BASE_IMAGE_VM_NAME}

+apt:
+sources:
+docker.list:
+source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu jammy stable"
+keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+
 packages:
 - curl
 - ssh-askpass
@@ -70,6 +76,10 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
 - wait-for-it
 - dnsutils
 - wget
+- docker-ce
+- docker-ce-cli
+- containerd.io
+- docker-compose-plugin

 groups:
 - docker
@@ -94,34 +104,18 @@ if [ "$LXD_HOSTNAME" = "$BASE_IMAGE_VM_NAME" ]; then
 UsePAM no
 LogLevel INFO

+- path: /etc/docker/daemon.json
+content: |
+{
+"registry-mirrors": ["${REGISTRY_URL}"],
+"labels": [ "githead=${LATEST_GIT_COMMIT}" ]
+}
+
 runcmd:
-- sudo mkdir -m 0755 -p /etc/apt/keyrings
-- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
-- echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
-- sudo apt-get update
-- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
 - sudo apt-get install -y openssh-server

 EOF

-
-# apt:
-# sources:
-# docker.list:
-# source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${LXD_UBUNTU_BASE_VERSION} stable"
-# keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
-
-# - path: /etc/docker/daemon.json
-# content: |
-# {
-# "registry-mirrors": ["${REGISTRY_URL}"],
-# "labels": [ "githead=${LATEST_GIT_COMMIT}" ]
-# }
-
-
-# - sudo apt-get update
-#- sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
-
 else
 # all other machines.
 cat >> "$YAML_PATH" <<EOF
@@ -201,7 +195,7 @@ cat >> "$YAML_PATH" <<EOF
 type: nic
 enp6s0:
 name: enp6s0
-network: lxdbr0
+network: lxdbr1
 type: nic

 name: ${PRIMARY_DOMAIN}
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -ex
-cd "$(dirname "$0")"
-
-# deploy clams wallet.
-LOCAL_CLAMS_REPO_PATH="$(pwd)/www/clams"
-if [ "$DEPLOY_BTCPAY_SERVER" = true ]; then
-if [ ! -d "$LOCAL_CLAMS_REPO_PATH" ]; then
-git clone "$CLAMS_GIT_REPO" "$LOCAL_CLAMS_REPO_PATH"
-else
-cd "$LOCAL_CLAMS_REPO_PATH"
-#git config pull.ff only
-git pull
-cd -
-fi
-fi
-
-lxc file push -r -p ./clams "${PRIMARY_WWW_FQDN//./-}"/home/ubuntu/code
-
-# run the primary script and output the files to --output-path
-ssh "$PRIMARY_WWW_FQDN" mkdir -p "$REMOTE_HOME/clams/browser-app"
-ssh "$PRIMARY_WWW_FQDN" "$REMOTE_HOME/code/clams/browser-app/run.sh --output-path=$REMOTE_HOME/clams/browser-app"
-ssh "$PRIMARY_WWW_FQDN" rm -rf "$REMOTE_HOME/code"
@@ -137,7 +137,7 @@ fi
 ./stub/nextcloud_yml.sh
 ./stub/gitea_yml.sh
 ./stub/nostr_yml.sh
-./deploy_clams.sh

 # # start a browser session; point it to port 80 to ensure HTTPS redirect.
 # # WWW_FQDN is in our certificate, so we resolve to that.
@@ -141,7 +141,7 @@ EOL
 add_header Strict-Transport-Security "max-age=63072000" always;
 ssl_stapling on;
 ssl_stapling_verify on;
-e
+resolver 198.54.117.10;
 # TODO change resolver to local DNS resolver, or inherit from system.

install.sh (19 lines changed)
@@ -24,7 +24,7 @@ fi

 # install snap
 if ! snap list | grep -q lxd; then
-sudo snap install lxd --channel=5.10/stable
+sudo snap install lxd
 sleep 3

 # run lxd init on the remote server./dev/nvme1n1
@@ -39,7 +39,8 @@ networks:
 ipv6.address: none
 description: "Default network bridge for ss-mgmt outbound network access."
 name: lxdbr0
-type: bridge
+type: "bridge"
+project: default
 storage_pools:
 - config:
 source: ${DISK}
@@ -72,7 +73,7 @@ if ! lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
 fi

 if ! lxc list --format csv | grep -q ss-mgmt; then
-lxc init "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
+lxc init "images:$BASE_LXC_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB

 # mount the pre-verified sovereign stack git repo into the new vm
 lxc config device add ss-mgmt sscode disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
@@ -80,7 +81,7 @@ fi

 if lxc list --format csv | grep -q "ss-mgmt,STOPPED"; then
 lxc start ss-mgmt
-sleep 20
+sleep 15
 fi

 . ./management/wait_for_lxc_ip.sh
@@ -91,15 +92,13 @@ fi
 # sleep 1
 # done

-SSH_PUBKEY_PATH="$HOME/.ssh/id_rsa.pub"
-if [ ! -f "$SSH_PUBKEY_PATH" ]; then
-ssh-keygen -f "$SSH_HOME/id_rsa" -t ecdsa -b 521 -N ""
-fi
-
 # now run the mgmt provisioning script.
+SSH_PUBKEY_PATH="$HOME/.ssh/id_rsa.pub"
 if [ -f "$SSH_PUBKEY_PATH" ]; then
 lxc file push "$SSH_PUBKEY_PATH" ss-mgmt/home/ubuntu/.ssh/authorized_keys
+else
+echo "ERROR: You need to generate an SSH key."
+exit 1
 fi

 lxc file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
@@ -3,8 +3,8 @@
 set -eu
 cd "$(dirname "$0")"

-if ! lxc remote get-default | grep -q "local"; then
-lxc remote switch "local"
+if ! lxc remote get-default | grep -q local; then
+lxc remote switch local
 fi

 # if the mgmt machine doesn't exist, then warn the user to perform ./install.sh
@@ -24,5 +24,4 @@ fi

 wait-for-it -t 300 "$IP_V4_ADDRESS:22" > /dev/null 2>&1

-# let's ensure ~/.ssh/ssh_config is using the correct IP address for ss-mgmt.
 ssh ubuntu@"$IP_V4_ADDRESS"
@@ -85,7 +85,7 @@ if [ -x /usr/bin/dircolors ]; then
 fi

 # colored GCC warnings and errors
-export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
+#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'

 # some more ls aliases
 alias ll='ls -alF'
@@ -34,7 +34,7 @@ sleep 1

 # install snap
 if ! snap list | grep -q lxd; then
-sudo snap install lxd --channel=5.10/stable
+sudo snap install lxd
 sleep 6

 # We just do an auto initialization. All we are using is the LXD client inside the management environment.
@@ -1,3 +0,0 @@
-FROM btcpayserver/lightning:v22.11.1
-
-EXPOSE 9736
uninstall.sh (53 lines changed)
@@ -3,52 +3,37 @@
 set -exu

 # this script undoes install.sh
-if ! command -v lxc >/dev/null 2>&1; then
-echo "This script requires 'lxc' to be installed. Please run 'install.sh'."
-exit 1
-fi
-
 . ./defaults.sh

 if lxc list --format csv | grep -q ss-mgmt; then

-if ! lxc list --format csv | grep ss-mgmt | grep -q "RUNNING"; then
+if ! list list --format csv | grep ss-mgmt | grep -q "RUNNING"; then
 lxc stop ss-mgmt
 fi

 lxc config device remove ss-mgmt sscode
-lxc delete ss-mgmt -f
+lxc delete ss-mgmt
 fi

-if lxc profile device list default | grep -q root; then
-lxc profile device remove default root
-fi
+# if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
+# lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
+# fi

-if lxc profile device list default | grep -q enp5s0; then
-lxc profile device remove default enp5s0
-fi
+# if lxc storage list --format csv | grep -q sovereign-stack; then
+# lxc profile device remove default root
+# lxc storage delete sovereign-stack
+# fi

-if lxc network list | grep -q lxdbr0; then
-lxc network delete lxdbr0
-fi
+# if snap list | grep -q lxd; then
+# sudo snap remove lxd
+# sleep 2
+# fi

-if lxc image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then
-lxc image delete "$UBUNTU_BASE_IMAGE_NAME"
-fi
+# if zfs list | grep -q sovereign-stack; then
+# sudo zfs destroy -r sovereign-stack
+# fi

-if lxc storage list --format csv | grep -q sovereign-stack; then
-lxc storage delete sovereign-stack
-fi
+# if zfs list | grep -q "sovereign-stack"; then
+# sudo zfs destroy -r "rpool/lxd"
+# fi

-if snap list | grep -q lxd; then
-sudo snap remove lxd
-sleep 2
-fi
-
-if zfs list | grep -q sovereign-stack; then
-sudo zfs destroy -r sovereign-stack
-fi
-
-if zfs list | grep -q "sovereign-stack"; then
-sudo zfs destroy -r "rpool/lxd"
-fi