#!/bin/bash
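
# exit on error, print each command, and treat unset variables as errors;
# run everything relative to the directory containing this script.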
set -exu
cd "$(dirname "$0")"
# https://www.sovereign-stack.org/install/

# this script is not meant to be executed from the SSME; let's check and abort if so.
if [ "$(hostname)" = ss-mgmt ]; then
    echo "ERROR: This command is meant to be executed from the bare metal management machine -- not the SSME."
    exit 1
fi
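
# usage: install.sh --disk=<disk_or_partition>
# the disk or partition given here backs the incus ZFS storage pool created below.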
DISK_OR_PARTITION=
# grab any modifications from the command line.
for i in "$@"; do
    case $i in
        --disk=*)
            DISK_OR_PARTITION="${i#*=}"
            shift
            ;;
        *)
            echo "Unexpected option: $i"
            exit 1
            ;;
    esac
done

# ensure the iptables FORWARD policy is set to ACCEPT so your host can act as a router.
# Note: this is necessary if docker is running (or has previously been installed) on the
# same host running incus.
sudo iptables -F FORWARD
sudo iptables -P FORWARD ACCEPT

# the user must specify the disk or partition that will back the ZFS storage pool;
# abort if it wasn't provided.
if [ -z "$DISK_OR_PARTITION" ]; then
    echo "ERROR: You MUST set DISK_OR_PARTITION"
    exit 1
fi

# run the incus install script.
sudo bash -c ./install_incus.sh
# run incus init
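# the preseed below creates the incusbr0 bridge, a ZFS storage pool named
# 'sovereign-stack' backed by $DISK_OR_PARTITION, and a default profile whose
# root disk lives on that pool.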
cat <<EOF | sudo incus admin init --preseed
config: {}
networks:
- config:
    ipv4.address: auto
    ipv4.dhcp: true
    ipv6.address: none
  description: "Default network bridge for ss-mgmt outbound network access."
  name: incusbr0
  type: bridge
  project: default
storage_pools:
- config:
    source: ${DISK_OR_PARTITION}
  description: ""
  name: sovereign-stack
  driver: zfs
profiles:
- config: {}
  description: "Default profile for ss-mgmt."
  devices:
    enp5s0:
      name: enp5s0
      network: incusbr0
      type: nic
    root:
      path: /
      pool: sovereign-stack
      type: disk
  name: default
projects: []
cluster: null
EOF
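
# after the preseed is applied, the resulting objects can be inspected with
# 'incus network show incusbr0', 'incus storage show sovereign-stack', and
# 'incus profile show default'.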
. ./deployment/deployment_defaults.sh
. ./deployment/base.sh
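
# deployment_defaults.sh and base.sh define variables used below, such as
# UBUNTU_BASE_IMAGE_NAME, BASE_INCUS_IMAGE, and SS_ROOT_PATH.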

# we need to get the base image. Import it from the local cache if it's there, otherwise download it and cache it.
if ! incus image list | grep -q "$UBUNTU_BASE_IMAGE_NAME"; then

    # if the image is cached locally, import it from disk, otherwise download it from ubuntu.
    IMAGE_PATH="$HOME/ss/cache/ss-ubuntu-jammy"
    IMAGE_IDENTIFIER=$(find "$IMAGE_PATH" | grep ".qcow2" | head -n1 | cut -d "." -f1)
    METADATA_FILE="$IMAGE_PATH/meta-$IMAGE_IDENTIFIER.tar.xz"
    IMAGE_FILE="$IMAGE_PATH/$IMAGE_IDENTIFIER.qcow2"

    if [ -d "$IMAGE_PATH" ] && [ -f "$METADATA_FILE" ] && [ -f "$IMAGE_FILE" ]; then
        incus image import "$METADATA_FILE" "$IMAGE_FILE" --alias "$UBUNTU_BASE_IMAGE_NAME"
    else
        incus image copy "images:$BASE_INCUS_IMAGE" local: --alias "$UBUNTU_BASE_IMAGE_NAME" --vm --auto-update
        mkdir -p "$IMAGE_PATH"
        incus image export "$UBUNTU_BASE_IMAGE_NAME" "$IMAGE_PATH" --vm
    fi
fi

# ensure the user has an SSH keypair; the ~/.ssh directory is mounted into the
# ss-mgmt VM below, so this key is what grants SSH access to it.
SSH_PATH="$HOME/.ssh"
SSH_PRIVKEY_PATH="$SSH_PATH/id_rsa"
SSH_PUBKEY_PATH="$SSH_PRIVKEY_PATH.pub"

if [ ! -f "$SSH_PRIVKEY_PATH" ]; then
    ssh-keygen -f "$SSH_PRIVKEY_PATH" -t rsa -b 4096
fi

# add SSH_PUBKEY_PATH to authorized_keys
grep -qxF "$(cat "$SSH_PUBKEY_PATH")" "$SSH_PATH/authorized_keys" || cat "$SSH_PUBKEY_PATH" >> "$SSH_PATH/authorized_keys"

FROM_BUILT_IMAGE=false

# if the ss-mgmt VM doesn't exist, create it.
if ! incus list --format csv | grep -q ss-mgmt; then

    # TODO check to see if there's an existing ss-mgmt image to spawn from, otherwise do this.
    if incus image list | grep -q ss-mgmt; then
        FROM_BUILT_IMAGE=true
        incus init ss-mgmt ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
    else
        incus init "images:$BASE_INCUS_IMAGE" ss-mgmt --vm -c limits.cpu=4 -c limits.memory=4GiB --profile=default
    fi
fi
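
# each device mount below is guarded by an 'incus config device show | grep -q'
# check so that re-running this script doesn't try to add the same device twice.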

# mount the pre-verified sovereign stack git repo into the new vm
if ! incus config device show ss-mgmt | grep -q ss-code; then
    incus config device add ss-mgmt ss-code disk source="$(pwd)" path=/home/ubuntu/sovereign-stack
fi

# create the ~/ss path and mount it into the vm.
source ./deployment/deployment_defaults.sh
source ./deployment/base.sh

mkdir -p "$SS_ROOT_PATH"

if ! incus config device show ss-mgmt | grep -q ss-root; then
    incus config device add ss-mgmt ss-root disk source="$SS_ROOT_PATH" path=/home/ubuntu/ss
fi

# if a ~/.bitcoin/testnet3/blocks directory exists, mount it in.
BITCOIN_DIR="$HOME/.bitcoin"
REMOTE_BITCOIN_CACHE_PATH="/home/ubuntu/ss/cache/bitcoin"
BITCOIN_TESTNET_BLOCKS_PATH="$BITCOIN_DIR/testnet3/blocks"
if [ -d "$BITCOIN_TESTNET_BLOCKS_PATH" ]; then
    if ! incus config device show ss-mgmt | grep -q ss-testnet-blocks; then
        incus config device add ss-mgmt ss-testnet-blocks disk source="$BITCOIN_TESTNET_BLOCKS_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/testnet/blocks"
    fi
fi

# if a ~/.bitcoin/testnet3/chainstate directory exists, mount it in.
BITCOIN_TESTNET_CHAINSTATE_PATH="$BITCOIN_DIR/testnet3/chainstate"
if [ -d "$BITCOIN_TESTNET_CHAINSTATE_PATH" ]; then
    if ! incus config device show ss-mgmt | grep -q ss-testnet-chainstate; then
        incus config device add ss-mgmt ss-testnet-chainstate disk source="$BITCOIN_TESTNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/testnet/chainstate"
    fi
fi

# if a ~/.bitcoin/blocks directory exists, mount it in.
BITCOIN_MAINNET_BLOCKS_PATH="$BITCOIN_DIR/blocks"
if [ -d "$BITCOIN_MAINNET_BLOCKS_PATH" ]; then
    if ! incus config device show ss-mgmt | grep -q ss-mainnet-blocks; then
        incus config device add ss-mgmt ss-mainnet-blocks disk source="$BITCOIN_MAINNET_BLOCKS_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/blocks"
    fi
fi

# if a ~/.bitcoin/chainstate directory exists, mount it in.
BITCOIN_MAINNET_CHAINSTATE_PATH="$BITCOIN_DIR/chainstate"
if [ -d "$BITCOIN_MAINNET_CHAINSTATE_PATH" ]; then
    if ! incus config device show ss-mgmt | grep -q ss-mainnet-chainstate; then
        incus config device add ss-mgmt ss-mainnet-chainstate disk source="$BITCOIN_MAINNET_CHAINSTATE_PATH" path="$REMOTE_BITCOIN_CACHE_PATH/mainnet/chainstate"
    fi
fi

# mount the ssh directory in there.
if [ -f "$SSH_PUBKEY_PATH" ]; then
    if ! incus config device show ss-mgmt | grep -q ss-ssh; then
        incus config device add ss-mgmt ss-ssh disk source="$HOME/.ssh" path=/home/ubuntu/.ssh
    fi
fi

# start the vm if it's not already running
if incus list --format csv | grep -q "ss-mgmt,STOPPED"; then
    incus start ss-mgmt
    sleep 10
fi
# wait for the vm to have an IP address
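# wait_for_ip.sh is expected to set IP_V4_ADDRESS, which is used below to SSH into the vm.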
. ./management/wait_for_ip.sh

# do some other preparations for the user experience: push bash dotfiles and a custom motd into the vm.
incus file push ./management/bash_aliases ss-mgmt/home/ubuntu/.bash_aliases
incus file push ./management/bash_profile ss-mgmt/home/ubuntu/.bash_profile
incus file push ./management/bashrc ss-mgmt/home/ubuntu/.bashrc
incus file push ./management/motd ss-mgmt/etc/update-motd.d/sovereign-stack

# install the OpenSSH server and apply our sshd configuration.
incus exec ss-mgmt -- apt-get update
incus exec ss-mgmt -- apt-get install -y openssh-server
incus file push ./management/sshd_config ss-mgmt/etc/ssh/sshd_config
incus exec ss-mgmt -- sudo systemctl restart sshd

# add 'ss-manage' to the bare metal ~/.bashrc
ADDED_COMMAND=false
if ! < "$HOME/.bashrc" grep -q "ss-manage"; then
    echo "alias ss-manage='$(pwd)/manage.sh \$@'" >> "$HOME/.bashrc"
    ADDED_COMMAND=true
fi

# Let's remove any entry in our known_hosts, then add it back.
# We are using the IP address here so we don't have to rely on external DNS
# configuration for the base image preparation.
ssh-keygen -R "$IP_V4_ADDRESS"
ssh-keyscan -H "$IP_V4_ADDRESS" >> "$SSH_PATH/known_hosts"

ssh "ubuntu@$IP_V4_ADDRESS" sudo chown -R ubuntu:ubuntu /home/ubuntu

if [ "$FROM_BUILT_IMAGE" = false ]; then
    ssh "ubuntu@$IP_V4_ADDRESS" /home/ubuntu/sovereign-stack/management/provision.sh

    incus stop ss-mgmt

    if ! incus image list | grep -q "ss-mgmt"; then
        echo "Publishing image. Please wait, this may take a while..."
        incus publish ss-mgmt --alias=ss-mgmt
    fi

    incus start ss-mgmt
fi

if [ "$ADDED_COMMAND" = true ]; then
    echo "NOTICE! You need to run 'source ~/.bashrc' before continuing. After that, type 'ss-manage' to enter your management environment."
fi