
Various updates: drop the Trezor v1 udev rules, rework the management-machine setup in install.sh (dependency list and generalized ss-* alias handling), add PASS_HOME to defaults.sh, comment out the local docker login in the deploy script, and fix several "defintion" typos.

Derek Smith 2022-11-21 10:58:32 -05:00
parent 47191ba254
commit bd9a76108b
Signed by: farscapian
GPG Key ID: 8F1CD799CCA516CC
6 changed files with 35 additions and 42 deletions

View File

@@ -7,10 +7,6 @@
 # put this into /usr/lib/udev/rules.d or /lib/udev/rules.d
 # depending on your distribution
-# Trezor
-SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
-KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl"
 # Trezor v2
 SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
 SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"

View File

@@ -6,10 +6,10 @@ You can update Sovereign Stack scripts on your management machine by running `gi
 Once your managent machine checkedout a specific version of Sovereign stack, you will want to run the various scripts against your remotes. But before you can do that, you need to bring a bare-metal Ubuntu 22.04 cluster host under management (i.e., add it as a remote). Generally speaking you will run `ss-cluster` to bring a new bare-metal host under management of your management machine. This can be run AFTER you have verified SSH access to the bare-metal hosts. The device SHOULD also have a DHCP Reservation and DNS records in place.
-After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site defintions. These files are stubbed out for the user automatically and documetnation guides the user through the process.
+After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site definitions. These files are stubbed out for the user automatically and documetnation guides the user through the process.
 It is the responsiblity of the management machine (i.e,. system owner) to run the scripts on a regular and ongoing basis to ensure active deployments stay up-to-date with the Sovereign Stack master branch.
 By default (i.e., without any command line modifiers), Sovereign Stack scripts will back up active deployments resulting in minimal downtime. (zero downtime for Ghost, minimal for Nextcloud/Gitea, BTCPAY Server).
 All other documentation for this project can be found at the [sovereign-stack.org](https://www.sovereign-stack.org).
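Taken together, the workflow this README describes is roughly the following; a hedged sketch only, since flags and prompts depend on your cluster, project, and site definitions:

    # see which LXD remote the scripts will run against
    lxc remote list
    # bring a new bare-metal Ubuntu 22.04 host under management
    # (run only after SSH access, the DHCP reservation, and DNS records are in place)
    ss-cluster
    # deploy Sovereign Stack to the active remote; by default this also backs up existing deployments
    ss-deploy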

View File

@@ -37,6 +37,7 @@ export DUPLICITY_BACKUP_PASSPHRASE=
 export SSH_HOME="$HOME/.ssh"
+export PASS_HOME="$HOME/.password-store"
 export VLAN_INTERFACE=
 export VM_NAME="sovereign-stack-base"
 export DEV_MEMORY_MB="8096"

View File

@@ -116,7 +116,6 @@ fi
 # set up our default paths.
 source ./defaults.sh
-export CACHES_DIR="$HOME/ss-cache"
 export DOMAIN_NAME="$DOMAIN_NAME"
 export REGISTRY_DOCKER_IMAGE="registry:2"
 export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH"
@@ -149,7 +148,7 @@ export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
 #########################################
 if [ ! -f "$CLUSTER_DEFINITION" ]; then
-echo "ERROR: The cluster defintion could not be found. You may need to re-run 'ss-cluster create'."
+echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
 exit 1
 fi
@@ -259,9 +258,9 @@ function instantiate_vms {
 # if the local docker client isn't logged in, do so;
 # this helps prevent docker pull errors since they throttle.
-if [ ! -f "$HOME/.docker/config.json" ]; then
-echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
-fi
+# if [ ! -f "$HOME/.docker/config.json" ]; then
+# echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
+# fi
 # this tells our local docker client to target the remote endpoint via SSH
 export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
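The DOCKER_HOST export above is what makes every subsequent docker invocation in the script run against the remote VM over SSH rather than a local daemon. A small illustrative check, with a placeholder FQDN:

    # with DOCKER_HOST pointing at ssh://, docker commands execute on the remote endpoint
    export DOCKER_HOST="ssh://ubuntu@www.example.com"
    docker info --format '{{.Name}}'   # prints the remote host's name, not the local one
    docker ps                          # lists containers running on the remote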
@@ -313,7 +312,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
 EOL
 chmod 0744 "$SITE_DEFINITION_PATH"
-echo "INFO: we stubbed a new site_defintion for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
+echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
 exit 1
 fi

View File

@@ -33,7 +33,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
 # create a base image if needed and instantiate a VM.
 if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
-echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site defintion."
+echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
 echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
 exit 1
 fi
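For reference, the variables named in that error live in the site definition. A hedged illustration of the relevant lines (the MAC addresses are placeholders; they must match your DHCP reservations and DNS records):

    # site definition excerpt -- values below are illustrative only
    export WWW_SERVER_MAC_ADDRESS="00:16:3e:aa:bb:01"
    export BTCPAYSERVER_MAC_ADDRESS="00:16:3e:aa:bb:02"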

View File

@@ -1,10 +1,12 @@
 #!/bin/bash
-set -eu
+set -exu
 cd "$(dirname "$0")"
+source ./defaults.sh
 # let's check to ensure the management machine is on the Baseline ubuntu 21.04
-if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then
+if ! lsb_release -d | grep "Ubuntu 22.04" | grep -q "LTS"; then
 echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
 exit 1
 fi
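Splitting the match into two greps presumably lets point releases pass the check; on a patched 22.04 system, lsb_release prints something like the following (the exact point release is illustrative), which contains "Ubuntu 22.04" and "LTS" but not the literal string "Ubuntu 22.04 LTS":

    $ lsb_release -d
    Description:    Ubuntu 22.04.1 LTS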
@@ -17,14 +19,14 @@ fi
 sudo apt-get update
 # TODO REVIEW management machine software requirements
 # is docker-ce actually needed here? prefer to move docker registry
 # to a host on SERVERS LAN so that it can operate
 # TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
+# virt-manager allows us to run type-1 vms desktop version. We use remote viewer to get a GUI for the VM
 sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
-apt-transport-https ca-certificates lsb-release \
-docker-ce-cli docker-ce containerd.io docker-compose-plugin \
+apt-transport-https ca-certificates lsb-release docker-ce-cli \
 python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses \
-libcanberra-gtk-module
+libcanberra-gtk-module virt-manager pass
 # for trezor installation
 pip3 install setuptools wheel
@@ -34,37 +36,32 @@ if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
 sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
 fi
+# TODO initialize pass here; need to first initialize Trezor-T certificates.
 # install lxd as a snap if it's not installed. We only really use the client part of this package
 # on the management machine.
 if ! snap list | grep -q lxd; then
 sudo snap install lxd --candidate
+# initialize the daemon for auto use. Most of the time on the management machine,
+# we only use the LXC client -- not the daemon. HOWEVER, there are circustances where
+# you might want to run the management machine in a LXD-based VM. We we init the lxd daemon
+# after havning installed it so it'll be available for use.
+# see https://www.sovereign-stack.org/management/
+sudo lxd init --auto --storage-pool=default --storage-create-loop=50 --storage-backend=zfs
 fi
-# make ss-deploy available to the user
-if ! groups | grep -q docker; then
-sudo groupadd docker
-fi
-sudo usermod -aG docker "$USER"
-# make the Sovereign Stack commands available to the user.
+# make the Sovereign Stack commands available to the user via ~/.bashrc
 # we use ~/.bashrc
 ADDED_COMMAND=false
-if ! < "$HOME/.bashrc" grep -q "ss-deploy"; then
-echo "alias ss-deploy='/home/$USER/sovereign-stack/deploy.sh \$@'" >> "$HOME/.bashrc"
-ADDED_COMMAND=true
-fi
-if ! < "$HOME/.bashrc" grep -q "ss-cluster"; then
-echo "alias ss-cluster='/home/$USER/sovereign-stack/cluster.sh \$@'" >> "$HOME/.bashrc"
-ADDED_COMMAND=true
-fi
-if ! < "$HOME/.bashrc" grep -q "ss-projects"; then
-echo "alias ss-projects='/home/$USER/sovereign-stack/projects.sh \$@'" >> "$HOME/.bashrc"
-ADDED_COMMAND=true
-fi
+for SS_COMMAND in deploy cluster; do
+if ! < "$HOME/.bashrc" grep -q "ss-$SS_COMMAND"; then
+echo "alias ss-${SS_COMMAND}='$(pwd)/${SS_COMMAND}.sh \$@'" >> "$HOME/.bashrc"
+ADDED_COMMAND=true
+fi
+done
 if [ "$ADDED_COMMAND" = true ]; then
 echo "WARNING! You need to run 'source ~/.bashrc' before continuing."
 fi
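For clarity, the loop above appends one alias per command to ~/.bashrc, built from the directory the script was run in (it uses $(pwd), so the paths below assume the repository lives at /home/ubuntu/sovereign-stack):

    alias ss-deploy='/home/ubuntu/sovereign-stack/deploy.sh $@'
    alias ss-cluster='/home/ubuntu/sovereign-stack/cluster.sh $@'

After running source ~/.bashrc (or opening a new shell), ss-deploy and ss-cluster behave as described in the README above.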