diff --git a/51-trezor.rules b/51-trezor.rules
index c0d43b2..9cce6fa 100644
--- a/51-trezor.rules
+++ b/51-trezor.rules
@@ -7,10 +7,6 @@
 # put this into /usr/lib/udev/rules.d or /lib/udev/rules.d
 # depending on your distribution
 
-# Trezor
-SUBSYSTEM=="usb", ATTR{idVendor}=="534c", ATTR{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
-KERNEL=="hidraw*", ATTRS{idVendor}=="534c", ATTRS{idProduct}=="0001", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl"
-
 # Trezor v2
 SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c0", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
 SUBSYSTEM=="usb", ATTR{idVendor}=="1209", ATTR{idProduct}=="53c1", MODE="0660", GROUP="plugdev", TAG+="uaccess", TAG+="udev-acl", SYMLINK+="trezor%n"
diff --git a/README.md b/README.md
index 4ebb70f..e6f9075 100644
--- a/README.md
+++ b/README.md
@@ -6,10 +6,10 @@ You can update Sovereign Stack scripts on your management machine by running `gi
 
 Once your managent machine checkedout a specific version of Sovereign stack, you will want to run the various scripts against your remotes. But before you can do that, you need to bring a bare-metal Ubuntu 22.04 cluster host under management (i.e., add it as a remote). Generally speaking you will run `ss-cluster` to bring a new bare-metal host under management of your management machine. This can be run AFTER you have verified SSH access to the bare-metal hosts. The device SHOULD also have a DHCP Reservation and DNS records in place.
 
-After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site defintions. These files are stubbed out for the user automatically and documetnation guides the user through the process.
+After you have taken a machine under management, you can run `ss-deploy` it. All Sovereign Stack scripts execute against your current lxc remote. (Run `lxc remote list` to see your remotes). This will deploy Sovereign Stack software to your active remote in accordance with the various cluster, project, and site definitions. These files are stubbed out for the user automatically and documentation guides the user through the process.
 
 It is the responsiblity of the management machine (i.e,. system owner) to run the scripts on a regular and ongoing basis to ensure active deployments stay up-to-date with the Sovereign Stack master branch. By default (i.e., without any command line modifiers), Sovereign Stack scripts will back up active deployments resulting in minimal downtime. (zero downtime for Ghost, minimal for Nextcloud/Gitea, BTCPAY Server).
 
-All other documentation for this project can be found at the [sovereign-stack.org](https://www.sovereign-stack.org).
\ No newline at end of file
+All other documentation for this project can be found at the [sovereign-stack.org](https://www.sovereign-stack.org).
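The README paragraphs above describe the intended operator flow: verify SSH access to the bare-metal host, bring it under management with `ss-cluster`, then run `ss-deploy` against the active LXC remote. A minimal sketch of that sequence on the management machine, assuming the aliases from install.sh are already in place (exact subcommands and arguments may differ from what is shown here):

    # see which LXC remote the scripts will target
    lxc remote list

    # bring a new bare-metal Ubuntu 22.04 host under management
    # (requires working SSH access, a DHCP reservation, and DNS records)
    ss-cluster create

    # deploy Sovereign Stack to the active remote per the cluster, project, and site definitions
    ss-deploy
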
diff --git a/defaults.sh b/defaults.sh
index ef5e846..b79cc47 100755
--- a/defaults.sh
+++ b/defaults.sh
@@ -37,6 +37,7 @@
 export DUPLICITY_BACKUP_PASSPHRASE=
 
 export SSH_HOME="$HOME/.ssh"
+export PASS_HOME="$HOME/.password-store"
 export VLAN_INTERFACE=
 export VM_NAME="sovereign-stack-base"
 export DEV_MEMORY_MB="8096"
diff --git a/deploy.sh b/deploy.sh
index 62b80f7..a16cfbb 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -116,7 +116,6 @@ fi
 
 # set up our default paths.
 source ./defaults.sh
-export CACHES_DIR="$HOME/ss-cache"
 export DOMAIN_NAME="$DOMAIN_NAME"
 export REGISTRY_DOCKER_IMAGE="registry:2"
 export BTCPAY_RESTORE_ARCHIVE_PATH="$BTCPAY_RESTORE_ARCHIVE_PATH"
@@ -149,7 +148,7 @@ export CLUSTER_DEFINITION="$CLUSTER_DEFINITION"
 #########################################
 
 if [ ! -f "$CLUSTER_DEFINITION" ]; then
-    echo "ERROR: The cluster defintion could not be found. You may need to re-run 'ss-cluster create'."
+    echo "ERROR: The cluster definition could not be found. You may need to re-run 'ss-cluster create'."
     exit 1
 fi
 
@@ -259,9 +258,9 @@ function instantiate_vms {
 
     # if the local docker client isn't logged in, do so;
     # this helps prevent docker pull errors since they throttle.
-    if [ ! -f "$HOME/.docker/config.json" ]; then
-        echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
-    fi
+    # if [ ! -f "$HOME/.docker/config.json" ]; then
+    #     echo "$REGISTRY_PASSWORD" | docker login --username "$REGISTRY_USERNAME" --password-stdin
+    # fi
 
     # this tells our local docker client to target the remote endpoint via SSH
     export DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN"
@@ -313,7 +312,7 @@ export GITEA_MYSQL_ROOT_PASSWORD="$(new_pass)"
 EOL
 
         chmod 0744 "$SITE_DEFINITION_PATH"
-        echo "INFO: we stubbed a new site_defintion for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
+        echo "INFO: we stubbed a new site_definition for you at '$SITE_DEFINITION_PATH'. Go update it yo!"
         exit 1
     fi
 
diff --git a/deployment/deploy_vms.sh b/deployment/deploy_vms.sh
index acd8505..2488222 100755
--- a/deployment/deploy_vms.sh
+++ b/deployment/deploy_vms.sh
@@ -33,7 +33,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then
 
     # create a base image if needed and instantiate a VM.
     if [ -z "$MAC_ADDRESS_TO_PROVISION" ]; then
-        echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site defintion."
+        echo "ERROR: You MUST define a MAC Address for all your machines by setting WWW_SERVER_MAC_ADDRESS, BTCPAYSERVER_MAC_ADDRESS in your site definition."
         echo "INFO: IMPORTANT! You MUST have DHCP Reservations for these MAC addresses. You also need records established the DNS."
         exit 1
     fi
diff --git a/install.sh b/install.sh
index b5e350a..d8e1716 100755
--- a/install.sh
+++ b/install.sh
@@ -1,10 +1,12 @@
 #!/bin/bash
-set -eu
+set -exu
 
 cd "$(dirname "$0")"
 
+source ./defaults.sh
+
 # let's check to ensure the management machine is on the Baseline ubuntu 21.04
-if ! lsb_release -d | grep -q "Ubuntu 22.04 LTS"; then
+if ! lsb_release -d | grep "Ubuntu 22.04" | grep -q "LTS"; then
     echo "ERROR: Your machine is not running the Ubuntu 22.04 LTS baseline OS on your management machine."
     exit 1
 fi
@@ -17,14 +19,14 @@
 sudo apt-get update
 
 # TODO REVIEW management machine software requirements
-# is docker-ce actually needed here? prefer to move docker registry
 # to a host on SERVERS LAN so that it can operate
 # TODO document which dependencies are required by what software, e.g., trezor, docker, etc.
+# virt-manager allows us to run type-1 VMs on the desktop. We use remote viewer to get a GUI for the VM
 sudo apt-get install -y wait-for-it dnsutils rsync sshfs curl gnupg \
-    apt-transport-https ca-certificates lsb-release \
-    docker-ce-cli docker-ce containerd.io docker-compose-plugin \
+    apt-transport-https ca-certificates lsb-release docker-ce-cli \
     python3-pip python3-dev libusb-1.0-0-dev libudev-dev pinentry-curses \
-    libcanberra-gtk-module
+    libcanberra-gtk-module virt-manager pass
+
 # for trezor installation
 pip3 install setuptools wheel
 
@@ -34,37 +36,32 @@ if [ ! -f /etc/udev/rules.d/51-trezor.rules ]; then
     sudo cp ./51-trezor.rules /etc/udev/rules.d/51-trezor.rules
 fi
 
+# TODO initialize pass here; need to first initialize Trezor-T certificates.
+
+
 # install lxd as a snap if it's not installed. We only really use the client part of this package
 # on the management machine.
 if ! snap list | grep -q lxd; then
     sudo snap install lxd --candidate
+
+    # initialize the daemon for auto use. Most of the time on the management machine,
+    # we only use the LXC client -- not the daemon. HOWEVER, there are circumstances where
+    # you might want to run the management machine in an LXD-based VM. So we init the lxd daemon
+    # after having installed it so it'll be available for use.
+    # see https://www.sovereign-stack.org/management/
+    sudo lxd init --auto --storage-pool=default --storage-create-loop=50 --storage-backend=zfs
 fi
 
-# make ss-deploy available to the user
-if ! groups | grep -q docker; then
-    sudo groupadd docker
-fi
-
-sudo usermod -aG docker "$USER"
-
-# make the Sovereign Stack commands available to the user.
+# make the Sovereign Stack commands available to the user via ~/.bashrc
 # we use ~/.bashrc
 ADDED_COMMAND=false
-if ! < "$HOME/.bashrc" grep -q "ss-deploy"; then
-    echo "alias ss-deploy='/home/$USER/sovereign-stack/deploy.sh \$@'" >> "$HOME/.bashrc"
-    ADDED_COMMAND=true
-fi
-
-if ! < "$HOME/.bashrc" grep -q "ss-cluster"; then
-    echo "alias ss-cluster='/home/$USER/sovereign-stack/cluster.sh \$@'" >> "$HOME/.bashrc"
-    ADDED_COMMAND=true
-fi
-
-if ! < "$HOME/.bashrc" grep -q "ss-projects"; then
-    echo "alias ss-projects='/home/$USER/sovereign-stack/projects.sh \$@'" >> "$HOME/.bashrc"
-    ADDED_COMMAND=true
-fi
+for SS_COMMAND in deploy cluster; do
+    if ! < "$HOME/.bashrc" grep -q "ss-$SS_COMMAND"; then
+        echo "alias ss-${SS_COMMAND}='$(pwd)/${SS_COMMAND}.sh \$@'" >> "$HOME/.bashrc"
+        ADDED_COMMAND=true
+    fi
+done
 
 if [ "$ADDED_COMMAND" = true ]; then
     echo "WARNING! You need to run 'source ~/.bashrc' before continuing."
-fi
\ No newline at end of file
+fi
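Because install.sh changes into its own directory before writing the aliases, the `$(pwd)` used in the new alias loop resolves to the repository checkout, so `ss-deploy` and `ss-cluster` point at the checked-out deploy.sh and cluster.sh. A quick way to confirm the aliases after the script finishes (the checkout path in the expected output is only an example):

    source ~/.bashrc
    type ss-deploy ss-cluster
    # expected output looks roughly like:
    #   ss-deploy is aliased to `/home/user/sovereign-stack/deploy.sh $@'
    #   ss-cluster is aliased to `/home/user/sovereign-stack/cluster.sh $@'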