diff --git a/deploy_vm.sh b/deploy_vm.sh index 885e92f..f35c451 100755 --- a/deploy_vm.sh +++ b/deploy_vm.sh @@ -44,7 +44,7 @@ if ! lxc list --format csv | grep -q "$LXD_VM_NAME"; then # # attach any volumes # for CHAIN in testnet mainnet; do # for DATA in blocks chainstate; do - # MOUNT_PATH="/$CHAIN/$DATA" + # MOUNT_PATH="/$CHAIN-$DATA" # if lxc storage volume list ss-base -q --format csv -c n | grep -q "$CHAIN-$DATA/snap0"; then # lxc storage volume attach ss-base "$CHAIN-$DATA" "$LXD_VM_NAME" "$MOUNT_PATH" # fi diff --git a/www/backup_path.sh b/www/backup_path.sh index 336705c..02be60a 100755 --- a/www/backup_path.sh +++ b/www/backup_path.sh @@ -14,16 +14,5 @@ fi ssh "$PRIMARY_WWW_FQDN" sudo PASSPHRASE="$DUPLICITY_BACKUP_PASSPHRASE" duplicity "$REMOTE_SOURCE_BACKUP_PATH" "file://$REMOTE_BACKUP_PATH" ssh "$PRIMARY_WWW_FQDN" sudo chown -R ubuntu:ubuntu "$REMOTE_BACKUP_PATH" -SSHFS_PATH="/tmp/sshfs_temp" -mkdir -p "$SSHFS_PATH" - -# now let's pull down the latest files from the backup directory. -# create a temp directory to serve as the mountpoint for the remote machine backups directory -sshfs "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH" "$SSHFS_PATH" - -# rsync the files from the remote server to our local backup path. -rsync -av "$SSHFS_PATH" "$LOCAL_BACKUP_PATH" - -# step 4: unmount the SSHFS filesystem and cleanup. -umount "$SSHFS_PATH" -rm -rf "$SSHFS_PATH" +# sync the remote backup path down +rsync -a "$PRIMARY_WWW_FQDN:$REMOTE_BACKUP_PATH/" "$LOCAL_BACKUP_PATH/" diff --git a/www/go.sh b/www/go.sh index 1177de6..a8513f2 100755 --- a/www/go.sh +++ b/www/go.sh @@ -8,9 +8,9 @@ DOCKER_HOST="ssh://ubuntu@$PRIMARY_WWW_FQDN" export DOCKER_HOST="$DOCKER_HOST" # prepare clams images and such -./prepare_clams.sh +#./prepare_clams.sh -# Create the nginx config file which covers all domains. +# Create the nginx config file which covers all domains. bash -c ./stub/nginx_config.sh for DOMAIN_NAME in ${DOMAIN_LIST//,/ }; do