
terraform apply creates the instance and ansible runs successfully

master
forest 2 years ago
commit 2c62f13759
18 changed files:
  1. ansible-wrapper/ansible.cfg (2 changed lines)
  2. ansible-wrapper/callback_plugins/default.py (0 changed lines, renamed)
  3. ansible-wrapper/main.go (1 changed line)
  4. automation/post-to-s3-bash/bin/s3-delete (148 changed lines)
  5. automation/post-to-s3-bash/bin/s3-get (148 changed lines)
  6. automation/post-to-s3-bash/bin/s3-put (170 changed lines)
  7. automation/post-to-s3-bash/lib/s3-common.sh (309 changed lines)
  8. automation/terraformActions.go (134 changed lines)
  9. automation/terraformCodeGeneration.go (111 changed lines)
  10. configuration/configuration.go (115 changed lines)
  11. host-key-poller/main.go (104 changed lines)
  12. main.go (242 changed lines)
  13. objectStorage/backblazeb2.go (154 changed lines)
  14. objectStorage/e2eeObjectStorage.go (5 changed lines)
  15. objectStorage/initialization.go (132 changed lines)
  16. objectStorage/objectStorager.go (16 changed lines)
  17. objectStorage/redundantObjectStorage.go (33 changed lines)
  18. objectStorage/s3Like.go (5 changed lines)

ansible-wrapper/ansible.cfg (2 changed lines)

@@ -1,5 +1,5 @@
[defaults]
host_key_checking = True
callback_plugins = ./callback_plugins
stdout_callback = default-with-json-interleaved

ansible-wrapper/callback_plugins/default-copy.py → ansible-wrapper/callback_plugins/default.py (renamed, 0 changed lines)

ansible-wrapper/main.go (1 changed line)

@@ -113,4 +113,5 @@ func main() {
err = errors.Wrapf(err, "can't ShellExec(ansible-playbook %s), process.Wait() returned", strings.Join(arguments, " "))
}
os.Exit(process.ProcessState.ExitCode())
}
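
The hunk above ends the wrapper's main() by exiting with ansible-playbook's own exit status, so callers of the wrapper see failures directly. A minimal, self-contained sketch of that pattern using only the Go standard library (the variable names are illustrative, not the wrapper's actual code):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Run ansible-playbook with the wrapper's arguments and stream its output.
	cmd := exec.Command("ansible-playbook", os.Args[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		os.Exit(1)
	}
	// Wait returns an error for a non-zero exit status, but the status itself
	// is still recorded on ProcessState, which is what gets forwarded here.
	_ = cmd.Wait()
	os.Exit(cmd.ProcessState.ExitCode())
}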

automation/post-to-s3-bash/bin/s3-delete (148 changed lines)

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
#
# Delete a file from S3
# (c) 2015 Chi Vinh Le <cvl@winged.kiwi>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
set -euo pipefail
readonly PROJECT_PATH=$( cd $(dirname $0) ; cd ../; pwd -P )
readonly SCRIPT_NAME="$(basename $0)"
readonly METHOD="DELETE"
# Includes
source ${PROJECT_PATH}/lib/s3-common.sh
##
# Print help and exit
# Arguments:
# $1 int exit code
# Output:
# string help
##
printUsageAndExitWith() {
printf "Usage:\n"
printf " ${SCRIPT_NAME} [-vi] [-k key] [-s file] [-r region] resource_path\n"
printf " ${SCRIPT_NAME} -h\n"
printf "Example:\n"
printf " ${SCRIPT_NAME} -k key -s secret -r eu-central-1 /bucket/file.ext\n"
printf "Options:\n"
printf " --debug\tEnable debugging mode\n"
printf " -h,--help\tPrint this help\n"
printf " -i,--insecure\tUse http instead of https\n"
printf " -k,--key\tAWS Access Key ID. Default to environment variable AWS_ACCESS_KEY_ID\n"
printf " -r,--region\tAWS S3 Region. Default to environment variable AWS_DEFAULT_REGION\n"
printf " -s,--secret\tFile containing AWS Secret Access Key. If not set, secret will be environment variable AWS_SECRET_ACCESS_KEY\n"
printf " -t,--token\tSecurity token for temporary credentials. If not set, token will be environment variable AWS_SECURITY_TOKEN\n"
printf " -v,--verbose\tVerbose output\n"
printf " --version\tShow version\n"
exit $1
}
##
# Parse command line and set global variables
# Arguments:
# $@ command line
# Globals:
# AWS_ACCESS_KEY_ID string
# AWS_SECRET_ACCESS_KEY string
# AWS_REGION string
# AWS_SECURITY_TOKEN string
# RESOURCE_PATH string
# VERBOSE bool
# INSECURE bool
# DEBUG bool
##
parseCommandLine() {
# Init globals
AWS_REGION=${AWS_DEFAULT_REGION:-""}
AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-""}
AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-""}
AWS_SECURITY_TOKEN=${AWS_SECURITY_TOKEN:-""}
VERBOSE=false
INSECURE=false
DEBUG=false
# Parse options
local remaining=
local secretKeyFile=
while [[ $# > 0 ]]; do
local key="$1"
case ${key} in
--version) showVersionAndExit;;
--debug) DEBUG=true;;
-h|--help) printUsageAndExitWith 0;;
-v|--verbose) VERBOSE=true;;
-i|--insecure) INSECURE=true;;
-r|--region) assertArgument $@; AWS_REGION=$2; shift;;
-k|--key) assertArgument $@; AWS_ACCESS_KEY_ID=$2; shift;;
-s|--secret) assertArgument $@; secretKeyFile=$2; shift;;
-t|--token) assertArgument $@; AWS_SECURITY_TOKEN=$2; shift;;
-*) err "Unknown option $1"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE};;
*) remaining="${remaining} \"${key}\"";;
esac
shift
done
# Set the non-parameters back into the positional parameters ($1 $2 ..)
eval set -- ${remaining}
# Read secret file if set
if ! [[ -z "${secretKeyFile}" ]]; then
AWS_SECRET_ACCESS_KEY=$(processAWSSecretFile "${secretKeyFile}")
fi
# Parse arguments
if [[ $# != 1 ]]; then
err "You need to specify the resource path to download e.g. /bucket/file.ext"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
assertResourcePath "$1"
RESOURCE_PATH="$1"
if [[ -z "${AWS_REGION}" ]]; then
err "AWS Region not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_ACCESS_KEY_ID}" ]]; then
err "AWS Access Key ID not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_SECRET_ACCESS_KEY}" ]]; then
err "AWS Secret Access Key not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
# Freeze globals
readonly AWS_REGION
readonly AWS_ACCESS_KEY_ID
readonly AWS_SECRET_ACCESS_KEY
readonly RESOURCE_PATH
readonly DEBUG
readonly VERBOSE
readonly INSECURE
}
##
# Main routine
##
main() {
checkEnvironment
parseCommandLine $@
performRequest
}
main $@

automation/post-to-s3-bash/bin/s3-get (148 changed lines)

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
#
# Download a file from S3
# (c) 2015 Chi Vinh Le <cvl@winged.kiwi>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
set -euo pipefail
readonly PROJECT_PATH=$( cd $(dirname $0) ; cd ../; pwd -P )
readonly SCRIPT_NAME="$(basename $0)"
readonly METHOD="GET"
# Includes
source ${PROJECT_PATH}/lib/s3-common.sh
##
# Print help and exit
# Arguments:
# $1 int exit code
# Output:
# string help
##
printUsageAndExitWith() {
printf "Usage:\n"
printf " ${SCRIPT_NAME} [-vi] [-k key] [-s file] [-r region] resource_path\n"
printf " ${SCRIPT_NAME} -h\n"
printf "Example:\n"
printf " ${SCRIPT_NAME} -k key -s secret -r eu-central-1 /bucket/file.ext\n"
printf "Options:\n"
printf " --debug\tEnable debugging mode\n"
printf " -h,--help\tPrint this help\n"
printf " -i,--insecure\tUse http instead of https\n"
printf " -k,--key\tAWS Access Key ID. Default to environment variable AWS_ACCESS_KEY_ID\n"
printf " -r,--region\tAWS S3 Region. Default to environment variable AWS_DEFAULT_REGION\n"
printf " -s,--secret\tFile containing AWS Secret Access Key. If not set, secret will be environment variable AWS_SECRET_ACCESS_KEY\n"
printf " -t,--token\tSecurity token for temporary credentials. If not set, token will be environment variable AWS_SECURITY_TOKEN\n"
printf " -v,--verbose\tVerbose output\n"
printf " --version\tShow version\n"
exit $1
}
##
# Parse command line and set global variables
# Arguments:
# $@ command line
# Globals:
# AWS_ACCESS_KEY_ID string
# AWS_SECRET_ACCESS_KEY string
# AWS_REGION string
# AWS_SECURITY_TOKEN string
# RESOURCE_PATH string
# VERBOSE bool
# INSECURE bool
# DEBUG bool
##
parseCommandLine() {
# Init globals
AWS_REGION=${AWS_DEFAULT_REGION:-""}
AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-""}
AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-""}
AWS_SECURITY_TOKEN=${AWS_SECURITY_TOKEN:-""}
VERBOSE=false
INSECURE=false
DEBUG=false
# Parse options
local remaining=
local secretKeyFile=
while [[ $# > 0 ]]; do
local key="$1"
case ${key} in
--version) showVersionAndExit;;
--debug) DEBUG=true;;
-h|--help) printUsageAndExitWith 0;;
-v|--verbose) VERBOSE=true;;
-i|--insecure) INSECURE=true;;
-r|--region) assertArgument $@; AWS_REGION=$2; shift;;
-k|--key) assertArgument $@; AWS_ACCESS_KEY_ID=$2; shift;;
-s|--secret) assertArgument $@; secretKeyFile=$2; shift;;
-t|--token) assertArgument $@; AWS_SECURITY_TOKEN=$2; shift;;
-*) err "Unknown option $1"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE};;
*) remaining="${remaining} \"${key}\"";;
esac
shift
done
# Set the non-parameters back into the positional parameters ($1 $2 ..)
eval set -- ${remaining}
# Read secret file if set
if ! [[ -z "${secretKeyFile}" ]]; then
AWS_SECRET_ACCESS_KEY=$(processAWSSecretFile "${secretKeyFile}")
fi
# Parse arguments
if [[ $# != 1 ]]; then
err "You need to specify the resource path to download e.g. /bucket/file.ext"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
assertResourcePath "$1"
RESOURCE_PATH="$1"
if [[ -z "${AWS_REGION}" ]]; then
err "AWS Region not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_ACCESS_KEY_ID}" ]]; then
err "AWS Access Key ID not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_SECRET_ACCESS_KEY}" ]]; then
err "AWS Secret Access Key not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
# Freeze globals
readonly AWS_REGION
readonly AWS_ACCESS_KEY_ID
readonly AWS_SECRET_ACCESS_KEY
readonly RESOURCE_PATH
readonly DEBUG
readonly VERBOSE
readonly INSECURE
}
##
# Main routine
##
main() {
checkEnvironment
parseCommandLine $@
performRequest
}
main $@

automation/post-to-s3-bash/bin/s3-put (170 changed lines)

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
#
# Upload a file to S3
# (c) 2015 Chi Vinh Le <cvl@winged.kiwi>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
set -euo pipefail
readonly PROJECT_PATH=$( cd $(dirname $0) ; cd ../; pwd -P )
readonly SCRIPT_NAME="$(basename $0)"
readonly METHOD="PUT"
# Includes
source ${PROJECT_PATH}/lib/s3-common.sh
##
# Print help and exit
# Arguments:
# $1 int exit code
# Output:
# string help
##
printUsageAndExitWith() {
printf "Usage:\n"
printf " ${SCRIPT_NAME} [--debug] [-vip] [-k key] [-r region] [-s file] [-c content_type] -T file_to_upload resource_path\n"
printf " ${SCRIPT_NAME} -h\n"
printf "Example:\n"
printf " ${SCRIPT_NAME} -k key -s secret -r eu-central-1 -T file.ext -c text/plain /bucket/file.ext\n"
printf "Options:\n"
printf " -c,--content-type\tMIME content type\n"
printf " --debug\tEnable debugging mode\n"
printf " -h,--help\tPrint this help\n"
printf " -i,--insecure\tUse http instead of https\n"
printf " -k,--key\tAWS Access Key ID. Default to environment variable AWS_ACCESS_KEY_ID\n"
printf " -p,--public\tGrant public read on uploaded file\n"
printf " -r,--region\tAWS S3 Region. Default to environment variable AWS_DEFAULT_REGION\n"
printf " -s,--secret\tFile containing AWS Secret Access Key. If not set, secret will be environment variable AWS_SECRET_ACCESS_KEY\n"
printf " -t,--token\tSecurity token for temporary credentials. If not set, token will be environment variable AWS_SECURITY_TOKEN\n"
printf " -T,--upload-file\tPath to file to upload\n"
printf " -v,--verbose\tVerbose output\n"
printf " --version\tShow version\n"
exit $1
}
##
# Parse command line and set global variables
# Arguments:
# $@ command line
# Sets the following Globals:
# AWS_ACCESS_KEY_ID string
# AWS_SECRET_ACCESS_KEY string
# AWS_REGION string
# AWS_SECURITY_TOKEN string
# RESOURCE_PATH string
# FILE_TO_UPLOAD string
# CONTENT_TYPE string
# PUBLISH bool
# VERBOSE bool
# INSECURE bool
# DEBUG bool
# PUBLISH bool
##
parseCommandLine() {
# Init globals
AWS_REGION=${AWS_DEFAULT_REGION:-""}
AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-""}
AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-""}
AWS_SECURITY_TOKEN=${AWS_SECURITY_TOKEN:-""}
FILE_TO_UPLOAD=
CONTENT_TYPE=
PUBLISH=false
VERBOSE=false
INSECURE=false
DEBUG=false
# Parse options
local remaining=
local secretKeyFile=
while [[ $# > 0 ]]; do
local key="$1"
case ${key} in
--version) showVersionAndExit;;
--debug) DEBUG=true;;
-h|--help) printUsageAndExitWith 0;;
-v|--verbose) VERBOSE=true;;
-i|--insecure) INSECURE=true;;
-p|--publish) PUBLISH=true;;
-c|--content-type) assertArgument $@; CONTENT_TYPE=$2; shift;;
-T|--upload-file) assertArgument $@; FILE_TO_UPLOAD=$2; shift;;
-r|--region) assertArgument $@; AWS_REGION=$2; shift;;
-k|--key) assertArgument $@; AWS_ACCESS_KEY_ID=$2; shift;;
-s|--secret) assertArgument $@; secretKeyFile=$2; shift;;
-t|--token) assertArgument $@; AWS_SECURITY_TOKEN=$2; shift;;
-*) err "Unknown option $1"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE};;
*) remaining="${remaining} \"${key}\"";;
esac
shift
done
# Set the non-parameters back into the positional parameters ($1 $2 ..)
eval set -- ${remaining}
# Read secret file if set
if ! [[ -z "${secretKeyFile}" ]]; then
AWS_SECRET_ACCESS_KEY=$(processAWSSecretFile "${secretKeyFile}")
fi
# Parse arguments
if [[ $# != 1 ]]; then
err "You need to specify the resource path to upload to e.g. /bucket/file.ext"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
assertResourcePath $1
RESOURCE_PATH="$1"
if [[ -z "${FILE_TO_UPLOAD}" ]]; then
err "You need to specify the file to upload using -T"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
assertFileExists "${FILE_TO_UPLOAD}"
if [[ -z "${AWS_REGION}" ]]; then
err "AWS Region not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_ACCESS_KEY_ID}" ]]; then
err "AWS Access Key ID not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
if [[ -z "${AWS_SECRET_ACCESS_KEY}" ]]; then
err "AWS Secret Access Key not specified"
printUsageAndExitWith ${INVALID_USAGE_EXIT_CODE}
fi
# Freeze globals
readonly AWS_REGION
readonly AWS_ACCESS_KEY_ID
readonly AWS_SECRET_ACCESS_KEY
readonly RESOURCE_PATH
readonly CONTENT_TYPE
readonly PUBLISH
readonly DEBUG
readonly VERBOSE
readonly INSECURE
}
##
# Main routine
##
main() {
checkEnvironment
parseCommandLine $@
performRequest
}
main $@

automation/post-to-s3-bash/lib/s3-common.sh (309 changed lines)

@@ -0,0 +1,309 @@
#!/usr/bin/env bash
#
# Common functions for s3-bash4 commands
# (c) 2015 Chi Vinh Le <cvl@winged.kiwi>
# Constants
readonly VERSION="0.0.1"
# Exit codes
readonly INVALID_USAGE_EXIT_CODE=1
readonly INVALID_USER_DATA_EXIT_CODE=2
readonly INVALID_ENVIRONMENT_EXIT_CODE=3
##
# Write error to stderr
# Arguments:
# $1 string to output
##
err() {
echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')] Error: $@" >&2
}
##
# Display version and exit
##
showVersionAndExit() {
printf "$VERSION\n"
exit
}
##
# Helper for parsing the command line.
##
assertArgument() {
if [[ $# -lt 2 ]]; then
err "Option $1 needs an argument."
exit $INVALID_USAGE_EXIT_CODE
fi
}
##
# Asserts given resource path
# Arguments:
# $1 string resource path
##
assertResourcePath() {
if [[ $1 = !(/*) ]]; then
err "Resource should start with / e.g. /bucket/file.ext"
exit $INVALID_USAGE_EXIT_CODE
fi
}
##
# Asserts given file exists.
# Arguments:
# $1 string file path
##
assertFileExists() {
if [[ ! -f $1 ]]; then
err "$1 file doesn't exists"
exit $INVALID_USER_DATA_EXIT_CODE
fi
}
##
# Check for valid environment. Exit if invalid.
##
checkEnvironment()
{
programs=(openssl curl printf echo sed awk od date pwd dirname)
for program in "${programs[@]}"; do
if [ ! -x "$(which $program)" ]; then
err "$program is required to run"
exit $INVALID_ENVIRONMENT_EXIT_CODE
fi
done
if [ ! -x "$(which sha256sum)" ]; then
if [ ! -x "$(which shasum)" ]; then
err "sha256sum or shasum is required to run"
exit $INVALID_ENVIRONMENT_EXIT_CODE
else
SHACMD="shasum -a 256 "
fi
else
SHACMD="sha256sum "
fi
}
##
# Reads, validates and return aws secret stored in a file
# Arguments:
# $1 path to secret file
# Output:
# string AWS secret
##
processAWSSecretFile() {
local errStr="The Amazon AWS secret key must be 40 bytes long. Make sure that there is no carriage return at the end of line."
if ! [[ -f $1 ]]; then
err "The file $1 does not exist."
exit $INVALID_USER_DATA_EXIT_CODE
fi
# limit file size to max 41 characters. 40 + potential null terminating character.
local fileSize="$(ls -l "$1" | awk '{ print $5 }')"
if [[ $fileSize -gt 41 ]]; then
err $errStr
exit $INVALID_USER_DATA_EXIT_CODE
fi
secret=$(<$1)
# exact string size should be 40.
if [[ ${#secret} != 40 ]]; then
err $errStr
exit $INVALID_USER_DATA_EXIT_CODE
fi
echo $secret
}
##
# Convert string to hex with max line size of 256
# Arguments:
# $1 string to convert
# Returns:
# string hex
##
hex256() {
printf "$1" | od -A n -t x1 | sed ':a;N;$!ba;s/[\n ]//g'
}
##
# Calculate sha256 hash
# Arguments:
# $1 string to hash
# Returns:
# string hash
##
sha256Hash() {
local output=$(printf "$1" | $SHACMD)
echo "${output%% *}"
}
##
# Calculate sha256 hash of file
# Arguments:
# $1 file path
# Returns:
# string hash
##
sha256HashFile() {
local output=$($SHACMD $1)
echo "${output%% *}"
}
##
# Generate HMAC signature using SHA256
# Arguments:
# $1 signing key in hex
# $2 string data to sign
# Returns:
# string signature
##
hmac_sha256() {
printf "$2" | openssl dgst -binary -hex -sha256 -mac HMAC -macopt hexkey:$1 \
| sed 's/^.* //'
}
##
# Sign data using AWS Signature Version 4
# Arguments:
# $1 AWS Secret Access Key
# $2 yyyymmdd
# $3 AWS Region
# $4 AWS Service
# $5 string data to sign
# Returns:
# signature
##
sign() {
local kSigning=$(hmac_sha256 $(hmac_sha256 $(hmac_sha256 \
$(hmac_sha256 $(hex256 "AWS4$1") $2) $3) $4) "aws4_request")
hmac_sha256 "${kSigning}" "$5"
}
##
# Get endpoint of specified region
# Arguments:
# $1 region
# Returns:
# amazon andpoint
##
convS3RegionToEndpoint() {
case "$1" in
us-east-1) echo "s3.amazonaws.com"
;;
*) echo s3-${1}.amazonaws.com
;;
esac
}
##
# Perform request to S3
# Uses the following Globals:
# METHOD string
# AWS_ACCESS_KEY_ID string
# AWS_SECRET_ACCESS_KEY string
# AWS_REGION string
# RESOURCE_PATH string
# FILE_TO_UPLOAD string
# CONTENT_TYPE string
# PUBLISH bool
# DEBUG bool
# VERBOSE bool
# INSECURE bool
##
performRequest() {
local timestamp=$(date -u "+%Y-%m-%d %H:%M:%S")
local isoTimestamp=$(date -ud "${timestamp}" "+%Y%m%dT%H%M%SZ")
local dateScope=$(date -ud "${timestamp}" "+%Y%m%d")
local host=$(convS3RegionToEndpoint "${AWS_REGION}")
# Generate payload hash
if [[ $METHOD == "PUT" ]]; then
local payloadHash=$(sha256HashFile $FILE_TO_UPLOAD)
else
local payloadHash=$(sha256Hash "")
fi
local cmd=("curl")
local headers=
local headerList=
if [[ ${DEBUG} != true ]]; then
cmd+=("--fail")
fi
if [[ ${VERBOSE} == true ]]; then
cmd+=("--verbose")
fi
if [[ ${METHOD} == "PUT" ]]; then
cmd+=("-T" "${FILE_TO_UPLOAD}")
fi
cmd+=("-X" "${METHOD}")
if [[ ${METHOD} == "PUT" && ! -z "${CONTENT_TYPE}" ]]; then
cmd+=("-H" "Content-Type: ${CONTENT_TYPE}")
headers+="content-type:${CONTENT_TYPE}\n"
headerList+="content-type;"
fi
cmd+=("-H" "Host: ${host}")
headers+="host:${host}\n"
headerList+="host;"
if [[ ${METHOD} == "PUT" && "${PUBLISH}" == true ]]; then
cmd+=("-H" "x-amz-acl: public-read")
headers+="x-amz-acl:public-read\n"
headerList+="x-amz-acl;"
fi
cmd+=("-H" "x-amz-content-sha256: ${payloadHash}")
headers+="x-amz-content-sha256:${payloadHash}\n"
headerList+="x-amz-content-sha256;"
cmd+=("-H" "x-amz-date: ${isoTimestamp}")
headers+="x-amz-date:${isoTimestamp}"
headerList+="x-amz-date"
if [[ -n "${AWS_SECURITY_TOKEN}" ]]; then
cmd+=("-H" "x-amz-security-token: ${AWS_SECURITY_TOKEN}")
headers+="\nx-amz-security-token:${AWS_SECURITY_TOKEN}"
headerList+=";x-amz-security-token"
fi
# Generate canonical request
local canonicalRequest="${METHOD}
${RESOURCE_PATH}
${headers}
${headerList}
${payloadHash}"
# Generated request hash
local hashedRequest=$(sha256Hash "${canonicalRequest}")
# Generate signing data
local stringToSign="AWS4-HMAC-SHA256
${isoTimestamp}
${dateScope}/${AWS_REGION}/s3/aws4_request
${hashedRequest}"
# Sign data
local signature=$(sign "${AWS_SECRET_ACCESS_KEY}" "${dateScope}" "${AWS_REGION}" \
"s3" "${stringToSign}")
local authorizationHeader="AWS4-HMAC-SHA256 Credential=${AWS_ACCESS_KEY_ID}/${dateScope}/${AWS_REGION}/s3/aws4_request, SignedHeaders=${headerList}, Signature=${signature}"
cmd+=("-H" "Authorization: ${authorizationHeader}")
local protocol="https"
if [[ $INSECURE == true ]]; then
protocol="http"
fi
cmd+=("${protocol}://${host}${RESOURCE_PATH}")
# Curl
"${cmd[@]}"
}
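
The sign() helper above derives the AWS Signature Version 4 signing key by chaining four HMAC-SHA256 operations over the secret, date scope, region, and service, then signs the string-to-sign with that key. A rough Go equivalent, shown only to make the derivation explicit (the secret and string-to-sign below are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors the hmac_sha256 shell helper above.
func hmacSHA256(key, data []byte) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write(data)
	return mac.Sum(nil)
}

func main() {
	secret := "EXAMPLE-SECRET-ACCESS-KEY" // placeholder, not a real credential
	dateScope, region, service := "20150830", "eu-central-1", "s3"
	stringToSign := "AWS4-HMAC-SHA256\n20150830T123600Z\n" +
		"20150830/eu-central-1/s3/aws4_request\n<hex of hashed canonical request>"

	// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(dateScope))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	signature := hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign)))
	fmt.Println(signature) // goes into the Authorization header's Signature= field
}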

automation/terraformActions.go (134 changed lines)

@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
@@ -104,11 +105,11 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
return nil, err
}
// exitCode, planStdout, planStderr, err := shellExec(terraformDirectory, "terraform", "plan", "-out", configuration.TERRAFORM_PLAN_FILE_NAME)
// err = errorFromShellExecResult("terraform plan", exitCode, planStdout, planStderr, err)
// if err != nil {
// return nil, err
// }
exitCode, planStdout, planStderr, err := shellExec(terraformDirectory, "terraform", "plan", "-out", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform plan", exitCode, planStdout, planStderr, err)
if err != nil {
return nil, err
}
exitCode, tfJson, showJsonStderr, err := shellExec(terraformDirectory, "terraform", "show", "-json", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform show", exitCode, tfJson, showJsonStderr, err)
@@ -121,7 +122,7 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
var tfShow TerraformShow
err = json.Unmarshal([]byte(tfJson), &tfShow)
if err != nil {
return nil, errors.Wrap(err, "can't GenerateTerraformPlan because can't json.Unmarshal the output of `terraform show`")
return nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't json.Unmarshal the output of `terraform show`")
}
// json, err := json.MarshalIndent(tfShow, "", " ")
@@ -131,48 +132,50 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
// log.Println(string(json))
getAnsibleRoles := func(ansibleModuleName string) (map[string][]string, error) {
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules", ansibleModuleName)
exitCode, ansibleStdout, ansibleStderr, err := shellExec(ansibleDirectory, "ansible-playbook", "--list-tasks", configuration.ANSIBLE_PLAYBOOK_FILE_NAME)
err = errorFromShellExecResult("ansible-playbook --list-tasks", exitCode, ansibleStdout, ansibleStderr, err)
if err != nil {
return nil, err
}
// ansibleStdout looks like:
// playbook: provision.yml
//
// play #1 (localhost): This is a hello-world example TAGS: []
// tasks:
// test-role : Create a file called '/tmp/testfile.txt' with the content 'hello world'. TAGS: []
// .....
matchTask := regexp.MustCompile(`\n\s+(( *[^ :]+)+) : (.*?)\s*TAGS: \[.+`)
taskMatches := matchTask.FindAllStringSubmatch(string(ansibleStdout), -1)
ansibleRoles := map[string][]string{}
for _, match := range taskMatches {
role := match[1]
task := match[3]
if _, has := ansibleRoles[role]; !has {
ansibleRoles[role] = []string{}
for moduleName := range tfShow.Configuraton.RootModule.ModuleCalls {
if strings.HasPrefix(moduleName, "ansible-") {
err := linkAnsibleWrapperToModule(moduleName, workingDirectory)
if err != nil {
return nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't linkAnsibleWrapperToModule")
}
ansibleRoles[role] = append(ansibleRoles[role], task)
}
}
return ansibleRoles, nil
svg, err := makeSvgFromTerraformShowOutput(config, workingDirectory, tfShow)
if err != nil {
return nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't makeSvgFromTerraformShowOutput")
}
fmt.Println(svg)
// exitCode, tfJson, showJsonStderr, err := shellExec(terraformDirectory, "terraform", "show", "-json", configuration.TERRAFORM_PLAN_FILE_NAME)
// err = errorFromShellExecResult("terraform show", exitCode, tfJson, showJsonStderr, err)
// if err != nil {
// return nil, errors.Wrap(err, "can't terraform show! \nstdout: \n%s\n\n stderr: \n%s\n\n error:")
// }
// err = os.Remove(filepath.Join(terraformDirectory, configuration.TERRAFORM_PLAN_FILE_NAME))
// if err != nil {
// return nil, nil, errors.Wrap(err, "can't GenerateTerraformPlan because can't remove plan file")
// }
return make(chan SimplifiedTerraformStatus), nil
}
func makeSvgFromTerraformShowOutput(
config *configuration.Configuration,
workingDirectory string,
tfShow TerraformShow,
) (string, error) {
simpleModules := map[string]*SimplifiedTerraformModule{}
for name, module := range tfShow.Configuraton.RootModule.ModuleCalls {
resources := []SimplifiedTerraformResource{}
if strings.HasPrefix(name, "ansible-") {
rolesMap, err := getAnsibleRoles(name)
rolesMap, err := getAnsibleRolesFromModule(name, workingDirectory)
if err != nil {
return nil, errors.Wrapf(err, "cant getAnsibleRoles(%s) because", name)
return "", errors.Wrapf(err, "cant getAnsibleRoles(%s) because", name)
}
for roleName, tasks := range rolesMap {
resources = append(resources, SimplifiedTerraformResource{
@@ -364,7 +367,7 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
exitCode, dotStdout, dotStderr, err := shellExecInputPipe(".", &dot, "dot", "-Tsvg")
err = errorFromShellExecResult("dot -Tsvg", exitCode, dotStdout, dotStderr, err)
if err != nil {
return nil, err
return "", err
}
svgString := strings.ReplaceAll(string(dotStdout), `fill="none"`, `fill="#ffffff"`)
@@ -372,20 +375,55 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
svgString = matchTitleTag.ReplaceAllString(svgString, "")
svgString = strings.ReplaceAll(svgString, `Times,serif`, `-apple-system,system-ui,BlinkMacSystemFont,Ubuntu,Roboto,Segoe UI,sans-serif`)
fmt.Println(svgString)
return svgString, nil
}
// exitCode, tfJson, showJsonStderr, err := shellExec(terraformDirectory, "terraform", "show", "-json", configuration.TERRAFORM_PLAN_FILE_NAME)
// err = errorFromShellExecResult("terraform show", exitCode, tfJson, showJsonStderr, err)
// if err != nil {
// return nil, errors.Wrap(err, "can't terraform show! \nstdout: \n%s\n\n stderr: \n%s\n\n error:")
// }
func linkAnsibleWrapperToModule(moduleName, workingDirectory string) error {
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules", moduleName)
// err = os.Remove(filepath.Join(terraformDirectory, configuration.TERRAFORM_PLAN_FILE_NAME))
// if err != nil {
// return nil, nil, errors.Wrap(err, "can't GenerateTerraformPlan because can't remove plan file")
// }
for _, toLink := range configuration.GET_ANSIBLE_WRAPPER_FILES() {
inModule := filepath.Join(ansibleDirectory, toLink)
inAnsibleWrapper := filepath.Join(workingDirectory, configuration.ANSIBLE_WRAPPER_PATH, toLink)
os.Remove(inModule)
err := os.Symlink(inAnsibleWrapper, inModule)
if err != nil {
return errors.Wrapf(err, "could not create symbolic link %s for ansible wrapper")
}
}
return nil
}
return make(chan SimplifiedTerraformStatus), nil
func getAnsibleRolesFromModule(moduleName, workingDirectory string) (map[string][]string, error) {
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules", moduleName)
exitCode, ansibleStdout, ansibleStderr, err := shellExec(ansibleDirectory, "ansible-playbook", "--list-tasks", configuration.ANSIBLE_PLAYBOOK_FILE_NAME)
err = errorFromShellExecResult("ansible-playbook --list-tasks", exitCode, ansibleStdout, ansibleStderr, err)
if err != nil {
return nil, err
}
// ansibleStdout looks like:
// playbook: provision.yml
//
// play #1 (localhost): This is a hello-world example TAGS: []
// tasks:
// test-role : Create a file called '/tmp/testfile.txt' with the content 'hello world'. TAGS: []
// .....
matchTask := regexp.MustCompile(`\n\s+(( *[^ :]+)+) : (.*?)\s*TAGS: \[.+`)
taskMatches := matchTask.FindAllStringSubmatch(string(ansibleStdout), -1)
ansibleRoles := map[string][]string{}
for _, match := range taskMatches {
role := match[1]
task := match[3]
if _, has := ansibleRoles[role]; !has {
ansibleRoles[role] = []string{}
}
ansibleRoles[role] = append(ansibleRoles[role], task)
}
return ansibleRoles, nil
}
func padStringForDot(str string) string {
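
getAnsibleRolesFromModule groups tasks by role by running the regular expression above over the --list-tasks output. A standalone sketch of just that parsing step, using the sample output quoted in the comments as a literal:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Shape of `ansible-playbook --list-tasks` output, per the comment above.
	ansibleStdout := "playbook: provision.yml\n\n" +
		"  play #1 (localhost): This is a hello-world example\tTAGS: []\n" +
		"    tasks:\n" +
		"      test-role : Create a file called '/tmp/testfile.txt' with the content 'hello world'.\tTAGS: []\n"

	matchTask := regexp.MustCompile(`\n\s+(( *[^ :]+)+) : (.*?)\s*TAGS: \[.+`)
	ansibleRoles := map[string][]string{}
	for _, match := range matchTask.FindAllStringSubmatch(ansibleStdout, -1) {
		role, task := match[1], match[3]
		ansibleRoles[role] = append(ansibleRoles[role], task)
	}
	// Prints: map[test-role:[Create a file called '/tmp/testfile.txt' with the content 'hello world'.]]
	fmt.Println(ansibleRoles)
}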

automation/terraformCodeGeneration.go (111 changed lines)

@@ -41,9 +41,14 @@ type tfProvidedBy struct {
const ssh_public_keys = "ssh_public_keys"
const ssh_private_key_filepath = "ssh_private_key_filepath"
const ssh_private_key_filepath_value = "../ssh/severgarden_builtin_ed22519"
const post_to_object_storage_shell_script = "post_to_object_storage_shell_script"
const ssh_private_key_filepath_value = "ssh/severgarden_builtin_ed22519"
func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, workingDirectory string) error {
func WriteTerraformCodeForTargetedModules(
config *configuration.Configuration,
workingDirectory string,
hostKeysObjectStorageCredentials []configuration.Credential,
) error {
providers := map[string]tfProvider{
"digitalocean": tfProvider{
@@ -61,7 +66,7 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
modules, err := parseModulesFolder(providers, workingDirectory)
if err != nil {
return errors.Wrap(err, "can't initializeTerraformProject because can't parseModulesFolder()")
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't parseModulesFolder()")
}
usedProviders := make(map[string]tfProvider)
@@ -86,16 +91,19 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
}
}
// these variables don't have to have the real values, but all used variables do have to be
// included in this map so we can wire up the modules correctly.
allVariables := map[string]string{}
for k, v := range config.Terraform.Variables {
allVariables[k] = v
}
allVariables[ssh_public_keys] = "<secure shell public keys>"
allVariables[ssh_private_key_filepath] = ssh_private_key_filepath_value
allVariables[post_to_object_storage_shell_script] = "<long shell script>"
err = fillOutProvidedByOnAttributes(usedModules, allVariables)
if err != nil {
return errors.Wrap(err, "can't initializeTerraformProject because can't fillOutProvidedByOnAttributes")
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't fillOutProvidedByOnAttributes")
}
modulesToCreate := map[string]bool{}
@@ -104,7 +112,7 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
if !has {
return errors.Wrapf(
err,
`can't initializeTerraformProject because the TargetedModule \"%s\"
`can't WriteTerraformCodeForTargetedModules because the TargetedModule \"%s\"
was not found in the list of modules that we have authenticated providers for.
Missing credentials or incorrect module name.`,
target,
@@ -113,7 +121,7 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
dependencies, err := getDependencies(module, usedModules, nil)
if !has {
return errors.Wrapf(err, `can't initializeTerraformProject because cant getDependencies for module "%s"`, module.Name)
return errors.Wrapf(err, `can't WriteTerraformCodeForTargetedModules because cant getDependencies for module "%s"`, module.Name)
}
modulesToCreate[module.Name] = true
@@ -173,7 +181,7 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
sshDirectory := filepath.Join(workingDirectory, configuration.SSH_KEYS_PATH)
sshFileInfos, err := ioutil.ReadDir(sshDirectory)
if err != nil {
return errors.Wrapf(err, "can't initializeTerraformProject because can't ioutil.ReadDir(\"%s\")", sshDirectory)
return errors.Wrapf(err, "can't WriteTerraformCodeForTargetedModules because can't ioutil.ReadDir(\"%s\")", sshDirectory)
}
for _, fileInfo := range sshFileInfos {
filepath := filepath.Join(sshDirectory, fileInfo.Name())
@@ -189,13 +197,33 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
}
}
// we can't use the allVariables here like we used above, because we have to handle variable values
// that are a list of objects, not just a string.
// so we special case it below this loop.
variableStanzas := make([]string, 0)
for key, value := range config.Terraform.Variables {
variableStanzas = append(variableStanzas, fmt.Sprintf(`
variable "%s" { default = "%s" }
`, key, value,
))
if strings.Contains(value, "\n") {
variableStanzas = append(variableStanzas, fmt.Sprintf(`
variable "%s" {
default = <<EOT
%s
EOT
}
`, key, value,
))
} else {
variableStanzas = append(variableStanzas, fmt.Sprintf(`
variable "%s" { default = "%s" }
`, key, value,
))
}
}
postToObjectStorageShellScript, err := getPostToObjectStorageShellScript(config, hostKeysObjectStorageCredentials)
if err != nil {
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because")
}
variableStanzas = append(variableStanzas,
fmt.Sprintf(`
variable "%s" {
@@ -211,6 +239,14 @@ func WriteTerraformCodeForTargetedModules(config *configuration.Configuration, w
variable "%s" { default = "%s" }
`, ssh_private_key_filepath, ssh_private_key_filepath_value,
),
fmt.Sprintf(`
variable "%s" {
default = <<EOT
%s
EOT
}
`, post_to_object_storage_shell_script, postToObjectStorageShellScript,
),
)
moduleStanzas := make([]string, 0)
@@ -457,3 +493,56 @@ func parseModulesFolder(providers map[string]tfProvider, workingDirectory string
return modules, nil
}
func getPostToObjectStorageShellScript(
config *configuration.Configuration,
credentials []configuration.Credential,
) (string, error) {
backblazeTemplate := `
BUCKET_NAME="%s"
AUTH_JSON="$(curl -sS -u "%s:%s" https://api.backblazeb2.com/b2api/v2/b2_authorize_account)"
API_URL="$(echo "$AUTH_JSON" | grep -E -o '"apiUrl": "([^"]+)"' | sed -E 's|"apiUrl": "([^"]+)"|\1|')"
ACCOUNT_ID="$(echo "$AUTH_JSON" | grep -E -o '"accountId": "([^"]+)"' | sed -E 's|"accountId": "([^"]+)"|\1|')"
AUTH_TOKEN="$(echo "$AUTH_JSON" | grep -E -o '"authorizationToken": "([^"]+)"' | sed -E 's|"authorizationToken": "([^"]+)"|\1|')"
LIST_BUCKETS_JSON="$(curl -sS -H "Authorization: $AUTH_TOKEN" "$API_URL/b2api/v2/b2_list_buckets?accountId=$ACCOUNT_ID&bucketName=$BUCKET_NAME" )"
BUCKET_ID="$(echo "$LIST_BUCKETS_JSON" | grep -E -o '"bucketId": "([^"]+)"' | sed -E 's|"bucketId": "([^"]+)"|\1|')"
UPLOAD_URL_JSON="$(curl -sS -H "Authorization: $AUTH_TOKEN" "$API_URL/b2api/v2/b2_get_upload_url?bucketId=$BUCKET_ID" )"
UPLOAD_URL="$(echo "$UPLOAD_URL_JSON" | grep -E -o '"uploadUrl": "([^"]+)"' | sed -E 's|"uploadUrl": "([^"]+)"|\1|')"
AUTH_TOKEN="$(echo "$UPLOAD_URL_JSON" | grep -E -o '"authorizationToken": "([^"]+)"' | sed -E 's|"authorizationToken": "([^"]+)"|\1|')"
CONTENT_SHA1="$(echo -n "$CONTENT" | sha1sum | awk '{ print $1 }')"
curl -sS -X POST \
-H "Authorization: $AUTH_TOKEN" \
-H "X-Bz-File-Name: $FILE_PATH" \
-H "X-Bz-Content-Sha1: $CONTENT_SHA1" \
-H "Content-Type: text/plain" \
"$UPLOAD_URL" -d "$CONTENT"
`
scripts := []string{}
for _, backend := range config.ObjectStorage.Backends {
for _, credential := range credentials {
if credential.Type == backend.Provider {
if backend.Provider == configuration.BACKBLAZE_B2 {
scripts = append(scripts, fmt.Sprintf(backblazeTemplate, backend.Name, credential.Username, credential.Password))
}
if backend.Provider == configuration.AMAZON_S3 {
// TODO
return "", errors.New("getPostToObjectStorageShellScript not implemented yet for S3-compatible")
}
}
}
}
if len(scripts) == 0 {
return "", errors.New("No credentials were passed to getPostToObjectStorageShellScript")
}
return strings.Join(scripts, "\n"), nil
}
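
The variable-stanza loop above emits a plain one-line default for simple values and switches to a heredoc whenever the value contains a newline, which is what lets the multi-line post_to_object_storage_shell_script be passed through as a Terraform variable. A small sketch of the two output shapes (the variable names and values here are made up for illustration):

package main

import (
	"fmt"
	"strings"
)

// renderVariableStanza mirrors the branching above: single-line values get a
// one-line default, multi-line values are wrapped in a heredoc.
func renderVariableStanza(key, value string) string {
	if strings.Contains(value, "\n") {
		return fmt.Sprintf("variable %q {\n  default = <<EOT\n%s\nEOT\n}\n", key, value)
	}
	return fmt.Sprintf("variable %q { default = %q }\n", key, value)
}

func main() {
	fmt.Print(renderVariableStanza("example_region", "nyc3"))
	fmt.Print(renderVariableStanza("example_script", "#!/bin/sh\necho hello"))
}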

configuration/configuration.go (115 changed lines)

@@ -1,5 +1,18 @@
package configuration
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
errors "git.sequentialread.com/forest/pkg-errors"
)
type Configuration struct {
Host HostConfiguration
Terraform TerraformConfiguration
@@ -42,5 +55,107 @@ const OBJECT_STORAGE_PASSPHRASE = "ObjectStoragePassphrase"
const TERRAFORM_CODE_PATH = "terraform-project"
const ANSIBLE_PLAYBOOK_FILE_NAME = "playbook.yml"
const TERRAFORM_PLAN_FILE_NAME = "terraform-plan-file"
const KNOWN_HOSTS_FILE_NAME = "/home/forest/.ssh/known_hosts"
const ANSIBLE_WRAPPER_PATH = "ansible-wrapper"
func GET_ANSIBLE_WRAPPER_FILES() []string {
return []string{
"ansible-playbook-wrapper",
"callback_plugins",
"ansible.cfg",
}
}
const SSH_KEYS_PATH = "ssh"
const TERRAFORM_STATE_SERVER_PORT_NUMBER = 6471
func LoadConfiguration() (*Configuration, string, error) {
if runtime.GOOS == "windows" {
return nil, "", errors.New("windows operating system is not supported.")
}
workingDirectory, err := os.Getwd()
if err != nil {
return nil, "", errors.Wrap(err, "can't os.Getwd()")
}
executableDirectory, err := getCurrentExecDir()
if err != nil {
return nil, "", errors.Wrap(err, "can't getCurrentExecDir()")
}
configFileLocation1 := filepath.Join(executableDirectory, "config.json")
configFileLocation2 := filepath.Join(workingDirectory, "config.json")
configFileLocation := configFileLocation1
configFileStat, err := os.Stat(configFileLocation)
workingDirectoryToReturn := executableDirectory
if err != nil || !configFileStat.Mode().IsRegular() {
configFileLocation = configFileLocation2
configFileStat, err = os.Stat(configFileLocation)
workingDirectoryToReturn = workingDirectory
}
if err != nil || !configFileStat.Mode().IsRegular() {
return nil, workingDirectoryToReturn, fmt.Errorf("no config file. checked %s and %s", configFileLocation1, configFileLocation2)
}
// configFileUid, err := getUID(configFileStat)
// if err != nil {
// return nil, errors.Wrapf(err, "can't getUID() config file %s", configFileLocation)
// }
// if configFileUid != 0 {
// return nil, fmt.Errorf("can't start rootsystem: the config file %s is not owned by root.", configFileLocation)
// }
// ownerReadWriteOnlyPermissionsOctal := "600"
// configFilePermissionsOctal := fmt.Sprintf("%o", configFileStat.Mode().Perm())
// if configFilePermissionsOctal != ownerReadWriteOnlyPermissionsOctal {
// return nil, fmt.Errorf(
// "can't start rootsystem: the config file %s had permissions %s. expected %s. %s",
// configFileLocation,
// configFilePermissionsOctal,
// ownerReadWriteOnlyPermissionsOctal,
// "(config file should only be readable and writable by the owner, aka the root user)",
// )
// }
jsonBytes, err := ioutil.ReadFile(configFileLocation)
if err != nil {
return nil, workingDirectoryToReturn, errors.Wrap(err, "can't read config file")
}
var config Configuration
err = json.Unmarshal(jsonBytes, &config)
if err != nil {
return nil, workingDirectoryToReturn, errors.Wrap(err, "can't json.Unmarshal config file")
}
return &config, workingDirectoryToReturn, nil
}
func getUID(fileInfo os.FileInfo) (int, error) {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok {
return -1, fmt.Errorf("can't cast os.Stat(\"%s\").Sys() to *syscall.Stat_t")
}
return int(stat.Uid), nil
}
func getCurrentExecDir() (dir string, err error) {
path, err := exec.LookPath(os.Args[0])
if err != nil {
fmt.Printf("exec.LookPath(%s) returned %s\n", os.Args[0], err)
return "", err
}
absPath, err := filepath.Abs(path)
if err != nil {
fmt.Printf("filepath.Abs(%s) returned %s\n", path, err)
return "", err
}
dir = filepath.Dir(absPath)
return dir, nil
}
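
LoadConfiguration looks for config.json next to the executable first and falls back to the process working directory, returning whichever directory it settled on. A minimal caller sketch (assuming only the import path and fields visible elsewhere in this commit):

package main

import (
	"fmt"

	"git.sequentialread.com/forest/rootsystem/configuration"
)

func main() {
	// workingDirectory is the directory the config file was actually found in.
	config, workingDirectory, err := configuration.LoadConfiguration()
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded %d terraform variables from %s\n",
		len(config.Terraform.Variables), workingDirectory)
}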

host-key-poller/main.go (104 changed lines)

@@ -0,0 +1,104 @@
package main
import (
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"time"
errors "git.sequentialread.com/forest/pkg-errors"
"git.sequentialread.com/forest/rootsystem/configuration"
"git.sequentialread.com/forest/rootsystem/objectStorage"
)
func main() {
config, _, err := configuration.LoadConfiguration()
if err != nil {
panic(errors.Wrap(err, "host-key-poller failed because loadConfiguration() returned"))
}
storage, err := objectStorage.InitializeObjectStorage(config, false)
if err != nil {
panic(errors.Wrap(err, "host-key-poller failed to initialize object storage"))
}
iterations := 0
for iterations < 60 {
iterations++
filename := fmt.Sprintf("rootsystem/known-hosts/%s", os.Args[1])
fmt.Printf("polling for %s...\n", filename)
file, notFound, err := storage.Get(filename)
fmt.Printf("result: notFound: %t, err: %t\n", notFound, err != nil)
if err != nil {
fmt.Printf("the error was: %s\n", err)
}
if err == nil && !notFound {
lines := strings.Split(string(file.Content), "\n")
validLines := []string{}
ipAddress := ""
for _, line := range lines {
if len(strings.Trim(line, "\t \n\r")) > 10 {
fields := strings.Split(line, " ")
if len(fields) >= 3 {
ip := fields[0]
hostKeyType := fields[1]
base64PublicKey := fields[2]
ipValid := regexp.MustCompile("(\\d+\\.)+\\d+").FindString(ip) != ""
typeValid := (hostKeyType == "ecdsa-sha2-nistp256" || hostKeyType == "ssh-rsa" || hostKeyType == "ssh-ed25519")
base64Valid := regexp.MustCompile("[A-Za-z0-9+/=]+").FindString(base64PublicKey) != ""
if ipValid && typeValid && base64Valid {
ipAddress = ip
validLines = append(validLines, fmt.Sprintf("%s %s %s", ip, hostKeyType, base64PublicKey))
}
}
}
}
if len(validLines) > 0 {
fmt.Printf("Removing %s from %s:\n", ipAddress, configuration.KNOWN_HOSTS_FILE_NAME)
fmt.Printf("ssh-keygen -f %s -R %s\n", configuration.KNOWN_HOSTS_FILE_NAME, ipAddress)
process := exec.Command("ssh-keygen", "-f", configuration.KNOWN_HOSTS_FILE_NAME, "-R", ipAddress)
err := process.Start()
if err != nil {
panic(err)
}
err = process.Wait()
if err != nil {
panic(err)
}
file, err := os.OpenFile(configuration.KNOWN_HOSTS_FILE_NAME, os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
panic(err)
}
defer file.Close()
fmt.Printf("Writing to %s:\n", configuration.KNOWN_HOSTS_FILE_NAME)
for _, line := range validLines {
fmt.Println(line)
if _, err = file.WriteString(fmt.Sprintf("\n%s", line)); err != nil {
panic(err)
}
}
os.Exit(0)
}
}
time.Sleep(time.Second * time.Duration(5))
}
panic(errors.New("Timed Out"))
}
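
The poller only accepts known-hosts lines shaped like "<ip> <host-key-type> <base64-key>", using the three checks above. A standalone sketch of that validation on a made-up line (the host key below is a placeholder, not a real key):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Placeholder known_hosts entry in the shape the poller expects.
	line := "203.0.113.10 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5PLACEHOLDERPLACEHOLDERPLACEHOLDER"

	fields := strings.Split(strings.TrimSpace(line), " ")
	ipValid := regexp.MustCompile(`(\d+\.)+\d+`).FindString(fields[0]) != ""
	typeValid := fields[1] == "ecdsa-sha2-nistp256" || fields[1] == "ssh-rsa" || fields[1] == "ssh-ed25519"
	base64Valid := regexp.MustCompile(`[A-Za-z0-9+/=]+`).FindString(fields[2]) != ""

	fmt.Println(ipValid && typeValid && base64Valid) // true
}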

main.go (242 changed lines)

@@ -1,16 +1,8 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"sync"
"syscall"
"time"
errors "git.sequentialread.com/forest/pkg-errors"
@@ -28,15 +20,17 @@ type applicationState struct {
var global applicationState
func main() {
config, err := loadConfiguration()
config, workingDirectory, err := configuration.LoadConfiguration()
if err != nil {
panic(errors.Wrap(err, "rootsystem can't start because loadConfiguration() returned"))
}
global.workingDirectory = workingDirectory
err = initializeObjectStorage(config)
storage, err := objectStorage.InitializeObjectStorage(config, true)
if err != nil {
panic(errors.Wrap(err, "rootsystem can't start because failed to initialize object storage"))
}
global.storage = storage
go terraformStateServer()
@@ -47,7 +41,20 @@ func main() {
func initializeAutomation(config *configuration.Configuration) {
err := automation.WriteTerraformCodeForTargetedModules(config, global.workingDirectory)
hostKeysAccessSpec := objectStorage.ObjectStorageKey{
Name: "rootsystem-known-hosts",
PathPrefix: "rootsystem/known-hosts",
Read: true,
Write: true,
Delete: false,
List: false,
}
knownHostsCredentials, err := global.storage.CreateAccessKeyIfNotExists(hostKeysAccessSpec)
if err != nil {
panic(err)
}
err = automation.WriteTerraformCodeForTargetedModules(config, global.workingDirectory, knownHostsCredentials)
if err != nil {
panic(err)
}