Browse Source

major refactor related to global/local terraform projects

- start committing the terraform modules and ansible
master
forest 2 years ago
parent
commit
c7ed3e40b9
  1. 8
      .gitignore
  2. 3
      ansible-roles/threshold-client-config/files/ReadMe.md
  3. 31
      ansible-roles/threshold-client-config/tasks/main.yml
  4. 13
      ansible-roles/threshold-client-config/templates/templates/config.j2
  5. 3
      ansible-roles/threshold-register-client-with-server/files/ReadMe.md
  6. 7
      ansible-roles/threshold-register-client-with-server/tasks/main.yml
  7. 3
      ansible-roles/threshold-server-config/files/ReadMe.md
  8. 24
      ansible-roles/threshold-server-config/tasks/main.yml
  9. 9
      ansible-roles/threshold-server-config/templates/config.j2
  10. 43
      ansible-roles/threshold/tasks/main.yml
  11. 1
      ansible-wrapper/ansible.cfg
  12. 180
      automation/patchDigitalOcean.go
  13. 151
      automation/terraformActions.go
  14. 279
      automation/terraformCodeGeneration.go
  15. 10
      build.sh
  16. 11
      configuration/configuration.go
  17. 122
      main.go
  18. 302
      pki/pki.go
  19. 38
      terraform-modules/ReadMe.md
  20. 40
      terraform-modules/ansible-threshold-client/main.tf
  21. 11
      terraform-modules/ansible-threshold-client/playbook.yml
  22. 36
      terraform-modules/ansible-threshold-server/main.tf
  23. 7
      terraform-modules/ansible-threshold-server/playbook.yml
  24. 23
      terraform-modules/dns-gandi/main.tf
  25. 27
      terraform-modules/gateway-dns/main.tf
  26. 74
      terraform-modules/gateway-instance-digitalocean/main.tf
  27. 7
      terraform-modules/gateway-instance-digitalocean/upload_known_hosts.tpl
  28. 16
      terraform-modules/ssh-keys-digitalocean/main.tf
  29. 19
      terraformStateHandler.go

8
.gitignore vendored

@ -1,6 +1,12 @@
config.json
ssh
terraform-project
terraform-global
terraform-local-*
terraform-modules/*/ansible.cfg
terraform-modules/*/callback_plugins
terraform-modules/*/ansible-playbook-wrapper
*.crt
*.key
testexe
test2
.terraform

3
ansible-roles/threshold-client-config/files/ReadMe.md

@ -0,0 +1,3 @@
## threshold-client-config files folder
the PKI files will be written here at runtime by rootsystem

31
ansible-roles/threshold-client-config/tasks/main.yml

@ -0,0 +1,31 @@
# Ansible tasks for the threshold-client-config role.
# Installs the rendered threshold client config plus the client's PKI
# material (CA cert, client cert, client key) into /root/threshold.
# All files are owned by the threshold user and readable only by the owner.
# The PKI files are expected in this role's files/ folder, written there
# at runtime by rootsystem (see files/ReadMe.md).
- name: install threshold config file
  template:
    src: config.j2
    dest: /root/threshold/config.json
    owner: threshold
    group: threshold
    mode: '0600'
- name: install CA cert used to sign the server's key
  copy:
    src: '{{ domain }}_CA.crt'
    dest: '/root/threshold/{{ domain }}_CA.crt'
    owner: threshold
    group: threshold
    mode: '0600'
- name: install threshold client TLS certificate
  copy:
    src: '{{ clientId }}@{{ domain }}.crt'
    dest: '/root/threshold/{{ clientId }}@{{ domain }}.crt'
    owner: threshold
    group: threshold
    mode: '0600'
- name: install threshold client TLS key
  copy:
    src: '{{ clientId }}@{{ domain }}.key'
    dest: '/root/threshold/{{ clientId }}@{{ domain }}.key'
    owner: threshold
    group: threshold
    mode: '0600'

13
ansible-roles/threshold-client-config/templates/templates/config.j2

@ -0,0 +1,13 @@
{# threshold client configuration template (rendered by Ansible).
   Variables: clientId, domain. Jinja comments are stripped at render
   time, so the rendered output is plain JSON. #}
{
  "DebugLog": false,
  "ClientIdentifier": "{{ clientId }}",
  "ServerAddr": "{{ domain }}:9056",
  "UseTls": true,
  "ServiceToLocalAddrMap": {
    {# NOTE(review): 445 is the SMB port, not HTTPS (443) — confirm this
       local-address mapping is intentional #}
    "https": "127.0.0.1:445",
    "http": "127.0.0.1:80"
  },
  "CaCertificateFilesGlob": "{{ domain }}_CA.crt",
  "ClientTlsKeyFile": "{{ clientId }}@{{ domain }}.key",
  "ClientTlsCertificateFile": "{{ clientId }}@{{ domain }}.crt"
}

3
ansible-roles/threshold-register-client-with-server/files/ReadMe.md

@ -0,0 +1,3 @@
## threshold-register-client-with-server files folder
the PKI files will be written here at runtime by rootsystem

7
ansible-roles/threshold-register-client-with-server/tasks/main.yml

@ -0,0 +1,7 @@
# Ansible tasks for the threshold-register-client-with-server role.
# Installs a client's CA certificate onto the server host so the threshold
# server will trust TLS connections from that client (the server config's
# CaCertificateFilesGlob matches *_CA.crt in /root/threshold).
- name: install threshold CA
  copy:
    src: '{{ clientId }}_CA.crt'
    dest: '/root/threshold/{{ clientId }}_CA.crt'
    owner: threshold
    group: threshold
    mode: '0600'

3
ansible-roles/threshold-server-config/files/ReadMe.md

@ -0,0 +1,3 @@
## threshold-server-config files folder
the PKI files will be written here at runtime by rootsystem

24
ansible-roles/threshold-server-config/tasks/main.yml

@ -0,0 +1,24 @@
# Ansible tasks for the threshold-server-config role.
# Installs the rendered threshold server config plus the server's TLS
# certificate and key into /root/threshold, readable only by the
# threshold user. The PKI files are expected in this role's files/
# folder, written there at runtime by rootsystem (see files/ReadMe.md).
- name: install threshold config file
  template:
    src: config.j2
    dest: /root/threshold/config.json
    owner: threshold
    group: threshold
    mode: '0600'
- name: install threshold server TLS certificate
  copy:
    src: '{{ domain }}.crt'
    dest: '/root/threshold/{{ domain }}.crt'
    owner: threshold
    group: threshold
    mode: '0600'
- name: install threshold server TLS key
  copy:
    src: '{{ domain }}.key'
    dest: '/root/threshold/{{ domain }}.key'
    owner: threshold
    group: threshold
    mode: '0600'

9
ansible-roles/threshold-server-config/templates/config.j2

@ -0,0 +1,9 @@
{# threshold server configuration template (rendered by Ansible).
   Variables: domain. Jinja comments are stripped at render time, so
   the rendered output is plain JSON. The CA glob matches every
   client CA installed by threshold-register-client-with-server. #}
{
  "DebugLog": false,
  "ListenPort": 9056,
  "UseTls": true,
  "CaCertificateFilesGlob": "*_CA.crt",
  "ServerTlsKeyFile": "{{ domain }}.key",
  "ServerTlsCertificateFile": "{{ domain }}.crt"
}

43
ansible-roles/threshold/tasks/main.yml

@ -0,0 +1,43 @@
# Ansible tasks for the threshold role: installs the threshold binary.
# Strategy: checksum the installed binary; when it is missing or does not
# match the pinned sha256, download the release tar.gz (itself verified
# against its own, different sha256), unpack it, clean up, and ensure the
# threshold user and group exist.
- name: ensure threshold folder exists
  file:
    path: /root/threshold
    state: directory
- name: checksum the Threshold binary
  stat:
    path: /root/threshold/threshold
    checksum_algorithm: sha256
  register: threshold_binary
- name: log the checksum
  debug:
    var: threshold_binary.stat.checksum
- name: Download & validate the Threshold tar.gz file (if reinstall is desired)
  get_url:
    url: https://f000.backblazeb2.com/file/server-garden-artifacts/threshold-arm.tar.gz
    dest: /tmp/threshold-arm.tar.gz
    # sha256 of the tar.gz archive itself (intentionally different from the
    # unpacked-binary checksum used in the `when` conditions below)
    checksum: "sha256:816bdc58718be15e374dfa7fbf602cbab1dcf915df7d66873d141fe32bb0869a"
  # re-download only when the installed binary is absent or outdated
  when: threshold_binary.stat.checksum is not defined or threshold_binary.stat.checksum != '535936ef678df2f1cbc1cd26454c576d4e0a641690aa6cf7d0668c23d9ae783f'
- name: unarchive threshold tar.gz file (if reinstall is desired)
  unarchive:
    remote_src: yes
    src: /tmp/threshold-arm.tar.gz
    dest: /root/threshold
  # same condition as the download task: only unpack when reinstalling
  when: threshold_binary.stat.checksum is not defined or threshold_binary.stat.checksum != '535936ef678df2f1cbc1cd26454c576d4e0a641690aa6cf7d0668c23d9ae783f'
- name: clean up threshold tar.gz file
  file:
    path: /tmp/threshold-arm.tar.gz
    state: absent
- name: Ensure threshold user group exists
  group:
    name: threshold
    state: present
- name: Ensure threshold user exists
  user:
    name: threshold
    state: present
    group: threshold

1
ansible-wrapper/ansible.cfg

@ -1,5 +1,6 @@
[defaults]
host_key_checking = True
interpreter_python = /usr/bin/python3
callback_plugins = ./callback_plugins
stdout_callback = default-with-json-interleaved

180
automation/patchDigitalOcean.go

@ -0,0 +1,180 @@
package automation
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"strings"
errors "git.sequentialread.com/forest/pkg-errors"
"git.sequentialread.com/forest/rootsystem/configuration"
)
type digialOceanSSHKeysResponse struct {
SSHKeys []digitalOceanSSHKey `json:"ssh_keys"`
}
type digitalOceanSSHKey struct {
Id int `json:"id"`
Fingerprint string `json:"fingerprint"`
PublicKey string `json:"public_key"`
Pame string `json:"name"`
}
// handleDigitalOceanSSHKeyAlreadyExistsBug works around the case where
// terraform plans to create a digitalocean_ssh_key that already exists in the
// DigitalOcean account. For every local public key the plan wants to create
// but that the API already has (matched by exact key content), it runs
// `terraform import` so terraform adopts the existing key instead of failing
// to re-create it.
//
// Returns true when at least one key was imported — the terraform state was
// modified and the caller should re-plan — and false otherwise.
func handleDigitalOceanSSHKeyAlreadyExistsBug(
	config *configuration.Configuration,
	workingDirectory string,
	terraformDirectory string,
	status *SimplifiedTerraformStatus,
	tfShow *TerraformShow,
) (bool, error) {

	// Only relevant when a DigitalOcean credential is configured.
	var digitaloceanCredential *configuration.Credential = nil
	for _, cred := range config.Credentials {
		if cred.Type == configuration.DIGITALOCEAN {
			cred := cred // copy so we don't retain a pointer to the loop variable
			digitaloceanCredential = &cred
			break
		}
	}
	if digitaloceanCredential == nil {
		return false, nil
	}

	// Find all changed digitalocean_ssh_key resources in the terraform plan.
	changedSSHKeyResourcePaths := []string{}
	for _, module := range status.Modules {
		for _, resource := range module.Resources {
			if resource.ResourceType == "digitalocean_ssh_key" && resource.Plan != "none" {
				changedSSHKeyResourcePaths = append(
					changedSSHKeyResourcePaths,
					// e.g. module.ssh-keys-digitalocean.digitalocean_ssh_key.default
					fmt.Sprintf("module.%s.%s", module.DisplayName, resource.DisplayName),
				)
			}
		}
	}
	if len(changedSSHKeyResourcePaths) == 0 {
		return false, nil
	}

	// Use the tfShow data to identify the index of each sshPublicKey that
	// will be created (addresses look like ...digitalocean_ssh_key.default[3]).
	indexRegexp := regexp.MustCompile(`\[([0-9]+)\]$`) // hoisted: compiled once, not per resource change
	createdIndexes := map[int]string{}
	for _, resourcePath := range changedSSHKeyResourcePaths {
		for _, resourceChange := range tfShow.ResourceChanges {
			hasCreate := false
			for _, action := range resourceChange.Change.Actions {
				if action == "create" {
					hasCreate = true
				}
			}
			if hasCreate && strings.HasPrefix(resourceChange.Address, resourcePath) {
				indexMatches := indexRegexp.FindStringSubmatch(resourceChange.Address)
				if len(indexMatches) > 1 {
					index, err := strconv.Atoi(indexMatches[1])
					if err == nil {
						createdIndexes[index] = resourcePath
					}
				}
			}
		}
	}
	if len(createdIndexes) == 0 {
		return false, nil
	}

	// Get the ssh public keys from disk.
	sshPublicKeys, err := getSSHPublicKeys(workingDirectory)
	if err != nil {
		return false, err
	}

	// Get the ssh public keys from the DigitalOcean API.
	httpClient := http.Client{}
	request, err := http.NewRequest("GET", fmt.Sprintf("%s%s", configuration.DIGITALOCEAN_API_URL, "/v2/account/keys"), nil)
	if err != nil {
		// BUG FIX: this error was previously ignored; a failed NewRequest
		// would have caused a nil pointer dereference on request.Header below.
		return false, errors.Wrap(err, "can't create HTTP request for /v2/account/keys on digitalocean API")
	}
	request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", digitaloceanCredential.Password))
	response, err := httpClient.Do(request)
	if err != nil {
		return false, err
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer response.Body.Close()
	if response.StatusCode != 200 {
		return false, fmt.Errorf("HTTP %d when calling /v2/account/keys on digitalocean API", response.StatusCode)
	}
	bytes, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return false, errors.Wrap(err, "HTTP read error when calling /v2/account/keys on digitalocean API")
	}
	var responseObject digialOceanSSHKeysResponse
	err = json.Unmarshal(bytes, &responseObject)
	if err != nil {
		return false, errors.Wrap(err, "JSON parse error when calling /v2/account/keys on digitalocean API")
	}

	// Index the remote keys by their content so local keys can be matched exactly.
	digitalOceanIdByKeyContent := map[string]int{}
	for _, sshKey := range responseObject.SSHKeys {
		digitalOceanIdByKeyContent[sshKey.PublicKey] = sshKey.Id
	}

	importedAnySSHKeys := false
	// For each local SSH key which tf plans to create, check if it already exists in DO.
	for index, tfResourcePath := range createdIndexes {
		if index >= len(sshPublicKeys) {
			return false, fmt.Errorf("ssh key index %d (from terraform plan) is out of range", index)
		}
		// getSSHPublicKeys returns [filepath, name] pairs (see usages of [0]/[1]).
		localKey := sshPublicKeys[index]
		keyPath := localKey[0]
		keyName := localKey[1]
		localKeyBytes, err := ioutil.ReadFile(keyPath)
		if err != nil {
			return false, err
		}
		digitalOceanId, has := digitalOceanIdByKeyContent[strings.TrimSpace(string(localKeyBytes))]
		if !has {
			continue
		}
		fmt.Println("---------------------------------------")
		fmt.Printf(
			"Terraform plans to add the local ssh public key %s to digitalocean, but digitalocean already has that key \n",
			keyName,
		)
		fullyQualifiedResourcePath := fmt.Sprintf("%s[%d]", tfResourcePath, index)
		fmt.Printf(
			"to fix this, I will run: \n(%s) $ terraform import %s %d\n\n",
			terraformDirectory, fullyQualifiedResourcePath, digitalOceanId,
		)
		exitCode, stdout, stderr, err := shellExec(terraformDirectory, "terraform", "import", fullyQualifiedResourcePath, strconv.Itoa(digitalOceanId))
		if err != nil {
			fmt.Println("shellExec failed! aborting.")
			return false, err
		}
		fmt.Printf(
			"exitCode: %d\n\nstdout:\n%s\n\nstderr:\n%s\n\n",
			exitCode, stdout, stderr,
		)
		fmt.Println("---------------------------------------")
		if exitCode != 0 {
			return false, errors.New("terraform import returned a non-zero exit code")
		}
		importedAnySSHKeys = true
	}
	return importedAnySSHKeys, nil
}

151
automation/terraformActions.go

@ -38,6 +38,7 @@ type TerraformShowModule struct {
type TerraformShowResource struct {
Address string `json:"address"`
Type string `json:"type"`
}
type TerraformShowResourceChange struct {
@ -72,6 +73,7 @@ type SimplifiedTerraformModule struct {
}
type SimplifiedTerraformResource struct {
ResourceType string
DisplayName string
Plan string
State string
@ -103,8 +105,12 @@ type TerraformApplyResult struct {
const data_template_file = "data.template_file"
func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory string) (string, chan TerraformApplyResult, error) {
terraformDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH)
func TerraformPlanAndApply(
config *configuration.Configuration,
workingDirectory string,
terraformProject string,
) (string, chan TerraformApplyResult, error) {
terraformDirectory := filepath.Join(workingDirectory, terraformProject)
// it looks like it might be safer to simply init every time. So skipping alreadyHasTerraformCacheDirectory.
@ -125,35 +131,73 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
return "", nil, err
}
exitCode, planStdout, planStderr, err := shellExec(terraformDirectory, "terraform", "plan", "-out", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform plan", exitCode, planStdout, planStderr, err)
if err != nil {
return "", nil, err
}
// Convenience function so we can plan multiple times if needed
doPlan := func() (*SimplifiedTerraformStatus, *TerraformShow, string, error) {
exitCode, tfJson, showJsonStderr, err := shellExec(terraformDirectory, "terraform", "show", "-json", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform show", exitCode, tfJson, showJsonStderr, err)
if err != nil {
return "", nil, err
}
exitCode, planStdout, planStderr, err := shellExec(terraformDirectory, "terraform", "plan", "-out", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform plan", exitCode, planStdout, planStderr, err)
if err != nil {
return nil, nil, "", err
}
//log.Println(string(tfJson))
exitCode, tfJson, showJsonStderr, err := shellExec(terraformDirectory, "terraform", "show", "-json", configuration.TERRAFORM_PLAN_FILE_NAME)
err = errorFromShellExecResult("terraform show", exitCode, tfJson, showJsonStderr, err)
if err != nil {
return nil, nil, "", err
}
var tfShow TerraformShow
err = json.Unmarshal([]byte(tfJson), &tfShow)
if err != nil {
return "", nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't json.Unmarshal the output of `terraform show`")
//log.Println(string(tfJson))
var tfShow TerraformShow
err = json.Unmarshal([]byte(tfJson), &tfShow)
if err != nil {
return nil, nil, "", errors.Wrap(err, "can't TerraformPlanAndApply because can't json.Unmarshal the output of `terraform show`")
}
// json, err := json.MarshalIndent(tfShow, "", " ")
// if err != nil {
// return nil, nil, "", errors.Wrap(err, "can't GenerateTerraformPlan because can't json.Marshal the output of `terraform show`")
// }
// log.Println(string(json))
simpleStatus, err := makeSimplifiedTerraformStatus(config, workingDirectory, tfShow)
if err != nil {
return nil, nil, "", errors.Wrap(err, "can't TerraformPlanAndApply because can't makeSimplifiedTerraformStatus")
}
return &simpleStatus, &tfShow, string(planStdout), nil
}
// json, err := json.MarshalIndent(tfShow, "", " ")
// if err != nil {
// return "", nil, errors.Wrap(err, "can't GenerateTerraformPlan because can't json.Marshal the output of `terraform show`")
// }
// log.Println(string(json))
simpleStatus, tfShow, planStdout, err := doPlan()
simpleStatus, err := makeSimplifiedTerraformStatus(config, workingDirectory, tfShow)
if err != nil {
return "", nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't makeSimplifiedTerraformStatus")
// After plan but before apply, check over the plan and fix any known issues with cloud providers
stateModified := false
// DigitalOcean
// TODO remove/import any orphaned server.garden tagged instances?
hasDigitalOcean := false
for _, cred := range config.Credentials {
if cred.Type == configuration.DIGITALOCEAN {
hasDigitalOcean = true
}
}
if hasDigitalOcean {
stateModified, err = handleDigitalOceanSSHKeyAlreadyExistsBug(
config,
workingDirectory,
terraformDirectory,
simpleStatus,
tfShow,
)
if err != nil {
return "", nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't handleKnownIssuesWithProviders")
}
}
// After we try to fix any known issues, we may have to plan again if the terraform state was changed
// for example, with terraform import or state commands.
if stateModified {
simpleStatus, tfShow, planStdout, err = doPlan()
}
// json, err := json.MarshalIndent(simpleStatus, "", " ")
@ -162,14 +206,14 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
// }
// log.Println(string(json))
svg, err := makeSVGFromSimpleStatus(&simpleStatus)
svg, err := makeSVGFromSimpleStatus(simpleStatus)
if err != nil {
return "", nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't makeSVGFromSimpleStatus")
}
for moduleName, module := range simpleStatus.Modules {
if module.IsAnsible {
err := linkAnsibleWrapperToModule(strings.TrimPrefix(moduleName, "module."), workingDirectory)
err := linkAnsibleWrapperToModule(strings.TrimPrefix(moduleName, "module."), workingDirectory, terraformProject)
if err != nil {
return "", nil, errors.Wrap(err, "can't TerraformPlanAndApply because can't linkAnsibleWrapperToModule")
}
@ -206,16 +250,18 @@ func TerraformPlanAndApply(config *configuration.Configuration, workingDirectory
toReturn := make(chan TerraformApplyResult)
logSoFar := fmt.Sprintf(
"%s\n%s\n%s\n%s\n%s\n",
"\n$ terraform init\n",
initStdout,
fmt.Sprintf("\n$ terraform plan -out %s\n", configuration.TERRAFORM_PLAN_FILE_NAME),
planStdout,
fmt.Sprintf("\n$ terraform apply -auto-approve %s\n", configuration.TERRAFORM_PLAN_FILE_NAME),
logSoFar := strings.Join(
[]string{
fmt.Sprintf("\n(%s) $ terraform init\n", terraformDirectory),
string(initStdout),
fmt.Sprintf("\n(%s) $ terraform plan -out %s\n", terraformDirectory, configuration.TERRAFORM_PLAN_FILE_NAME),
planStdout,
fmt.Sprintf("\n(%s) $ terraform apply -auto-approve %s\n", terraformDirectory, configuration.TERRAFORM_PLAN_FILE_NAME),
},
"\n",
)
go monitorTerraformApplyProgress(terraformDirectory, &simpleStatus, process, logSoFar, logLinesChannel, toReturn)
go monitorTerraformApplyProgress(terraformDirectory, simpleStatus, process, logSoFar, logLinesChannel, toReturn)
return svg, toReturn, nil
}
@ -442,6 +488,7 @@ func makeSimplifiedTerraformStatus(
continue
}
resource := SimplifiedTerraformResource{
ResourceType: "ansible_role",
DisplayName: roleName,
Plan: "none",
State: "ok",
@ -456,9 +503,10 @@ func makeSimplifiedTerraformStatus(
continue
}
resource := SimplifiedTerraformResource{
DisplayName: resource.Address,
Plan: "none",
State: "ok",
ResourceType: resource.Type,
DisplayName: resource.Address,
Plan: "none",
State: "ok",
}
resources = append(resources, &resource)
}
@ -477,12 +525,18 @@ func makeSimplifiedTerraformStatus(
}
}
resourceIndexRegexp := regexp.MustCompile(`\[([0-9]+)\]$`)
for _, resourceChange := range tfShow.ResourceChanges {
module, has := simpleModules[resourceChange.ModuleAddress]
if has {
address := strings.TrimPrefix(resourceChange.Address, resourceChange.ModuleAddress)
address = strings.Trim(address, ".")
address = regexp.MustCompile(`\[[0-9]+\]$`).ReplaceAllString(address, "")
// TODO it looks like the simplified terraform status wraps up repeated resources into one
// and it takes the "Plan" and "State" from whichever was the last one
// Is that ok? do we need to change that ?
//indexMatches = resourceIndexRegexp.FindStringSubmatch(address)
address = resourceIndexRegexp.ReplaceAllString(address, "")
if shouldOmit(address) {
continue
@ -492,6 +546,7 @@ func makeSimplifiedTerraformStatus(
for _, resource := range module.Resources {
if resource.DisplayName == address || module.IsAnsible {
foundResource = true
create := false
delete := false
update := false
@ -724,8 +779,8 @@ func makeSVGFromSimpleStatus(simpleStatus *SimplifiedTerraformStatus) (string, e
return svgString, nil
}
func linkAnsibleWrapperToModule(moduleName, workingDirectory string) error {
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules", moduleName)
func linkAnsibleWrapperToModule(moduleName, workingDirectory, terraformProject string) error {
ansibleDirectory := filepath.Join(workingDirectory, terraformProject, "modules", moduleName)
for _, toLink := range configuration.GET_ANSIBLE_WRAPPER_FILES() {
inModule := filepath.Join(ansibleDirectory, toLink)
@ -740,7 +795,19 @@ func linkAnsibleWrapperToModule(moduleName, workingDirectory string) error {
}
func getAnsibleRolesFromModule(moduleName, workingDirectory string) (map[string][]string, error) {
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules", moduleName)
ansibleDirectory := filepath.Join(workingDirectory, configuration.TERRAFORM_MODULES, moduleName)
rolesFolder := filepath.Join(ansibleDirectory, "roles")
if _, err := os.Stat(rolesFolder); os.IsNotExist(err) {
err = os.Symlink(
filepath.Join(workingDirectory, configuration.ANSIBLE_ROLES),
rolesFolder,
)
if err != nil {
return nil, errors.Wrapf(err, "could not create symbolic link to ansible roles folder")
}
}
exitCode, ansibleStdout, ansibleStderr, err := shellExec(ansibleDirectory, "ansible-playbook", "--list-tasks", configuration.ANSIBLE_PLAYBOOK_FILE_NAME)
err = errorFromShellExecResult("ansible-playbook --list-tasks", exitCode, ansibleStdout, ansibleStderr, err)

279
automation/terraformCodeGeneration.go

@ -12,6 +12,14 @@ import (
"git.sequentialread.com/forest/rootsystem/configuration"
)
type TerraformConfiguration struct {
TargetedModules []string
TerraformProject string
RemoteState string
RemoteStateVariables []string
HostKeysObjectStorageCredentials []configuration.Credential
}
type tfProvider struct {
Name string
Version string
@ -34,21 +42,23 @@ type tfVariable struct {
}
type tfProvidedBy struct {
Module string
Attribute string
Variable string
Module string
Attribute string
Variable string
RemoteStateOutput string
}
const ssh_public_keys = "ssh_public_keys"
const ssh_private_key_filepath = "ssh_private_key_filepath"
const node_id = "node_id"
const post_to_object_storage_shell_script = "post_to_object_storage_shell_script"
const ssh_private_key_filepath_value = "ssh/severgarden_builtin_ed22519"
func WriteTerraformCodeForTargetedModules(
config *configuration.Configuration,
workingDirectory string,
hostKeysObjectStorageCredentials []configuration.Credential,
) error {
terraformConfig TerraformConfiguration,
) ([]string, error) {
providers := map[string]tfProvider{
"digitalocean": tfProvider{
@ -66,7 +76,7 @@ func WriteTerraformCodeForTargetedModules(
modules, err := parseModulesFolder(providers, workingDirectory)
if err != nil {
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't parseModulesFolder()")
return []string{}, errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't parseModulesFolder()")
}
usedProviders := make(map[string]tfProvider)
@ -98,19 +108,20 @@ func WriteTerraformCodeForTargetedModules(
allVariables[k] = v
}
allVariables[ssh_public_keys] = "<secure shell public keys>"
allVariables[node_id] = config.Host.Name
allVariables[ssh_private_key_filepath] = ssh_private_key_filepath_value
allVariables[post_to_object_storage_shell_script] = "<long shell script>"
err = fillOutProvidedByOnAttributes(usedModules, allVariables)
err = fillOutProvidedByOnAttributes(usedModules, allVariables, terraformConfig.RemoteStateVariables)
if err != nil {
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't fillOutProvidedByOnAttributes")
return []string{}, errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't fillOutProvidedByOnAttributes")
}
modulesToCreate := map[string]bool{}
for _, target := range config.Terraform.TargetedModules {
for _, target := range terraformConfig.TargetedModules {
module, has := usedModules[target]
if !has {
return errors.Wrapf(
return []string{}, errors.Wrapf(
err,
`can't WriteTerraformCodeForTargetedModules because the TargetedModule \"%s\"
was not found in the list of modules that we have authenticated providers for.
@ -121,7 +132,7 @@ func WriteTerraformCodeForTargetedModules(
dependencies, err := getDependencies(module, usedModules, nil)
if !has {
return errors.Wrapf(err, `can't WriteTerraformCodeForTargetedModules because cant getDependencies for module "%s"`, module.Name)
return []string{}, errors.Wrapf(err, `can't WriteTerraformCodeForTargetedModules because cant getDependencies for module "%s"`, module.Name)
}
modulesToCreate[module.Name] = true
@ -177,24 +188,21 @@ func WriteTerraformCodeForTargetedModules(
}
sshPublicKeyObjectStrings := make([]string, 0)
// TODO Make sure these keys only readable by root
sshDirectory := filepath.Join(workingDirectory, configuration.SSH_KEYS_PATH)
sshFileInfos, err := ioutil.ReadDir(sshDirectory)
sshPublicKeys, err := getSSHPublicKeys(workingDirectory)
if err != nil {
return errors.Wrapf(err, "can't WriteTerraformCodeForTargetedModules because can't ioutil.ReadDir(\"%s\")", sshDirectory)
return []string{}, errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because can't getSSHPublicKeys")
}
for _, fileInfo := range sshFileInfos {
filepath := filepath.Join(sshDirectory, fileInfo.Name())
if !fileInfo.IsDir() && strings.HasSuffix(filepath, ".pub") {
sshPublicKeyObjectStrings = append(sshPublicKeyObjectStrings, fmt.Sprintf(
`{
name = "%s"
filepath = "%s"
}`,
strings.TrimSuffix(fileInfo.Name(), ".pub"),
filepath,
))
}
for _, sshPublicKey := range sshPublicKeys {
sshPublicKeyObjectStrings = append(sshPublicKeyObjectStrings, fmt.Sprintf(
`{
name = "%s"
filepath = "%s"
}`,
sshPublicKey[1],
sshPublicKey[0],
))
}
// we can't use the allVariables here like we used above, because we have to handle variable values
@ -207,7 +215,7 @@ func WriteTerraformCodeForTargetedModules(
variable "%s" {
default = <<EOT
%s
EOT
EOT
}
`, key, value,
))
@ -219,9 +227,15 @@ EOT
}
}
postToObjectStorageShellScript, err := getPostToObjectStorageShellScript(config, hostKeysObjectStorageCredentials)
if err != nil {
return errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because")
postToObjectStorageShellScript := ""
if terraformConfig.HostKeysObjectStorageCredentials != nil {
postToObjectStorageShellScript, err = getPostToObjectStorageShellScript(
config,
terraformConfig.HostKeysObjectStorageCredentials,
)
if err != nil {
return []string{}, errors.Wrap(err, "can't WriteTerraformCodeForTargetedModules because")
}
}
variableStanzas = append(variableStanzas,
@ -237,13 +251,17 @@ EOT
),
fmt.Sprintf(`
variable "%s" { default = "%s" }
`, ssh_private_key_filepath, ssh_private_key_filepath_value,
`, node_id, config.Host.Name,
),
fmt.Sprintf(`
variable "%s" { default = "%s" }
`, ssh_private_key_filepath, filepath.Join(workingDirectory, ssh_private_key_filepath_value),
),
fmt.Sprintf(`
variable "%s" {
default = <<EOT
%s
EOT
EOT
}
`, post_to_object_storage_shell_script, postToObjectStorageShellScript,
),
@ -253,33 +271,71 @@ EOT
for moduleName := range modulesToCreate {
module := usedModules[moduleName]
argumentLines := []string{}
providedByToString := func(provision *tfProvidedBy) string {
if provision.Variable != "" {
return fmt.Sprintf("var.%s", provision.Variable)
} else if provision.RemoteStateOutput != "" {
return fmt.Sprintf(
"data.terraform_remote_state.%s.outputs.%s",
terraformConfig.RemoteState, provision.RemoteStateOutput,
)
} else {
return fmt.Sprintf("module.%s.%s", provision.Module, provision.Attribute)
}
}
for _, argument := range module.Arguments {
if !argument.IsList {
if len(argument.ProvidedBy) > 1 {
return fmt.Errorf("TODO: what do do in this case? a non-list argument is provided by more than one source")
var usedProvision *tfProvidedBy = nil
providedByVariable := false
providedByModules := []string{}
for _, provision := range argument.ProvidedBy {
if provision.Variable != "" {
providedByVariable = true
usedProvision = provision
}
if provision.RemoteStateOutput != "" {
usedProvision = provision
}
if provision.Module != "" {
if usedProvision == nil {
usedProvision = provision
}
providedByModules = append(providedByModules, provision.Module)
}
}
provision := argument.ProvidedBy[0]
var provisionString string
if provision.Variable != "" {
provisionString = fmt.Sprintf("var.%s", provision.Variable)
} else {
provisionString = fmt.Sprintf("module.%s.%s", provision.Module, provision.Attribute)
if providedByVariable && len(providedByModules) > 0 {
return []string{},
fmt.Errorf(
"TODO: what do do in this case? a non-list argument %s is provided by both a variable and a module (%s)",
argument.Name,
providedByModules[0],
)
}
if len(providedByModules) > 1 {
return []string{},
fmt.Errorf(
"TODO: what do do in this case? a non-list argument %s is provided by more than one module: [%s]",
argument.Name,
strings.Join(providedByModules, ", "),
)
}
if usedProvision == nil {
return []string{}, fmt.Errorf("argument %s is not provided by any variables or modules", argument.Name)
}
argumentLines = append(argumentLines, fmt.Sprintf(
`
%s = %s
`,
argument.Name,
provisionString,
providedByToString(usedProvision),
))
} else {
fixedProvidedBy := removeRedundantModuleProvision(argument.ProvidedBy)
provisionStrings := []string{}
for _, provision := range argument.ProvidedBy {
if provision.Variable != "" {
provisionStrings = append(provisionStrings, fmt.Sprintf("var.%s", provision.Variable))
} else {
provisionStrings = append(provisionStrings, fmt.Sprintf("module.%s.%s", provision.Module, provision.Attribute))
}
for _, provision := range fixedProvidedBy {
provisionStrings = append(provisionStrings, providedByToString(provision))
}
argumentLines = append(argumentLines, fmt.Sprintf(
`
@ -306,11 +362,57 @@ EOT
))
}
terraformCodeFilepath := filepath.Join(configuration.TERRAFORM_CODE_PATH, "main.tf")
// err := os.Truncate(terraformCodeFilepath, 0)
terraformCodeFile, err := os.OpenFile(terraformCodeFilepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
outputStanzas := []string{}
outputVariables := []string{}
for moduleName := range modulesToCreate {
module := usedModules[moduleName]
for _, attribute := range module.Attributes {
outputVariables = append(outputVariables, attribute.Name)
outputStanzas = append(outputStanzas, fmt.Sprintf(`
output "%s" {
value = module.%s.%s
}
`, attribute.Name, module.Name, attribute.Name))
}
}
terraformFolder := filepath.Join(workingDirectory, terraformConfig.TerraformProject)
if _, err := os.Stat(terraformFolder); os.IsNotExist(err) {
err = os.Mkdir(terraformFolder, 0700)
if err != nil {
return []string{}, errors.Wrapf(err, "can't initializeTerraformProject because can't os.Mkdir(\"%s\")", terraformFolder)
}
}
terraformCodeFilepath := filepath.Join(terraformFolder, "main.tf")
terraformCodeFile, err := os.OpenFile(terraformCodeFilepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700)
if err != nil {
return errors.Wrapf(err, "can't initializeTerraformProject because can't os.OpenFile(\"%s\")", terraformCodeFilepath)
return []string{}, errors.Wrapf(err, "can't initializeTerraformProject because can't os.OpenFile(\"%s\")", terraformCodeFilepath)
}
terraformModulesFolder := filepath.Join(terraformFolder, "modules")
if _, err := os.Stat(terraformModulesFolder); os.IsNotExist(err) {
err = os.Symlink(
filepath.Join(workingDirectory, configuration.TERRAFORM_MODULES),
filepath.Join(terraformFolder, "modules"),
)
if err != nil {
return []string{}, errors.Wrapf(err, "could not create symbolic link to terraform modules folder")
}
}
remoteState := ""
if terraformConfig.RemoteState != "" {
remoteState = fmt.Sprintf(`
data "terraform_remote_state" "%s" {
backend = "http"
config = {
address = "http://localhost:6471/%s"
}
}
`,
terraformConfig.RemoteState,
terraformConfig.RemoteState,
)
}
fmt.Fprintf(
@ -318,20 +420,47 @@ EOT
`
terraform {
backend "http" {
address = "http://localhost:%d/"
address = "http://localhost:%d/%s"
}
}
%s
%s
%s
%s
%s
`,
configuration.TERRAFORM_STATE_SERVER_PORT_NUMBER,
terraformConfig.TerraformProject,
remoteState,
strings.Join(providerStanzas, "\n"),
strings.Join(variableStanzas, "\n"),
strings.Join(moduleStanzas, "\n"),
strings.Join(outputStanzas, "\n"),
)
return nil
return outputVariables, nil
}
// removeRedundantModuleProvision deduplicates a slice of provisions keyed by
// the name they satisfy (Attribute, falling back to Variable, falling back to
// RemoteStateOutput). When the same name can be satisfied both by another
// module's attribute and by a variable or remote-state output, the
// variable/remote-state provision wins — the module-attribute provision is
// considered redundant.
//
// Unlike the previous implementation (which collected results by ranging over
// a map, yielding a random order each run), the returned slice preserves the
// first-seen order of each name, so generated output is deterministic.
func removeRedundantModuleProvision(provisions []*tfProvidedBy) []*tfProvidedBy {
	provisionMap := map[string]*tfProvidedBy{}
	// names records first-seen order so the result is stable across runs.
	names := []string{}
	for _, provision := range provisions {
		name := provision.Attribute
		if name == "" {
			name = provision.Variable
		}
		if name == "" {
			name = provision.RemoteStateOutput
		}
		_, has := provisionMap[name]
		if !has {
			names = append(names, name)
		}
		// A variable or remote-state provision overrides a module-attribute
		// provision for the same name; otherwise the first one seen is kept.
		if !has || provision.Variable != "" || provision.RemoteStateOutput != "" {
			provisionMap[name] = provision
		}
	}
	toReturn := make([]*tfProvidedBy, 0, len(names))
	for _, name := range names {
		toReturn = append(toReturn, provisionMap[name])
	}
	return toReturn
}
func provides(attributeName string, argument *tfVariable) bool {
@ -340,7 +469,11 @@ func provides(attributeName string, argument *tfVariable) bool {
return attributeName == argument.Name || listMatch
}
func fillOutProvidedByOnAttributes(modules map[string]tfModule, variables map[string]string) error {
func fillOutProvidedByOnAttributes(
modules map[string]tfModule,
variables map[string]string,
remoteStateOutputs []string,
) error {
for _, module := range modules {
for _, argument := range module.Arguments {
argument.ProvidedBy = make([]*tfProvidedBy, 0)
@ -351,6 +484,12 @@ func fillOutProvidedByOnAttributes(modules map[string]tfModule, variables map[st
}
}
for _, outputName := range remoteStateOutputs {
if provides(outputName, argument) {
argument.ProvidedBy = append(argument.ProvidedBy, &tfProvidedBy{RemoteStateOutput: outputName})
}
}
for _, otherModule := range modules {
if otherModule.Name != module.Name {
for _, otherModuleAttribute := range otherModule.Attributes {
@ -391,8 +530,12 @@ func getDependencies(
dependencies := make([]string, 0)
for _, argument := range module.Arguments {
for _, provision := range argument.ProvidedBy {
if provision.Module != "" && !visited[provision.Module] {
fixedProvidedBy := removeRedundantModuleProvision(argument.ProvidedBy)
for _, provision := range fixedProvidedBy {
requiresModule := provision.RemoteStateOutput == "" && provision.Variable == ""
if requiresModule && provision.Module != "" && !visited[provision.Module] {
visited[provision.Module] = true
dependencies = append(dependencies, provision.Module)
otherModule, has := modules[provision.Module]
@ -420,7 +563,7 @@ func parseModulesFolder(providers map[string]tfProvider, workingDirectory string
modules := make([]tfModule, 0)
// TODO make sure modules folder and all the code inside is owned by root and only writable by root
modulesFolder := filepath.Join(workingDirectory, configuration.TERRAFORM_CODE_PATH, "modules")
modulesFolder := filepath.Join(workingDirectory, configuration.TERRAFORM_MODULES)
modulesFileInfos, err := ioutil.ReadDir(modulesFolder)
if err != nil {
return nil, errors.Wrapf(err, "can't parseModulesFolder because can't ioutil.ReadDir(\"%s\")", modulesFolder)
@ -546,3 +689,23 @@ curl -sS -X POST \
return strings.Join(scripts, "\n"), nil
}
// getSSHPublicKeys lists the SSH public keys (files ending in ".pub") found
// directly inside the configured SSH keys folder under workingDirectory.
// Each entry of the returned slice is a [filePath, keyName] pair, where
// keyName is the file name with its ".pub" suffix removed.
// Subdirectories are skipped; the directory is not walked recursively.
func getSSHPublicKeys(workingDirectory string) ([][]string, error) {
	// TODO Make sure these keys only readable by root
	sshDirectory := filepath.Join(workingDirectory, configuration.SSH_KEYS_PATH)
	sshFileInfos, err := ioutil.ReadDir(sshDirectory)
	if err != nil {
		return [][]string{}, err
	}
	toReturn := [][]string{}
	for _, fileInfo := range sshFileInfos {
		// Test the bare file name, not the joined path: ".pub" can only
		// appear in the final path element anyway.
		if fileInfo.IsDir() || !strings.HasSuffix(fileInfo.Name(), ".pub") {
			continue
		}
		// keyFilepath (NOT "filepath") — the old name shadowed the
		// path/filepath package inside this loop.
		keyFilepath := filepath.Join(sshDirectory, fileInfo.Name())
		// todo scrape the name from the key comment ??
		name := strings.TrimSuffix(fileInfo.Name(), ".pub")
		toReturn = append(toReturn, []string{keyFilepath, name})
	}
	return toReturn, nil
}

10
build.sh

@ -13,9 +13,13 @@ GOOS=linux GOARCH=arm go build -o build/rootsystem/ansible-wrapper/ansible-playb
cp -r ansible-wrapper/callback_plugins build/rootsystem/ansible-wrapper/callback_plugins
cp ansible-wrapper/ansible.cfg build/rootsystem/ansible-wrapper/ansible.cfg
mkdir -p build/rootsystem/terraform-project
cp -r terraform-project/modules build/rootsystem/terraform-project/modules
cp terraform-project/main.tf build/rootsystem/terraform-project/modules/main.tf
mkdir -p build/rootsystem/terraform-global
cp -r terraform-global/modules build/rootsystem/terraform-global/modules
cp terraform-global/main.tf build/rootsystem/terraform-global/modules/main.tf
mkdir -p build/rootsystem/terraform-local
cp -r terraform-local/modules build/rootsystem/terraform-local/modules
cp terraform-local/main.tf build/rootsystem/terraform-local/modules/main.tf
cp ReadMe.md build/rootsystem/ReadMe.md

11
configuration/configuration.go

@ -25,8 +25,9 @@ type HostConfiguration struct {
}
type TerraformConfiguration struct {
TargetedModules []string
Variables map[string]string
GlobalModules []string
LocalModules []string
Variables map[string]string
}
type ObjectStorageConfiguration struct {
@ -49,10 +50,14 @@ type Credential struct {
const GANDI = "Gandi"
const DIGITALOCEAN = "DigitalOcean"
const DIGITALOCEAN_API_URL = "https://api.digitalocean.com"
const AMAZON_S3 = "AmazonS3"
const BACKBLAZE_B2 = "BackblazeB2"
const OBJECT_STORAGE_PASSPHRASE = "ObjectStoragePassphrase"
const TERRAFORM_CODE_PATH = "terraform-project"
const GLOBAL_TERRAFORM_PROJECT = "terraform-global"
const LOCAL_TERRAFORM_PROJECT = "terraform-local"
const TERRAFORM_MODULES = "terraform-modules"
const ANSIBLE_ROLES = "ansible-roles"
const ANSIBLE_PLAYBOOK_FILE_NAME = "playbook.yml"
const TERRAFORM_PLAN_FILE_NAME = "terraform-plan-file"
const ANSIBLE_WRAPPER_PATH = "ansible-wrapper"

122
main.go

@ -5,7 +5,6 @@ import (
"fmt"
"log"
"net/http"
"time"
errors "git.sequentialread.com/forest/pkg-errors"
@ -36,15 +35,9 @@ func main() {
go terraformStateServer()
time.Sleep(time.Second)
initializeAutomation(config)
}
func initializeAutomation(config *configuration.Configuration) {
// releaseLock, err := aquireLock(config.Host.Name)
// This creates an access key that the gateway cloud instance can use to upload its SSH public key
// to our object storage. the host-key-poller will download this SSH host public key and add it to our known_hosts
// so that we can SSH to the gateway instance securely
hostKeysAccessSpec := objectStorage.ObjectStorageKey{
Name: "rootsystem-known-hosts",
PathPrefix: "rootsystem/known-hosts",
@ -58,60 +51,105 @@ func initializeAutomation(config *configuration.Configuration) {
panic(err)
}
err = automation.WriteTerraformCodeForTargetedModules(config, global.workingDirectory, knownHostsCredentials)
// BuildTLSCertsForThreshold fills in the CAs, Keys, and Certificates in the Threshold ansible roles.
// So when terraform invokes ansible to install threshold client/server, it will install working
// certificates and keys
// err = pki.BuildTLSCertsForThreshold(
// global.workingDirectory,
// config.Terraform.Variables["domain_name"],
// config.Host.Name,
// global.storage,
// )
// if err != nil {
// panic(err)
// }
// First, run the terraform build for the GLOBAL components, meaning the components
// that exist in the cloud, independent of how many server.garden nodes are being used.
outputVariables, err := terraformBuild(
config,
automation.TerraformConfiguration{
TargetedModules: config.Terraform.GlobalModules,
TerraformProject: configuration.GLOBAL_TERRAFORM_PROJECT,
HostKeysObjectStorageCredentials: knownHostsCredentials,
},
)
if err != nil {
panic(err)
}
svg, statusChannel, err := automation.TerraformPlanAndApply(config, global.workingDirectory)
// Next, we run a separate LOCAL terraform build which is specific to THIS server.garden node,
// this build will be responsible for installing software on this node & registering this node with the
// cloud resources
_, err = terraformBuild(
config,
automation.TerraformConfiguration{
TargetedModules: config.Terraform.LocalModules,
TerraformProject: fmt.Sprintf("%s-%s", configuration.LOCAL_TERRAFORM_PROJECT, config.Host.Name),
RemoteState: configuration.GLOBAL_TERRAFORM_PROJECT,
RemoteStateVariables: outputVariables,
},
)
if err != nil {
panic(err)
}
a := make(chan bool)
<-a
}
func terraformBuild(
config *configuration.Configuration,
terraformConfig automation.TerraformConfiguration,
) ([]string, error) {
outputVariables, err := automation.WriteTerraformCodeForTargetedModules(
config,
global.workingDirectory,
terraformConfig,
)
if err != nil {
return []string{}, err
}
fmt.Println("WriteTerraformCodeForTargetedModules done")
svg, statusChannel, err := automation.TerraformPlanAndApply(config, global.workingDirectory, terraformConfig.TerraformProject)
if err != nil {
return []string{}, err
}
fmt.Println("TerraformPlanAndApply done")
err = global.storage.Put("rootsystem/terraform/diagram.svg", []byte(svg))
if err != nil {
panic(err)
return []string{}, err
}
var terraformPlanAndApplyError error = nil
for status := range statusChannel {
json, err := json.MarshalIndent(status, "", " ")
if err != nil {
panic(err)
return []string{}, err
}
log.Println(string(json))
log.Println(status.Log)
err = global.storage.Put("rootsystem/terraform/status.json", []byte(json))
if err != nil {
panic(err)
return []string{}, err
}
if status.Complete {
terraformPlanAndApplyError = status.Error
}
}
// releaseLock()
a := make(chan bool)
<-a
// if err != nil {
// panic(err)
// }
// time.Sleep(time.Second * time.Duration(5))
// releaseLock()
// time.Sleep(time.Second * time.Duration(5))
// err := global.storage.Put("asd", []byte("dooofoooo"))
// if err != nil {
// panic(err)
// }
// file, _, err := global.storage.Get("asd2")
// if err != nil {
// panic(err)
// }
// fmt.Println(string(file.Content))
if terraformPlanAndApplyError != nil {
return outputVariables, terraformPlanAndApplyError
}
return outputVariables, nil
}
func terraformStateServer() error {

302
pki/pki.go

@ -0,0 +1,302 @@
package pki
import (
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"git.sequentialread.com/forest/easypki/pkg/certificate"
"git.sequentialread.com/forest/easypki/pkg/easypki"
"git.sequentialread.com/forest/easypki/pkg/store"
"git.sequentialread.com/forest/rootsystem/configuration"
"git.sequentialread.com/forest/rootsystem/objectStorage"
"github.com/pkg/errors"
)
func BuildTLSCertsForThreshold(
workingDirectory string,
domain string,
clientId string,
storage objectStorage.ObjectStorager,
) error {
inMemoryStore := &store.InMemory{}
pki := &easypki.EasyPKI{Store: inMemoryStore}
//pkiBytes, err := json.MarshalIndent(inMemoryStore.CAs, "", " ")
clientCA := fmt.Sprintf("%s_CA", clientId)
domainCA := fmt.Sprintf("%s_CA", domain)
thresholdServerConfigRole := filepath.Join(
workingDirectory,
configuration.ANSIBLE_ROLES,
"threshold-server-config/files",
)
thresholdClientConfigRole := filepath.Join(
workingDirectory,
configuration.ANSIBLE_ROLES,
"threshold-client-config/files",
)
thresholdRegisterClientWithServerConfigRole := filepath.Join(
workingDirectory,
configuration.ANSIBLE_ROLES,
"threshold-register-client-with-server/files",
)
for _, path := range []string{
thresholdServerConfigRole,
thresholdClientConfigRole,
thresholdRegisterClientWithServerConfigRole,
} {
if _, err := os.Stat(path); os.IsNotExist(err) {
err = os.Mkdir(path, 0600)
if err != nil {
return errors.Wrap(err, "BuildTLSCertsForThreshold(): failed trying to ensure files folder exists in ansible role")
<