// server.garden privileged automation agent
// (mirror of https://git.sequentialread.com/forest/rootsystem)
package automation
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"regexp"
"strings"
"time"
errors "git.sequentialread.com/forest/pkg-errors"
"git.sequentialread.com/forest/rootsystem/configuration"
composeloader "github.com/docker/cli/cli/compose/loader"
composeschema "github.com/docker/cli/cli/compose/schema"
composetypes "github.com/docker/cli/cli/compose/types"
)
// DockerComposeUp brings up every application module listed in the
// configuration by running `docker-compose up -d` in each module's folder,
// in dependency order (a module that mounts a network created by another
// module is started after that module).
//
// It returns immediately with:
//   - an SVG diagram rendered from the deployment plan,
//   - a channel that streams TerraformApplyResult progress updates, ending
//     with a terminal result (Complete == true) before the channel is closed,
//   - an error if planning failed before any containers were touched.
func DockerComposeUp(
	config *configuration.Configuration,
	workingDirectory string,
) ([]byte, chan TerraformApplyResult, error) {
	existingModuleFolders := map[string]bool{}
	applicationModulesMap := map[string]*composetypes.ConfigDetails{}
	applicationModulesPath := path.Join(workingDirectory, configuration.APPLICATION_MODULES_PATH)
	fileInfos, err := ioutil.ReadDir(applicationModulesPath)
	if err != nil {
		return nil, nil, errors.Wrap(err, "can't list application modules directory")
	}
	// Index the module folders that exist on disk so the configured module
	// list can be validated against them.
	for _, fileInfo := range fileInfos {
		if fileInfo.IsDir() {
			existingModuleFolders[fileInfo.Name()] = true
		}
	}
	// Read and parse docker-compose.yml for every configured module.
	for _, moduleName := range config.ApplicationModules {
		_, has := existingModuleFolders[moduleName]
		if !has {
			return nil, nil, errors.New(fmt.Sprintf("unknown application module '%s'", moduleName))
		}
		workingDir := path.Join(applicationModulesPath, moduleName)
		dockerComposePath := path.Join(workingDir, "docker-compose.yml")
		fileInfo, err := os.Stat(dockerComposePath)
		if err != nil || fileInfo.IsDir() {
			return nil, nil, errors.New(
				fmt.Sprintf("application module '%s' doesn't contain a docker-compose.yml file", moduleName),
			)
		}
		bytes, err := ioutil.ReadFile(dockerComposePath)
		if err != nil {
			return nil, nil, errors.Wrap(err, "can't read application module compose file")
		}
		// NOTE(review): this `config` shadows the *configuration.Configuration
		// parameter for the rest of this loop iteration.
		config, err := composeloader.ParseYAML(bytes)
		if err != nil {
			return nil, nil, errors.New(
				fmt.Sprintf("can't ParseYAML(docker-compose.yml) in application module '%s'", moduleName),
			)
		}
		configDetails := composetypes.ConfigDetails{
			WorkingDir: workingDir,
			Version:    composeschema.Version(config),
			ConfigFiles: []composetypes.ConfigFile{
				composetypes.ConfigFile{
					Filename: dockerComposePath,
					Config:   config,
				},
			},
		}
		applicationModulesMap[moduleName] = &configDetails
		// TODO set details.Environment ?
	}
	// Map every network name to the module that creates it, so that modules
	// referencing those networks as "external" can be treated as depending on
	// the creating module.
	configs := map[string]*composetypes.Config{}
	moduleByNetwork := map[string]string{}
	connectionsMap := map[string]map[string]SimplifiedTerraformConnection{}
	for _, moduleName := range config.ApplicationModules {
		dockerCompose, err := composeloader.Load(*(applicationModulesMap[moduleName]))
		if err != nil {
			return nil, nil, errors.Wrapf(
				err, "can't composeloader.Load(docker-compose.yml) for application module '%s'", moduleName,
			)
		}
		configs[moduleName] = dockerCompose
		// docker-compose creates "<project>_default" when any service either
		// lists no networks at all or explicitly references "default".
		createsDefaultNetwork := false
		for _, service := range dockerCompose.Services {
			for networkName := range service.Networks {
				if networkName == "default" {
					createsDefaultNetwork = true
				}
			}
			if len(service.Networks) == 0 {
				createsDefaultNetwork = true
			}
		}
		if createsDefaultNetwork {
			moduleByNetwork[fmt.Sprintf("%s_default", moduleName)] = moduleName
		}
		for networkId, network := range dockerCompose.Networks {
			if !network.External.External {
				// non-external networks get the compose naming convention
				// "<project>_<network>" unless an explicit name overrides it
				networkName := fmt.Sprintf("%s_%s", moduleName, networkId)
				if network.Name != "" {
					networkName = network.Name
				}
				moduleByNetwork[networkName] = moduleName
			}
		}
		// bytes2, _ := json.MarshalIndent(dockerCompose, "", " ")
		// log.Printf("\ndockerCompose %s: %s\n\n", moduleName, string(bytes2))
	}
	bytes1, _ := json.MarshalIndent(moduleByNetwork, "", " ")
	log.Printf("\nmoduleByNetwork: %s\n\n", string(bytes1))
	// Build the module-to-module dependency graph: a module that mounts an
	// external network owned by another module depends on that module.
	for _, moduleName := range config.ApplicationModules {
		config := configs[moduleName]
		for _, network := range config.Networks {
			dependsOnModule := moduleByNetwork[network.Name]
			if network.External.External && dependsOnModule != "" {
				if connectionsMap[moduleName] == nil {
					connectionsMap[moduleName] = map[string]SimplifiedTerraformConnection{}
				}
				connectionsMap[moduleName][dependsOnModule] = SimplifiedTerraformConnection{
					From:        dependsOnModule,
					To:          moduleName,
					DisplayName: network.Name,
				}
			}
		}
		//TODO external volumes as well..?
		//TODO depends_on.. ?
		//TODO dis-allow links? other validation?
	}
	// Topologically sort the modules so dependencies start first. The
	// iteration cap bounds the loop when a dependency cycle exists; the
	// shortfall is detected and reported right after the loop.
	sortedApplicationModulesMap := map[string]bool{}
	sortedApplicationModules := []string{}
	iterations := 0
	for len(sortedApplicationModules) < len(config.ApplicationModules) && iterations < 100 {
		iterations++
		for _, moduleName := range config.ApplicationModules {
			if !sortedApplicationModulesMap[moduleName] {
				allDependenciesMet := true
				dependencies := connectionsMap[moduleName]
				if dependencies != nil {
					for dependsOnModule := range dependencies {
						if !sortedApplicationModulesMap[dependsOnModule] {
							allDependenciesMet = false
						}
					}
				}
				if allDependenciesMet {
					sortedApplicationModulesMap[moduleName] = true
					sortedApplicationModules = append(sortedApplicationModules, moduleName)
				}
			}
		}
	}
	if len(sortedApplicationModules) < len(config.ApplicationModules) {
		return nil, nil, errors.New(
			"circular dependency detected: can't sort application modules in dependency order",
		)
	}
	// Flatten the nested connections map for the plan and the diagram.
	connections := []SimplifiedTerraformConnection{}
	for _, dependsOn := range connectionsMap {
		for _, connection := range dependsOn {
			connections = append(connections, connection)
		}
	}
	simpleStatus, err := dockerComposePlan(configs, applicationModulesMap, connections)
	if err != nil {
		return nil, nil, errors.Wrap(err, "can't create docker-compose plan")
	}
	svgBytes, err := makeSVGFromSimpleStatus(simpleStatus)
	//svg, err := makeSVGFromDockerCompose(moduleDependencies, applicationModulesMap, configs)
	if err != nil {
		return nil, nil, errors.Wrap(err, "can't create svg diagram from docker-compose plan document")
	}
	// Everything below runs asynchronously; progress and the final result are
	// delivered over outputChannel.
	outputChannel := make(chan TerraformApplyResult)
	go (func() {
		logLinesChannel := make(chan string)
		logLines := []string{}
		dockerComposeIsRunning := true
		// NOTE(review): logLines, simpleStatus, and dockerComposeIsRunning are
		// shared by this goroutine and the two spawned below without any
		// synchronization — technically a data race; confirm acceptable or
		// guard with a mutex / atomics.
		// Consume docker-compose output lines: accumulate the log and parse
		// "<Action> <container> ... <status>" progress lines to update the
		// per-service State in simpleStatus.
		go (func() {
			for logLine := range logLinesChannel {
				logLines = append(logLines, logLine)
				split := strings.Split(logLine, " ... ")
				if len(split) == 2 {
					leftSide := strings.Split(strings.TrimSpace(split[0]), " ")
					status := split[1]
					if len(leftSide) == 2 {
						action := leftSide[0]
						containerName := leftSide[1]
						for moduleName, _ := range configs {
							moduleNameUnderscore := fmt.Sprintf("%s_", moduleName)
							if strings.HasPrefix(containerName, moduleNameUnderscore) {
								// container names look like "<module>_<service>_<replica#>";
								// strip the module prefix and the trailing replica number
								serviceName := strings.TrimPrefix(containerName, moduleNameUnderscore)
								serviceName = regexp.MustCompile("_[0-9]$").ReplaceAllString(serviceName, "")
								if simpleStatus.Modules[moduleName] != nil {
									for _, service := range simpleStatus.Modules[moduleName].Resources {
										if service.DisplayName == serviceName {
											if status == "done" {
												service.State = "ok"
											} else if action == "Recreating" {
												service.State = "modifying"
											} else if action == "Creating" {
												service.State = "creating"
											}
										}
									}
								}
							}
						}
					}
				}
			}
		})()
		// abort sends a terminal failure result and closes the output channel.
		abort := func(err error) {
			dockerComposeIsRunning = false
			outputChannel <- TerraformApplyResult{
				Error:    err,
				Complete: true,
				Success:  false,
				Log:      strings.Join(logLines, "\n"),
				Status:   simpleStatus,
			}
			close(outputChannel)
		}
		// Periodically push non-terminal progress snapshots while
		// docker-compose is still running.
		go (func() {
			for dockerComposeIsRunning {
				time.Sleep(time.Second * configuration.TERRAFORM_APPLY_STATUS_UPDATE_INTERVAL_SECONDS)
				if dockerComposeIsRunning {
					outputChannel <- TerraformApplyResult{
						Error:  nil,
						Log:    strings.Join(logLines, "\n"),
						Status: simpleStatus,
					}
				}
			}
		})()
		// Run `docker-compose up` for each module sequentially, in dependency
		// order, stopping at the first module that exits nonzero.
		dockerComposeSuccess := true
		for _, moduleName := range sortedApplicationModules {
			// TODO handle panics in here with abort
			details := applicationModulesMap[moduleName]
			process := exec.Command("docker-compose", "--no-ansi", "up", "-d", "--remove-orphans")
			logLinesChannel <- fmt.Sprintf("\n(%s) $ docker-compose --no-ansi up -d --remove-orphans\n", details.WorkingDir)
			process.Dir = details.WorkingDir
			stdoutPipe, err := process.StdoutPipe()
			if err != nil {
				abort(errors.Wrap(err, "can't DockerComposeUp because can't process.StdoutPipe() docker-compose process"))
				return
			}
			stderrPipe, err := process.StderrPipe()
			if err != nil {
				abort(errors.Wrap(err, "can't DockerComposeUp because can't process.StderrPipe() docker-compose process"))
				return
			}
			err = process.Start()
			if err != nil {
				abort(errors.Wrap(err, "can't DockerComposeUp because can't process.Start() docker-compose process"))
				return
			}
			go scanAllOutput(stdoutPipe, logLinesChannel)
			go scanAllOutput(stderrPipe, logLinesChannel)
			// An *exec.ExitError only means a nonzero exit code; that case is
			// handled below via ProcessState rather than treated as an abort.
			err = process.Wait()
			_, isExitError := err.(*exec.ExitError)
			if err != nil && !isExitError {
				abort(errors.Wrap(err, "can't DockerComposeUp, error occurred while waiting for docker-compose to finish"))
				return
			}
			if process.ProcessState.ExitCode() != 0 {
				dockerComposeSuccess = false
				break
			}
		}
		dockerComposeIsRunning = false
		// On failure, flag every resource that had a pending change as errored.
		if !dockerComposeSuccess {
			for _, module := range simpleStatus.Modules {
				for _, resource := range module.Resources {
					if resource.Plan != "none" {
						resource.State = "error"
					}
				}
			}
		}
		// Best-effort grace period so the output scanners can deliver their
		// final lines before the log channel is closed.
		time.Sleep(time.Millisecond * 100)
		close(logLinesChannel)
		outputChannel <- TerraformApplyResult{
			Error:    nil,
			Complete: true,
			Success:  dockerComposeSuccess,
			Log:      strings.Join(logLines, "\n"),
			Status:   simpleStatus,
		}
		close(outputChannel)
	})()
	return svgBytes, outputChannel, nil
}
// dockerComposePlan inspects the currently-running docker containers and the
// given docker-compose configs, and produces a SimplifiedTerraformStatus
// describing what a `docker-compose up` run would do to each service:
// "create", "recreate" (compose config hash changed), "delete" (container
// exists but its service is gone from the configs), or "none".
func dockerComposePlan(
	configs map[string]*composetypes.Config,
	configDetails map[string]*composetypes.ConfigDetails,
	connections []SimplifiedTerraformConnection,
) (*SimplifiedTerraformStatus, error) {

	const listContainersTaskName = "list_containers_task"

	// One task lists all containers; one task per module asks docker-compose
	// for the current config hash of each of its services.
	taskList := []func() TaskResult{
		func() TaskResult {
			containers, listErr := ListDockerContainers()
			if listErr != nil {
				return TaskResult{
					Name: listContainersTaskName,
					Err:  errors.Wrap(listErr, "can't list docker containers"),
				}
			}
			return TaskResult{
				Name:   listContainersTaskName,
				Result: containers,
			}
		},
	}
	for name, details := range configDetails {
		// capture the CURRENT VALUES for the closure, not the loop variables
		// https://stackoverflow.com/questions/26692844/captured-closure-for-loop-variable-in-go
		name := name
		workingDir := details.WorkingDir
		taskList = append(taskList, func() TaskResult {
			taskName := fmt.Sprintf("%s_get_compose_config_hash", name)
			exitCode, stdout, stderr, execErr := shellExec(workingDir, "docker-compose", "config", "--hash=*")
			execErr = errorFromShellExecResult("docker-compose config --hash=*", exitCode, stdout, stderr, execErr)
			if execErr != nil {
				return TaskResult{Name: taskName, Err: execErr}
			}
			// Expected output, one "<service> <hash>" pair per line:
			//   threshold 5f6deb1fa7dfc76fa0dc04e96d22cb4a2cc05a02060a4a3e77d38b87ff1d7f82
			//   caddy 95b3516b900089a3d04376b2635bfbde5596ec3769c50f12ce53b65f2d2e62a0
			hashes := map[string]string{}
			for _, line := range strings.Split(string(stdout), "\n") {
				if fields := strings.Split(line, " "); len(fields) == 2 {
					hashes[fields[0]] = fields[1]
				}
			}
			return TaskResult{Name: taskName, Result: hashes}
		})
	}

	taskResults := DoInParallel(taskList...)
	for _, taskResult := range taskResults {
		if taskResult.Err != nil {
			return nil, errors.Wrapf(taskResult.Err, "can't dockerComposePlan because %s failed", taskResult.Name)
		}
	}

	containers := taskResults[listContainersTaskName].Result.([]DockerContainer)
	containerByModuleService := map[string]DockerContainer{}
	modules := map[string]*SimplifiedTerraformModule{}

	// ensureModule lazily creates the plan entry for a module.
	ensureModule := func(moduleName string) *SimplifiedTerraformModule {
		if modules[moduleName] == nil {
			modules[moduleName] = &SimplifiedTerraformModule{
				DisplayName: moduleName,
				Resources:   []*SimplifiedTerraformResource{},
			}
		}
		return modules[moduleName]
	}

	// Pass 1: walk the `docker ps -a` results. Remember each compose-managed
	// container under "<module>_<service>", and mark as "delete" any container
	// whose service no longer appears in the current configs.
	for _, container := range containers {
		moduleName := container.Labels["com.docker.compose.project"]
		serviceName := container.Labels["com.docker.compose.service"]
		if moduleName == "" {
			// not a docker-compose container; ignore
			continue
		}
		declared := false
		if moduleConfig, has := configs[moduleName]; has {
			if serviceName != "" {
				containerByModuleService[fmt.Sprintf("%s_%s", moduleName, serviceName)] = container
			}
			for _, service := range moduleConfig.Services {
				if service.Name == serviceName {
					declared = service.Name != ""
				}
			}
		}
		if declared {
			continue
		}
		planModule := ensureModule(moduleName)
		planModule.Resources = append(planModule.Resources, &SimplifiedTerraformResource{
			DisplayName: serviceName,
			Plan:        "delete",
		})
	}

	// Pass 2: walk the current configs. Services without a container are
	// "create"; services whose stored config-hash label differs from the
	// freshly-computed hash are "recreate"; everything else is "none".
	for moduleName, moduleConfig := range configs {
		planModule := ensureModule(moduleName)
		for _, service := range moduleConfig.Services {
			container := containerByModuleService[fmt.Sprintf("%s_%s", moduleName, service.Name)]
			plan := "create"
			if container.Id != "" {
				plan = "none"
				existingHash := container.Labels["com.docker.compose.config-hash"]
				hashTaskName := fmt.Sprintf("%s_get_compose_config_hash", moduleName)
				newHash := taskResults[hashTaskName].Result.(map[string]string)[service.Name]
				if existingHash != newHash {
					plan = "recreate"
				}
			}
			planModule.Resources = append(planModule.Resources, &SimplifiedTerraformResource{
				DisplayName: service.Name,
				Plan:        plan,
			})
		}
	}

	return &SimplifiedTerraformStatus{
		Modules:     modules,
		Variables:   map[string]string{},
		Connections: connections,
	}, nil
}
// DoInParallel runs each action in its own goroutine and collects their
// results into a map keyed by TaskResult.Name.
//
// If any task reports an error, collection stops early and the partial map
// (including the failed task's result) is returned; tasks whose results were
// not yet received are simply absent from the map.
func DoInParallel(actions ...func() TaskResult) map[string]TaskResult {
	// Buffered so every goroutine can send its result even if we stop
	// receiving after the first error below — with an unbuffered channel the
	// remaining goroutines would block on the send forever and leak.
	resultsChannel := make(chan TaskResult, len(actions))
	results := map[string]TaskResult{}
	log.Printf("do %d actions in parallel", len(actions))
	for _, action := range actions {
		// this is how you do closures over the VALUE of a variable (not the variable itself) in golang
		// https://stackoverflow.com/questions/26692844/captured-closure-for-loop-variable-in-go
		action := action
		go (func() {
			resultsChannel <- action()
		})()
	}
	for range actions {
		result := <-resultsChannel
		results[result.Name] = result
		log.Printf("task '%s' completed", result.Name)
		if result.Err != nil {
			// stop waiting at the first failure; the buffered channel lets
			// the remaining goroutines finish and exit on their own
			break
		}
	}
	return results
}
// TaskResult is the outcome of a single task run by DoInParallel.
type TaskResult struct {
	// Name identifies the task; it is the key in DoInParallel's result map.
	Name string
	// Err is non-nil when the task failed.
	Err error
	// Result is the task's payload; callers type-assert it to the concrete
	// type the task produced.
	Result interface{}
}
// func makeSVGFromDockerCompose(
// moduleDependencies map[string][]string,
// configs map[string]*composetypes.Config,
// ) ([]byte, error) {
// moduleDots := []string{}
// for moduleName := range configs {
// serviceDots := []string{}
// config := configs[moduleName]
// for _, service := range config.Services {
// id := fmt.Sprintf(`service_%s_%s`, moduleName, service.Name)
// id = strings.ReplaceAll(strings.ReplaceAll(id, ".", "_"), "-", "_")
// serviceDot := fmt.Sprintf(`
// "%s" [label = "%s", tooltip="%s", margin = 0.1, shape = "box3d]`,
// id, padStringForDot(service.Name), id,
// )
// serviceDots = append(serviceDots, serviceDot)
// }
// i := 0
// for _, service := range config.Services {
// if i != 0 {
// id0 := fmt.Sprintf(`service_%s_%s`, moduleName, config.Services[i-1].Name)
// id0 = strings.ReplaceAll(strings.ReplaceAll(id0, ".", "_"), "-", "_")
// id1 := fmt.Sprintf(`service_%s_%s`, moduleName, service.Name)
// id1 = strings.ReplaceAll(strings.ReplaceAll(id1, ".", "_"), "-", "_")
// serviceDot := fmt.Sprintf(`
// "%s" -> "%s" [style=invis]`, id0, id1,
// )
// serviceDots = append(serviceDots, serviceDot)
// }
// i++
// }
// moduleDot := fmt.Sprintf(`
// subgraph cluster_%s {
// bgcolor = lightgrey;
// tooltip = "%s";
// label = "%s";
// %s
// }`,
// strings.ReplaceAll(strings.ReplaceAll(moduleName, ".", "_"), "-", "_"),
// moduleName,
// padStringForDot(moduleName),
// strings.Join(serviceDots, ""),
// )
// moduleDots = append(moduleDots, moduleDot)
// }
// dependencyDots := []string{}
// for to, fromList := range moduleDependencies {
// for _, from := range fromList {
// fromServiceName := configs[from].Services[len(configs[from].Services)-1].Name
// toServiceName := configs[to].Services[0].Name
// fromService := fmt.Sprintf(`service_%s_%s`, from, fromServiceName)
// toService := fmt.Sprintf(`service_%s_%s`, to, toServiceName)
// dependencyDot := fmt.Sprintf(`
// "%s" -> "%s" [ ltail="cluster_%s", lhead="cluster_%s" ];`,
// strings.ReplaceAll(strings.ReplaceAll(fromService, ".", "_"), "-", "_"),
// strings.ReplaceAll(strings.ReplaceAll(toService, ".", "_"), "-", "_"),
// strings.ReplaceAll(strings.ReplaceAll(from, ".", "_"), "-", "_"),
// strings.ReplaceAll(strings.ReplaceAll(to, ".", "_"), "-", "_"),
// )
// dependencyDots = append(dependencyDots, dependencyDot)
// }
// }
// dot := fmt.Sprintf(`
// digraph {
// compound = "true"
// newrank = "true"
// ranksep = 0.1;
// %s
// %s
// }`,
// strings.Join(moduleDots, ""),
// strings.Join(dependencyDots, ""),
// )
// exitCode, dotStdout, dotStderr, err := shellExecInputPipe(".", &dot, "dot", "-Tsvg")
// err = errorFromShellExecResult("dot -Tsvg", exitCode, dotStdout, dotStderr, err)
// if err != nil {
// return []byte{}, err
// }
// svgBytes, err := modifyDotSVGAsXML(dotStdout, func(svgDoc *XMLNode) error {
// // correctly set the dot property for services
// svgDoc.WithQuerySelector(
// // 'g.node > title'
// []XMLQuery{XMLQuery{NodeType: "g", Class: "node"}, XMLQuery{NodeType: "title", DirectChild: true}},
// func(node *XMLNode) {
// if node.Parent != nil {
// title := html.UnescapeString(string(node.Content))
// node.Parent.SetAttr("data-dot", title)
// if strings.HasPrefix(title, "service_") {
// node.Parent.SetAttr("class", "resource")
// }
// }
// },
// )
// // correctly set the dot property for the "cluster"s (aka modules)
// moduleNames := []string{}
// svgDoc.WithQuerySelector(
// // 'a[xlink:title]'
// []XMLQuery{XMLQuery{NodeType: "a", Attr: "xlink:title"}},
// func(node *XMLNode) {
// if node.Parent != nil && node.Parent.Parent != nil {
// title := html.UnescapeString(node.GetAttr("xlink:title"))
// title = regexp.MustCompile(`[.-]`).ReplaceAllString(title, "_")
// if node.Parent.Parent.GetAttr("data-dot") == "" {
// moduleNames = append(moduleNames, title)
// node.Parent.Parent.SetAttr("data-dot", title)
// node.Parent.Parent.SetAttr("class", "module")
// }
// }
// },
// )
// // correctly set the dot property for edges
// // in order to get dot to render it correctly, the connections are actually between resources,
// // not between modules. So we have to trim the resource name off of the dot attribute
// // on the connections to make them match what we have in the status JSON object.
// svgDoc.WithQuerySelector(
// // 'g.edge title'
// []XMLQuery{XMLQuery{NodeType: "g", Class: "edge"}, XMLQuery{NodeType: "title"}},
// func(node *XMLNode) {
// if node.Parent != nil {
// title := html.UnescapeString(string(node.Content))
// fromTo := strings.Split(title, "->")
// if len(fromTo) != 2 {
// fmt.Printf("malformed dot string on svg edge: fromTo.length != 2: %s", title)
// }
// // if this edge starts or ends at a resource inside a module, override the dot string
// // to make it start or end at that module instead.
// for _, moduleName := range moduleNames {
// if strings.HasPrefix(strings.TrimPrefix(fromTo[0], "service_"), moduleName) {
// fromTo[0] = moduleName
// }
// if strings.HasPrefix(strings.TrimPrefix(fromTo[1], "service_"), moduleName) {
// fromTo[1] = moduleName
// }
// }
// node.Parent.SetAttr("data-dot", html.EscapeString(fmt.Sprintf("%s->%s", fromTo[0], fromTo[1])))
// }
// },
// )
// // two last things to do:
// // 1. center each resource horizontally within the bounding box of its parent module
// // because dot is dumb and it just kinda throws them wherever
// // 2. create little create/delete/modify icons next to each changed resource
// // similar to how terraform displays -, +, +/-, or ~ on each line-item in the plan
// for moduleName, config := range configs {
// moduleId := regexp.MustCompile(`[.-]`).ReplaceAllString(moduleName, "_")
// svgDoc.WithQuerySelector(
// []XMLQuery{XMLQuery{Attr: "data-dot", AttrValue: moduleId}},
// func(node *XMLNode) {
// moduleRect := node.GetBoundingBox()
// for _, service := range config.Services {
// resourceId := fmt.Sprintf("%s_%s", moduleId, regexp.MustCompile(`[.-]`).ReplaceAllString(service.Name, "_"))
// svgDoc.WithQuerySelector(
// // '[data-dot=module_dns_gandi_gandi_livedns_record_dns_entries]' for example
// []XMLQuery{XMLQuery{Attr: "data-dot", AttrValue: resourceId}},
// func(node *XMLNode) {
// // TODO pass the plan here
// centerResourceAndAddPlannedAction(moduleRect, node, resourceId, "create")
// },
// )
// }
// },
// )
// }
// return nil
// })
// return svgBytes, err
// }