diff --git a/agent/agent.go b/agent/agent.go
new file mode 100644
index 000000000..c76eca72a
--- /dev/null
+++ b/agent/agent.go
@@ -0,0 +1,282 @@
+package agent
+
+import (
+ "fmt"
+ "net/url"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/drone/drone/build"
+ "github.com/drone/drone/model"
+ "github.com/drone/drone/queue"
+ "github.com/drone/drone/version"
+ "github.com/drone/drone/yaml"
+ "github.com/drone/drone/yaml/expander"
+ "github.com/drone/drone/yaml/transform"
+)
+
+type Logger interface {
+ Write(*build.Line)
+}
+
+type Agent struct {
+ Update UpdateFunc
+ Logger LoggerFunc
+ Engine build.Engine
+ Timeout time.Duration
+ Platform string
+ Namespace string
+ Disable []string
+ Escalate []string
+ Netrc []string
+ Local string
+ Pull bool
+}
+
+func (a *Agent) Poll() error {
+
+ // logrus.Infof("Starting build %s/%s#%d.%d",
+ // payload.Repo.Owner, payload.Repo.Name, payload.Build.Number, payload.Job.Number)
+ //
+ //
+ // logrus.Infof("Finished build %s/%s#%d.%d",
+ // payload.Repo.Owner, payload.Repo.Name, payload.Build.Number, payload.Job.Number)
+
+ return nil
+}
+
+func (a *Agent) Run(payload *queue.Work, cancel <-chan bool) error {
+
+ payload.Job.Status = model.StatusRunning
+ payload.Job.Started = time.Now().Unix()
+
+ spec, err := a.prep(payload)
+ if err != nil {
+ payload.Job.Error = err.Error()
+ payload.Job.ExitCode = 255
+ payload.Job.Finished = payload.Job.Started
+ payload.Job.Status = model.StatusError
+ a.Update(payload)
+ return err
+ }
+ a.Update(payload)
+ err = a.exec(spec, payload, cancel)
+
+ if err != nil {
+ payload.Job.ExitCode = 255
+ }
+ if exitErr, ok := err.(*build.ExitError); ok {
+ payload.Job.ExitCode = exitErr.Code
+ }
+
+ payload.Job.Finished = time.Now().Unix()
+
+ switch payload.Job.ExitCode {
+ case 128, 130, 137:
+ payload.Job.Status = model.StatusKilled
+ case 0:
+ payload.Job.Status = model.StatusSuccess
+ default:
+ payload.Job.Status = model.StatusFailure
+ }
+
+ a.Update(payload)
+
+ return err
+}
+
+func (a *Agent) prep(w *queue.Work) (*yaml.Config, error) {
+
+ envs := toEnv(w)
+ w.Yaml = expander.ExpandString(w.Yaml, envs)
+
+ // inject the netrc file into the clone plugin if the repository is
+ // private and requires authentication.
+ var secrets []*model.Secret
+ if w.Verified {
+ secrets = append(secrets, w.Secrets...)
+ }
+
+ if w.Repo.IsPrivate {
+ secrets = append(secrets, &model.Secret{
+ Name: "DRONE_NETRC_USERNAME",
+ Value: w.Netrc.Login,
+ Images: []string{"*"},
+ Events: []string{"*"},
+ })
+ secrets = append(secrets, &model.Secret{
+ Name: "DRONE_NETRC_PASSWORD",
+ Value: w.Netrc.Password,
+ Images: []string{"*"},
+ Events: []string{"*"},
+ })
+ secrets = append(secrets, &model.Secret{
+ Name: "DRONE_NETRC_MACHINE",
+ Value: w.Netrc.Machine,
+ Images: []string{"*"},
+ Events: []string{"*"},
+ })
+ }
+
+ conf, err := yaml.ParseString(w.Yaml)
+ if err != nil {
+ return nil, err
+ }
+
+ src := "src"
+ if url, _ := url.Parse(w.Repo.Link); url != nil {
+ src = filepath.Join(src, url.Host, url.Path)
+ }
+
+ transform.Clone(conf, w.Repo.Kind)
+ transform.Environ(conf, envs)
+ transform.DefaultFilter(conf)
+
+ transform.ImageSecrets(conf, secrets, w.Build.Event)
+ transform.Identifier(conf)
+ transform.WorkspaceTransform(conf, "/drone", src)
+
+ if err := transform.Check(conf, w.Repo.IsTrusted); err != nil {
+ return nil, err
+ }
+
+ transform.CommandTransform(conf)
+ transform.ImagePull(conf, a.Pull)
+ transform.ImageTag(conf)
+ transform.ImageName(conf)
+ transform.ImageNamespace(conf, a.Namespace)
+ transform.ImageEscalate(conf, a.Escalate)
+ transform.PluginParams(conf)
+
+ if a.Local != "" {
+ transform.PluginDisable(conf, a.Disable)
+ transform.ImageVolume(conf, []string{a.Local + ":" + conf.Workspace.Path})
+ }
+
+ transform.Pod(conf)
+
+ return conf, nil
+}
+
+func (a *Agent) exec(spec *yaml.Config, payload *queue.Work, cancel <-chan bool) error {
+
+ conf := build.Config{
+ Engine: a.Engine,
+ Buffer: 500,
+ }
+
+ pipeline := conf.Pipeline(spec)
+ defer pipeline.Teardown()
+
+ // setup the build environment
+ if err := pipeline.Setup(); err != nil {
+ return err
+ }
+
+ timeout := time.After(time.Duration(payload.Repo.Timeout) * time.Minute)
+
+ for {
+ select {
+ case <-pipeline.Done():
+ return pipeline.Err()
+ case <-cancel:
+ pipeline.Stop()
+ return fmt.Errorf("termination request received, build cancelled")
+ case <-timeout:
+ pipeline.Stop()
+ return fmt.Errorf("maximum time limit exceeded, build cancelled")
+ case <-time.After(a.Timeout):
+ pipeline.Stop()
+ return fmt.Errorf("terminal inactive for %v, build cancelled", a.Timeout)
+ case <-pipeline.Next():
+
+ // TODO(bradrydzewski) this entire block of code should probably get
+ // encapsulated in the pipeline.
+ status := model.StatusSuccess
+ if pipeline.Err() != nil {
+ status = model.StatusFailure
+ }
+ // update the build status passed into each container. This is a bit out of
+ // place here and should eventually be moved into the pipeline itself.
+ pipeline.Head().Environment["DRONE_STATUS"] = status
+
+ if !pipeline.Head().Constraints.Match(
+ a.Platform,
+ payload.Build.Deploy,
+ payload.Build.Event,
+ payload.Build.Branch,
+ status, payload.Job.Environment) { // TODO: fix this whole section
+
+ pipeline.Skip()
+ } else {
+ pipeline.Exec()
+ }
+ case line := <-pipeline.Pipe():
+ a.Logger(line)
+ }
+ }
+}
+
+func toEnv(w *queue.Work) map[string]string {
+ envs := map[string]string{
+ "CI": "drone",
+ "DRONE": "true",
+ "DRONE_ARCH": "linux/amd64",
+ "DRONE_REPO": w.Repo.FullName,
+ "DRONE_REPO_SCM": w.Repo.Kind,
+ "DRONE_REPO_OWNER": w.Repo.Owner,
+ "DRONE_REPO_NAME": w.Repo.Name,
+ "DRONE_REPO_LINK": w.Repo.Link,
+ "DRONE_REPO_AVATAR": w.Repo.Avatar,
+ "DRONE_REPO_BRANCH": w.Repo.Branch,
+ "DRONE_REPO_PRIVATE": fmt.Sprintf("%v", w.Repo.IsPrivate),
+ "DRONE_REPO_TRUSTED": fmt.Sprintf("%v", w.Repo.IsTrusted),
+ "DRONE_REMOTE_URL": w.Repo.Clone,
+ "DRONE_COMMIT_SHA": w.Build.Commit,
+ "DRONE_COMMIT_REF": w.Build.Ref,
+ "DRONE_COMMIT_BRANCH": w.Build.Branch,
+ "DRONE_COMMIT_LINK": w.Build.Link,
+ "DRONE_COMMIT_MESSAGE": w.Build.Message,
+ "DRONE_COMMIT_AUTHOR": w.Build.Author,
+ "DRONE_COMMIT_AUTHOR_EMAIL": w.Build.Email,
+ "DRONE_COMMIT_AUTHOR_AVATAR": w.Build.Avatar,
+ "DRONE_BUILD_NUMBER": fmt.Sprintf("%d", w.Build.Number),
+ "DRONE_BUILD_EVENT": w.Build.Event,
+ "DRONE_BUILD_STATUS": w.Build.Status,
+ "DRONE_BUILD_LINK": fmt.Sprintf("%s/%s/%d", w.System.Link, w.Repo.FullName, w.Build.Number),
+ "DRONE_BUILD_CREATED": fmt.Sprintf("%d", w.Build.Created),
+ "DRONE_BUILD_STARTED": fmt.Sprintf("%d", w.Build.Started),
+ "DRONE_BUILD_FINISHED": fmt.Sprintf("%d", w.Build.Finished),
+ "DRONE_YAML_VERIFIED": fmt.Sprintf("%v", w.Verified),
+ "DRONE_YAML_SIGNED": fmt.Sprintf("%v", w.Signed),
+ "DRONE_BRANCH": w.Build.Branch,
+ "DRONE_COMMIT": w.Build.Commit,
+ "DRONE_VERSION": version.Version,
+ }
+
+ if w.Build.Event == model.EventTag {
+ envs["DRONE_TAG"] = strings.TrimPrefix(w.Build.Ref, "refs/tags/")
+ }
+ if w.Build.Event == model.EventPull {
+ envs["DRONE_PULL_REQUEST"] = pullRegexp.FindString(w.Build.Ref)
+ }
+ if w.Build.Event == model.EventDeploy {
+ envs["DRONE_DEPLOY_TO"] = w.Build.Deploy
+ }
+
+ if w.BuildLast != nil {
+ envs["DRONE_PREV_BUILD_STATUS"] = w.BuildLast.Status
+ envs["DRONE_PREV_BUILD_NUMBER"] = fmt.Sprintf("%v", w.BuildLast.Number)
+ envs["DRONE_PREV_COMMIT_SHA"] = w.BuildLast.Commit
+ }
+
+ // inject matrix values as environment variables
+ for key, val := range w.Job.Environment {
+ envs[key] = val
+ }
+ return envs
+}
+
+var pullRegexp = regexp.MustCompile("\\d+")
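
For context, a minimal sketch of how a caller might drive Agent.Run with a cancel channel. The engine and payload values are placeholders; the real wiring appears in drone/agent/exec.go and drone/exec.go later in this diff.

    cancel := make(chan bool, 1)
    a := agent.Agent{
        Update:   agent.NoopUpdateFunc, // or agent.NewClientUpdater(client)
        Logger:   agent.TermLoggerFunc, // or agent.NewClientLogger(...)
        Engine:   engine,               // e.g. docker.NewClient(dockerClient)
        Timeout:  15 * time.Minute,     // inactivity timeout
        Platform: "linux/amd64",
    }
    if err := a.Run(payload, cancel); err != nil {
        logrus.Errorf("build failed: %s", err)
    }
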
diff --git a/agent/updater.go b/agent/updater.go
new file mode 100644
index 000000000..d207876f3
--- /dev/null
+++ b/agent/updater.go
@@ -0,0 +1,62 @@
+package agent
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/drone/drone/build"
+ "github.com/drone/drone/client"
+ "github.com/drone/drone/queue"
+)
+
+// UpdateFunc handles build pipeline status updates.
+type UpdateFunc func(*queue.Work)
+
+// LoggerFunc handles build pipeline logging updates.
+type LoggerFunc func(*build.Line)
+
+var NoopUpdateFunc = func(*queue.Work) {}
+
+var TermLoggerFunc = func(line *build.Line) {
+ fmt.Println(line)
+}
+
+// NewClientUpdater returns an updater that sends updated build details
+// to the drone server.
+func NewClientUpdater(client client.Client) UpdateFunc {
+ return func(w *queue.Work) {
+ for {
+ err := client.Push(w)
+ if err == nil {
+ return
+ }
+ logrus.Errorf("Error updating %s/%s#%d.%d. Retry in 30s. %s",
+ w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number, err)
+ logrus.Infof("Retry update in 30s")
+ time.Sleep(time.Second * 30)
+ }
+ }
+}
+
+func NewClientLogger(client client.Client, id int64, rc io.ReadCloser, wc io.WriteCloser) LoggerFunc {
+ var once sync.Once
+ return func(line *build.Line) {
+ // annoying hack to only start streaming once the first line is written
+ once.Do(func() {
+ go func() {
+ err := client.Stream(id, rc)
+ if err != nil && err != io.ErrClosedPipe {
+ logrus.Errorf("Error streaming build logs. %s", err)
+ }
+ }()
+ })
+
+ linejson, _ := json.Marshal(line)
+ wc.Write(linejson)
+ wc.Write([]byte{'\n'})
+ }
+}
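
A rough sketch of wiring NewClientLogger with an io.Pipe, mirroring the usage in drone/agent/exec.go further down. The droneClient and jobID names are placeholders.

    rc, wc := io.Pipe()
    defer wc.Close()
    defer rc.Close()

    logger := agent.NewClientLogger(droneClient, jobID, rc, wc)
    // the first invocation starts streaming the read end to the server
    logger(&build.Line{Proc: "clone", Out: "cloning repository"})
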
diff --git a/build/config.go b/build/config.go
new file mode 100644
index 000000000..d8d3232eb
--- /dev/null
+++ b/build/config.go
@@ -0,0 +1,48 @@
+package build
+
+import "github.com/drone/drone/yaml"
+
+// Config defines the configuration for creating the Pipeline.
+type Config struct {
+ Engine Engine
+
+ // Buffer defines the size of the buffer for the channel to which the
+ // console output is streamed.
+ Buffer uint
+}
+
+// Pipeline creates a build Pipeline using the specific configuration for
+// the given Yaml specification.
+func (c *Config) Pipeline(spec *yaml.Config) *Pipeline {
+
+ pipeline := Pipeline{
+ engine: c.Engine,
+ pipe: make(chan *Line, c.Buffer),
+ next: make(chan error),
+ done: make(chan error),
+ }
+
+ var containers []*yaml.Container
+ containers = append(containers, spec.Services...)
+ containers = append(containers, spec.Pipeline...)
+
+ for _, c := range containers {
+ if c.Disabled {
+ continue
+ }
+ next := &element{Container: c}
+ if pipeline.head == nil {
+ pipeline.head = next
+ pipeline.tail = next
+ } else {
+ pipeline.tail.next = next
+ pipeline.tail = next
+ }
+ }
+
+ go func() {
+ pipeline.next <- nil
+ }()
+
+ return &pipeline
+}
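
The returned Pipeline is driven by the caller one step at a time. A condensed sketch of the consumption loop, based on Agent.exec above (error handling for brevity only):

    pipeline := conf.Pipeline(spec)
    defer pipeline.Teardown()
    if err := pipeline.Setup(); err != nil {
        return err
    }
    for {
        select {
        case <-pipeline.Done():
            return pipeline.Err()
        case <-pipeline.Next():
            pipeline.Exec() // or pipeline.Skip() when constraints do not match
        case line := <-pipeline.Pipe():
            fmt.Println(line)
        }
    }
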
diff --git a/engine/runner/docker/docker.go b/build/docker/docker.go
similarity index 86%
rename from engine/runner/docker/docker.go
rename to build/docker/docker.go
index 9e1bb4606..4f8416ee7 100644
--- a/engine/runner/docker/docker.go
+++ b/build/docker/docker.go
@@ -3,8 +3,9 @@ package docker
import (
"io"
- "github.com/drone/drone/engine/runner"
- "github.com/drone/drone/engine/runner/docker/internal"
+ "github.com/drone/drone/build"
+ "github.com/drone/drone/build/docker/internal"
+ "github.com/drone/drone/yaml"
"github.com/samalba/dockerclient"
)
@@ -13,7 +14,7 @@ type dockerEngine struct {
client dockerclient.Client
}
-func (e *dockerEngine) ContainerStart(container *runner.Container) (string, error) {
+func (e *dockerEngine) ContainerStart(container *yaml.Container) (string, error) {
conf := toContainerConfig(container)
auth := toAuthConfig(container)
@@ -25,7 +26,7 @@ func (e *dockerEngine) ContainerStart(container *runner.Container) (string, erro
}
// create and start the container and return the Container ID.
- id, err := e.client.CreateContainer(conf, container.Name, auth)
+ id, err := e.client.CreateContainer(conf, container.ID, auth)
if err != nil {
return id, err
}
@@ -52,7 +53,7 @@ func (e *dockerEngine) ContainerRemove(id string) error {
return nil
}
-func (e *dockerEngine) ContainerWait(id string) (*runner.State, error) {
+func (e *dockerEngine) ContainerWait(id string) (*build.State, error) {
// wait for the container to exit
//
// TODO(bradrydzewski) we should have a for loop here
@@ -64,7 +65,7 @@ func (e *dockerEngine) ContainerWait(id string) (*runner.State, error) {
if err != nil {
return nil, err
}
- return &runner.State{
+ return &build.State{
ExitCode: v.State.ExitCode,
OOMKilled: v.State.OOMKilled,
}, nil
diff --git a/engine/runner/docker/docker_test.go b/build/docker/docker_test.go
similarity index 100%
rename from engine/runner/docker/docker_test.go
rename to build/docker/docker_test.go
diff --git a/build/docker/helper.go b/build/docker/helper.go
new file mode 100644
index 000000000..46bcc3cdb
--- /dev/null
+++ b/build/docker/helper.go
@@ -0,0 +1,25 @@
+package docker
+
+import (
+ "github.com/drone/drone/build"
+ "github.com/samalba/dockerclient"
+)
+
+// NewClient returns a new Docker engine using the provided Docker client.
+func NewClient(client dockerclient.Client) build.Engine {
+ return &dockerEngine{client}
+}
+
+// New returns a new Docker engine for the given daemon host, certificate path,
+// and TLS verification setting (typically DOCKER_HOST and DOCKER_CERT_PATH).
+func New(host, cert string, tls bool) (build.Engine, error) {
+ config, err := dockerclient.TLSConfigFromCertPath(cert)
+ if err == nil && tls {
+ config.InsecureSkipVerify = true
+ }
+ client, err := dockerclient.NewDockerClient(host, config)
+ if err != nil {
+ return nil, err
+ }
+ return NewClient(client), nil
+}
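
A sketch of constructing the engine from the standard Docker environment variables, similar to what the exec command does below; the os.Getenv calls are illustrative only.

    engine, err := docker.New(
        os.Getenv("DOCKER_HOST"),      // e.g. unix:///var/run/docker.sock
        os.Getenv("DOCKER_CERT_PATH"),
        os.Getenv("DOCKER_TLS_VERIFY") == "1",
    )
    if err != nil {
        return err
    }
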
diff --git a/engine/runner/docker/helper_test.go b/build/docker/helper_test.go
similarity index 100%
rename from engine/runner/docker/helper_test.go
rename to build/docker/helper_test.go
diff --git a/engine/runner/docker/internal/README b/build/docker/internal/README
similarity index 100%
rename from engine/runner/docker/internal/README
rename to build/docker/internal/README
diff --git a/engine/runner/docker/internal/stdcopy.go b/build/docker/internal/stdcopy.go
similarity index 100%
rename from engine/runner/docker/internal/stdcopy.go
rename to build/docker/internal/stdcopy.go
diff --git a/engine/runner/docker/internal/stdcopy_test.go b/build/docker/internal/stdcopy_test.go
similarity index 100%
rename from engine/runner/docker/internal/stdcopy_test.go
rename to build/docker/internal/stdcopy_test.go
diff --git a/engine/runner/docker/util.go b/build/docker/util.go
similarity index 83%
rename from engine/runner/docker/util.go
rename to build/docker/util.go
index 2d35fee7c..c4b9d3e4d 100644
--- a/engine/runner/docker/util.go
+++ b/build/docker/util.go
@@ -4,13 +4,13 @@ import (
"fmt"
"strings"
- "github.com/drone/drone/engine/runner"
+ "github.com/drone/drone/yaml"
"github.com/samalba/dockerclient"
)
// helper function that converts the Continer data structure to the exepcted
// dockerclient.ContainerConfig.
-func toContainerConfig(c *runner.Container) *dockerclient.ContainerConfig {
+func toContainerConfig(c *yaml.Container) *dockerclient.ContainerConfig {
config := &dockerclient.ContainerConfig{
Image: c.Image,
Env: toEnvironmentSlice(c.Environment),
@@ -77,17 +77,15 @@ func toContainerConfig(c *runner.Container) *dockerclient.ContainerConfig {
// helper function that converts the AuthConfig data structure to the exepcted
// dockerclient.AuthConfig.
-func toAuthConfig(container *runner.Container) *dockerclient.AuthConfig {
+func toAuthConfig(container *yaml.Container) *dockerclient.AuthConfig {
if container.AuthConfig.Username == "" &&
- container.AuthConfig.Password == "" &&
- container.AuthConfig.Token == "" {
+ container.AuthConfig.Password == "" {
return nil
}
return &dockerclient.AuthConfig{
- Email: container.AuthConfig.Email,
- Username: container.AuthConfig.Username,
- Password: container.AuthConfig.Password,
- RegistryToken: container.AuthConfig.Token,
+ Email: container.AuthConfig.Email,
+ Username: container.AuthConfig.Username,
+ Password: container.AuthConfig.Password,
}
}
diff --git a/engine/runner/docker/util_test.go b/build/docker/util_test.go
similarity index 100%
rename from engine/runner/docker/util_test.go
rename to build/docker/util_test.go
diff --git a/build/engine.go b/build/engine.go
new file mode 100644
index 000000000..b93065680
--- /dev/null
+++ b/build/engine.go
@@ -0,0 +1,16 @@
+package build
+
+import (
+ "io"
+
+ "github.com/drone/drone/yaml"
+)
+
+// Engine defines the container runtime engine.
+type Engine interface {
+ ContainerStart(*yaml.Container) (string, error)
+ ContainerStop(string) error
+ ContainerRemove(string) error
+ ContainerWait(string) (*State, error)
+ ContainerLogs(string) (io.ReadCloser, error)
+}
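
Because the pipeline depends only on this interface, a stub engine is enough for unit tests. A hypothetical fake (not part of this change; assumes io, io/ioutil and strings imports) might look like:

    type fakeEngine struct{}

    func (fakeEngine) ContainerStart(*yaml.Container) (string, error) { return "fake", nil }
    func (fakeEngine) ContainerStop(string) error                     { return nil }
    func (fakeEngine) ContainerRemove(string) error                   { return nil }
    func (fakeEngine) ContainerWait(string) (*State, error)           { return &State{ExitCode: 0}, nil }
    func (fakeEngine) ContainerLogs(string) (io.ReadCloser, error) {
        return ioutil.NopCloser(strings.NewReader("hello world\n")), nil
    }
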
diff --git a/engine/runner/error.go b/build/error.go
similarity index 98%
rename from engine/runner/error.go
rename to build/error.go
index e10040cb6..a92573eca 100644
--- a/engine/runner/error.go
+++ b/build/error.go
@@ -1,4 +1,4 @@
-package runner
+package build
import (
"errors"
diff --git a/engine/runner/error_test.go b/build/error_test.go
similarity index 97%
rename from engine/runner/error_test.go
rename to build/error_test.go
index 4bee938dd..0e99a2119 100644
--- a/engine/runner/error_test.go
+++ b/build/error_test.go
@@ -1,4 +1,4 @@
-package runner
+package build
import (
"testing"
diff --git a/build/pipeline.go b/build/pipeline.go
new file mode 100644
index 000000000..1c17274dc
--- /dev/null
+++ b/build/pipeline.go
@@ -0,0 +1,169 @@
+package build
+
+import (
+ "bufio"
+ "time"
+
+ "github.com/drone/drone/yaml"
+)
+
+// element represents a link in the linked list.
+type element struct {
+ *yaml.Container
+ next *element
+}
+
+// Pipeline represents a build pipeline.
+type Pipeline struct {
+ conf *yaml.Config
+ head *element
+ tail *element
+ pipe chan (*Line)
+ next chan (error)
+ done chan (error)
+ err error
+
+ containers []string
+ volumes []string
+ networks []string
+
+ engine Engine
+}
+
+// Done returns when the process is done executing.
+func (p *Pipeline) Done() <-chan error {
+ return p.done
+}
+
+// Err returns the error for the current process.
+func (p *Pipeline) Err() error {
+ return p.err
+}
+
+// Next returns the next step in the process.
+func (p *Pipeline) Next() <-chan error {
+ return p.next
+}
+
+// Exec executes the current step.
+func (p *Pipeline) Exec() {
+ go func() {
+ err := p.exec(p.head.Container)
+ if err != nil {
+ p.err = err
+ }
+ p.step()
+ }()
+}
+
+// Skip skips the current step.
+func (p *Pipeline) Skip() {
+ p.step()
+}
+
+// Pipe returns the build output pipe.
+func (p *Pipeline) Pipe() <-chan *Line {
+ return p.pipe
+}
+
+// Head returns the head item in the list.
+func (p *Pipeline) Head() *yaml.Container {
+ return p.head.Container
+}
+
+// Tail returns the tail item in the list.
+func (p *Pipeline) Tail() *yaml.Container {
+ return p.tail.Container
+}
+
+// Stop stops the pipeline.
+func (p *Pipeline) Stop() {
+ go func() {
+ p.done <- ErrTerm
+ }()
+}
+
+// Setup prepares the build pipeline environment.
+func (p *Pipeline) Setup() error {
+ return nil
+}
+
+// Teardown removes the pipeline environment.
+func (p *Pipeline) Teardown() {
+ for _, id := range p.containers {
+ p.engine.ContainerRemove(id)
+ }
+ close(p.next)
+ close(p.done)
+
+ // TODO we have a race condition here where the program can try to async
+ // write to a closed pipe channel. This package, in general, needs to be
+ // tested for race conditions.
+ // close(p.pipe)
+}
+
+// step steps through the pipeline to head.next
+func (p *Pipeline) step() {
+ if p.head == p.tail {
+ go func() {
+ p.done <- nil
+ }()
+ } else {
+ go func() {
+ p.head = p.head.next
+ p.next <- nil
+ }()
+ }
+}
+
+// close closes open channels and signals the pipeline is done.
+func (p *Pipeline) close(err error) {
+ go func() {
+ p.done <- err
+ }()
+}
+
+func (p *Pipeline) exec(c *yaml.Container) error {
+ name, err := p.engine.ContainerStart(c)
+ if err != nil {
+ return err
+ }
+ p.containers = append(p.containers, name)
+
+ go func() {
+ rc, rerr := p.engine.ContainerLogs(name)
+ if rerr != nil {
+ return
+ }
+ defer rc.Close()
+
+ num := 0
+ now := time.Now().UTC()
+ scanner := bufio.NewScanner(rc)
+ for scanner.Scan() {
+ p.pipe <- &Line{
+ Proc: c.Name,
+ Time: int64(time.Since(now).Seconds()),
+ Pos: num,
+ Out: scanner.Text(),
+ }
+ num++
+ }
+ }()
+
+ // detached containers run in the background; return without waiting for them
+ if c.Detached {
+ return nil
+ }
+
+ state, err := p.engine.ContainerWait(name)
+ if err != nil {
+ return err
+ }
+ if state.OOMKilled {
+ return &OomError{c.Name}
+ } else if state.ExitCode != 0 {
+ return &ExitError{c.Name, state.ExitCode}
+ }
+ return nil
+}
diff --git a/build/pipeline_test.go b/build/pipeline_test.go
new file mode 100644
index 000000000..639d146f5
--- /dev/null
+++ b/build/pipeline_test.go
@@ -0,0 +1,42 @@
+package build
+
+var sampleYaml = `
+image: hello-world
+build:
+ context: .
+ dockerfile: Dockerfile
+
+workspace:
+ path: src/github.com/octocat/hello-world
+ base: /go
+
+pipeline:
+ test:
+ image: golang
+ commands:
+ - go install
+ - go test
+ build:
+ image: golang
+ commands:
+ - go build
+ when:
+ event: push
+ notify:
+ image: slack
+ channel: dev
+ when:
+ event: failure
+
+services:
+ database:
+ image: mysql
+
+networks:
+ custom:
+ driver: overlay
+
+volumes:
+ custom:
+ driver: blockbridge
+`
diff --git a/build/types.go b/build/types.go
new file mode 100644
index 000000000..44d12633a
--- /dev/null
+++ b/build/types.go
@@ -0,0 +1,22 @@
+package build
+
+import "fmt"
+
+// Line is a line of console output.
+type Line struct {
+ Proc string `json:"proc,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ Type int `json:"type,omitempty"`
+ Pos int `json:"pos,omitempty"`
+ Out string `json:"out,omitempty"`
+}
+
+func (l *Line) String() string {
+ return fmt.Sprintf("[%s:L%v:%vs] %s", l.Proc, l.Pos, l.Time, l.Out)
+}
+
+// State defines the state of the container.
+type State struct {
+ ExitCode int // container exit code
+ OOMKilled bool // container exited due to oom error
+}
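
For reference, how a Line renders in the terminal logger versus the JSON log stream, matching the test below:

    line := Line{Proc: "redis", Time: 60, Pos: 1, Out: "starting redis server"}
    fmt.Println(line.String()) // [redis:L1:60s] starting redis server
    out, _ := json.Marshal(&line)
    fmt.Println(string(out))   // {"proc":"redis","time":60,"pos":1,"out":"starting redis server"}
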
diff --git a/build/types_test.go b/build/types_test.go
new file mode 100644
index 000000000..c0fc0abac
--- /dev/null
+++ b/build/types_test.go
@@ -0,0 +1,23 @@
+package build
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestLine(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Line output", func() {
+ g.It("should prefix string() with metadata", func() {
+ line := Line{
+ Proc: "redis",
+ Time: 60,
+ Pos: 1,
+ Out: "starting redis server",
+ }
+ g.Assert(line.String()).Equal("[redis:L1:60s] starting redis server")
+ })
+ })
+}
diff --git a/drone/agent/agent.go b/drone/agent/agent.go
index e8321b139..32405a3e6 100644
--- a/drone/agent/agent.go
+++ b/drone/agent/agent.go
@@ -141,6 +141,10 @@ func start(c *cli.Context) {
} else {
logrus.SetLevel(logrus.WarnLevel)
}
+ logrus.Infof("Connecting to %s with token %s",
+ c.String("drone-server"),
+ c.String("drone-token"),
+ )
client := client.NewClientToken(
c.String("drone-server"),
diff --git a/drone/agent/exec.go b/drone/agent/exec.go
index 3e03af759..1df73378e 100644
--- a/drone/agent/exec.go
+++ b/drone/agent/exec.go
@@ -1,27 +1,15 @@
package agent
import (
- "encoding/json"
- "fmt"
"io"
- "regexp"
- "strings"
"time"
"github.com/Sirupsen/logrus"
- "github.com/dchest/uniuri"
+ "github.com/drone/drone/agent"
+ "github.com/drone/drone/build/docker"
"github.com/drone/drone/client"
- "github.com/drone/drone/engine/compiler"
- "github.com/drone/drone/engine/compiler/builtin"
- "github.com/drone/drone/engine/runner"
- "github.com/drone/drone/engine/runner/docker"
- "github.com/drone/drone/model"
- "github.com/drone/drone/queue"
- "github.com/drone/drone/version"
- "github.com/drone/drone/yaml/expander"
"github.com/samalba/dockerclient"
- "golang.org/x/net/context"
)
type config struct {
@@ -48,233 +36,45 @@ func (r *pipeline) run() error {
logrus.Infof("Starting build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
- w.Job.Status = model.StatusRunning
- w.Job.Started = time.Now().Unix()
+ cancel := make(chan bool, 1)
+ engine := docker.NewClient(r.docker)
- prefix := fmt.Sprintf("drone_%s", uniuri.New())
+ // streaming the logs
+ rc, wc := io.Pipe()
+ defer func() {
+ wc.Close()
+ rc.Close()
+ }()
- envs := toEnv(w)
- w.Yaml = expander.ExpandString(w.Yaml, envs)
-
- // inject the netrc file into the clone plugin if the repositroy is
- // private and requires authentication.
- var secrets []*model.Secret
- if w.Verified {
- secrets = append(secrets, w.Secrets...)
+ a := agent.Agent{
+ Update: agent.NewClientUpdater(r.drone),
+ Logger: agent.NewClientLogger(r.drone, w.Job.ID, rc, wc),
+ Engine: engine,
+ Timeout: time.Minute * 15,
+ Platform: r.config.platform,
+ Namespace: r.config.namespace,
+ Escalate: r.config.privileged,
+ Pull: r.config.pull,
}
- if w.Repo.IsPrivate {
- secrets = append(secrets, &model.Secret{
- Name: "DRONE_NETRC_USERNAME",
- Value: w.Netrc.Login,
- Images: []string{"*"},
- Events: []string{"*"},
- })
- secrets = append(secrets, &model.Secret{
- Name: "DRONE_NETRC_PASSWORD",
- Value: w.Netrc.Password,
- Images: []string{"*"},
- Events: []string{"*"},
- })
- secrets = append(secrets, &model.Secret{
- Name: "DRONE_NETRC_MACHINE",
- Value: w.Netrc.Machine,
- Images: []string{"*"},
- Events: []string{"*"},
- })
- }
-
- var lastStatus string
- if w.BuildLast != nil {
- lastStatus = w.BuildLast.Status
- }
-
- trans := []compiler.Transform{
- builtin.NewCloneOp(w.Repo.Kind, true),
- builtin.NewSecretOp(w.Build.Event, secrets),
- builtin.NewNormalizeOp(r.config.namespace),
- builtin.NewWorkspaceOp("/drone", "/drone/src/github.com/"+w.Repo.FullName),
- builtin.NewValidateOp(
- w.Repo.IsTrusted,
- r.config.whitelist,
- ),
- builtin.NewEnvOp(envs),
- builtin.NewShellOp(builtin.Linux_adm64),
- builtin.NewArgsOp(),
- builtin.NewEscalateOp(r.config.privileged),
- builtin.NewPodOp(prefix),
- builtin.NewAliasOp(prefix),
- builtin.NewPullOp(r.config.pull),
- builtin.NewFilterOp(
- lastStatus,
- w.Build.Branch,
- w.Build.Event,
- w.Build.Deploy,
- w.Job.Environment,
- ),
- }
-
- compile := compiler.New()
- compile.Transforms(trans)
- spec, err := compile.CompileString(w.Yaml)
- if err != nil {
- w.Job.Error = err.Error()
- w.Job.ExitCode = 255
- w.Job.Finished = w.Job.Started
- w.Job.Status = model.StatusError
- pushRetry(r.drone, w)
- return nil
- }
-
- pushRetry(r.drone, w)
-
- conf := runner.Config{
- Engine: docker.New(r.docker),
- }
-
- c := context.TODO()
- c, timout := context.WithTimeout(c, time.Minute*time.Duration(w.Repo.Timeout))
- c, cancel := context.WithCancel(c)
- defer cancel()
- defer timout()
-
- run := conf.Runner(c, spec)
- run.Run()
-
+ // signal for canceling the build.
wait := r.drone.Wait(w.Job.ID)
defer wait.Cancel()
go func() {
if _, err := wait.Done(); err == nil {
+ cancel <- true
logrus.Infof("Cancel build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
- cancel()
}
}()
- rc, wc := io.Pipe()
- go func() {
- // TODO(bradrydzewski) figure out how to resume upload on failure
- err := r.drone.Stream(w.Job.ID, rc)
- if err != nil && err != io.ErrClosedPipe {
- logrus.Errorf("Error streaming build logs. %s", err)
- }
- }()
+ a.Run(w, cancel)
- pipe := run.Pipe()
- for {
- line := pipe.Next()
- if line == nil {
- break
- }
- linejson, _ := json.Marshal(line)
- wc.Write(linejson)
- wc.Write([]byte{'\n'})
- }
-
- err = run.Wait()
-
- pipe.Close()
wc.Close()
rc.Close()
- // catch the build result
- if err != nil {
- w.Job.ExitCode = 255
- }
- if exitErr, ok := err.(*runner.ExitError); ok {
- w.Job.ExitCode = exitErr.Code
- }
-
- w.Job.Finished = time.Now().Unix()
-
- switch w.Job.ExitCode {
- case 128, 130, 137:
- w.Job.Status = model.StatusKilled
- case 0:
- w.Job.Status = model.StatusSuccess
- default:
- w.Job.Status = model.StatusFailure
- }
-
- pushRetry(r.drone, w)
-
logrus.Infof("Finished build %s/%s#%d.%d",
w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number)
return nil
}
-
-func pushRetry(client client.Client, w *queue.Work) {
- for {
- err := client.Push(w)
- if err == nil {
- return
- }
- logrus.Errorf("Error updating %s/%s#%d.%d. Retry in 30s. %s",
- w.Repo.Owner, w.Repo.Name, w.Build.Number, w.Job.Number, err)
- logrus.Infof("Retry update in 30s")
- time.Sleep(time.Second * 30)
- }
-}
-
-func toEnv(w *queue.Work) map[string]string {
- envs := map[string]string{
- "CI": "drone",
- "DRONE": "true",
- "DRONE_ARCH": "linux_amd64",
- "DRONE_REPO": w.Repo.FullName,
- "DRONE_REPO_SCM": w.Repo.Kind,
- "DRONE_REPO_OWNER": w.Repo.Owner,
- "DRONE_REPO_NAME": w.Repo.Name,
- "DRONE_REPO_LINK": w.Repo.Link,
- "DRONE_REPO_AVATAR": w.Repo.Avatar,
- "DRONE_REPO_BRANCH": w.Repo.Branch,
- "DRONE_REPO_PRIVATE": fmt.Sprintf("%v", w.Repo.IsPrivate),
- "DRONE_REPO_TRUSTED": fmt.Sprintf("%v", w.Repo.IsTrusted),
- "DRONE_REMOTE_URL": w.Repo.Clone,
- "DRONE_COMMIT_SHA": w.Build.Commit,
- "DRONE_COMMIT_REF": w.Build.Ref,
- "DRONE_COMMIT_BRANCH": w.Build.Branch,
- "DRONE_COMMIT_LINK": w.Build.Link,
- "DRONE_COMMIT_MESSAGE": w.Build.Message,
- "DRONE_COMMIT_AUTHOR": w.Build.Author,
- "DRONE_COMMIT_AUTHOR_EMAIL": w.Build.Email,
- "DRONE_COMMIT_AUTHOR_AVATAR": w.Build.Avatar,
- "DRONE_BUILD_NUMBER": fmt.Sprintf("%d", w.Build.Number),
- "DRONE_BUILD_EVENT": w.Build.Event,
- "DRONE_BUILD_STATUS": w.Build.Status,
- "DRONE_BUILD_LINK": fmt.Sprintf("%s/%s/%d", w.System.Link, w.Repo.FullName, w.Build.Number),
- "DRONE_BUILD_CREATED": fmt.Sprintf("%d", w.Build.Created),
- "DRONE_BUILD_STARTED": fmt.Sprintf("%d", w.Build.Started),
- "DRONE_BUILD_FINISHED": fmt.Sprintf("%d", w.Build.Finished),
- "DRONE_YAML_VERIFIED": fmt.Sprintf("%v", w.Verified),
- "DRONE_YAML_SIGNED": fmt.Sprintf("%v", w.Signed),
- "DRONE_BRANCH": w.Build.Branch,
- "DRONE_COMMIT": w.Build.Commit,
- "DRONE_VERSION": version.Version,
- }
-
- if w.Build.Event == model.EventTag {
- envs["DRONE_TAG"] = strings.TrimPrefix(w.Build.Ref, "refs/tags/")
- }
- if w.Build.Event == model.EventPull {
- envs["DRONE_PULL_REQUEST"] = pullRegexp.FindString(w.Build.Ref)
- }
- if w.Build.Event == model.EventDeploy {
- envs["DRONE_DEPLOY_TO"] = w.Build.Deploy
- }
-
- if w.BuildLast != nil {
- envs["DRONE_PREV_BUILD_STATUS"] = w.BuildLast.Status
- envs["DRONE_PREV_BUILD_NUMBER"] = fmt.Sprintf("%v", w.BuildLast.Number)
- envs["DRONE_PREV_COMMIT_SHA"] = w.BuildLast.Commit
- }
-
- // inject matrix values as environment variables
- for key, val := range w.Job.Environment {
- envs[key] = val
- }
- return envs
-}
-
-var pullRegexp = regexp.MustCompile("\\d+")
diff --git a/drone/exec.go b/drone/exec.go
index 06ab7d0f9..0bdabc347 100644
--- a/drone/exec.go
+++ b/drone/exec.go
@@ -1 +1,424 @@
package main
+
+import (
+ "io/ioutil"
+ "log"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/drone/drone/agent"
+ "github.com/drone/drone/build/docker"
+ "github.com/drone/drone/model"
+ "github.com/drone/drone/queue"
+
+ "github.com/codegangsta/cli"
+)
+
+var execCmd = cli.Command{
+ Name: "exec",
+ Usage: "execute a local build",
+ Action: func(c *cli.Context) {
+ if err := exec(c); err != nil {
+ log.Fatalln(err)
+ }
+ },
+ Flags: []cli.Flag{
+ cli.BoolTFlag{
+ Name: "local",
+ Usage: "build from local directory",
+ EnvVar: "DRONE_LOCAL",
+ },
+ cli.StringSliceFlag{
+ Name: "plugin",
+ Usage: "plugin steps to enable",
+ EnvVar: "DRONE_PLUGIN_ENABLE",
+ },
+ cli.StringSliceFlag{
+ Name: "secret",
+ Usage: "build secrets in KEY=VALUE format",
+ EnvVar: "DRONE_SECRET",
+ },
+ cli.StringSliceFlag{
+ Name: "matrix",
+ Usage: "build matrix in KEY=VALUE format",
+ EnvVar: "DRONE_MATRIX",
+ },
+ cli.DurationFlag{
+ Name: "timeout",
+ Usage: "build timeout",
+ Value: time.Hour,
+ EnvVar: "DRONE_TIMEOUT",
+ },
+ cli.DurationFlag{
+ Name: "timeout.inactivity",
+ Usage: "build timeout for inactivity",
+ Value: time.Minute * 15,
+ EnvVar: "DRONE_TIMEOUT_INACTIVITY",
+ },
+ cli.BoolFlag{
+ EnvVar: "DRONE_PLUGIN_PULL",
+ Name: "pull",
+ Usage: "always pull latest plugin images",
+ },
+ cli.StringFlag{
+ EnvVar: "DRONE_PLUGIN_NAMESPACE",
+ Name: "namespace",
+ Value: "plugins",
+ Usage: "default plugin image namespace",
+ },
+ cli.StringSliceFlag{
+ EnvVar: "DRONE_PLUGIN_PRIVILEGED",
+ Name: "privileged",
+ Usage: "plugins that require privileged mode",
+ Value: &cli.StringSlice{
+ "plugins/docker",
+ "plugins/docker:*",
+ "plguins/gcr",
+ "plguins/gcr:*",
+ "plugins/ecr",
+ "plugins/ecr:*",
+ },
+ },
+
+ // Docker daemon flags
+
+ cli.StringFlag{
+ EnvVar: "DOCKER_HOST",
+ Name: "docker-host",
+ Usage: "docker deamon address",
+ Value: "unix:///var/run/docker.sock",
+ },
+ cli.BoolFlag{
+ EnvVar: "DOCKER_TLS_VERIFY",
+ Name: "docker-tls-verify",
+ Usage: "docker daemon supports tlsverify",
+ },
+ cli.StringFlag{
+ EnvVar: "DOCKER_CERT_PATH",
+ Name: "docker-cert-path",
+ Usage: "docker certificate directory",
+ Value: "",
+ },
+
+ //
+ // Please note the below flags are mirrored in the plugin starter kit and
+ // should be kept synchronized.
+ // https://github.com/drone/drone-plugin-starter
+ //
+
+ cli.StringFlag{
+ Name: "repo.fullname",
+ Usage: "repository full name",
+ EnvVar: "DRONE_REPO",
+ },
+ cli.StringFlag{
+ Name: "repo.owner",
+ Usage: "repository owner",
+ EnvVar: "DRONE_REPO_OWNER",
+ },
+ cli.StringFlag{
+ Name: "repo.name",
+ Usage: "repository name",
+ EnvVar: "DRONE_REPO_NAME",
+ },
+ cli.StringFlag{
+ Name: "repo.type",
+ Value: "git",
+ Usage: "repository type",
+ EnvVar: "DRONE_REPO_SCM",
+ },
+ cli.StringFlag{
+ Name: "repo.link",
+ Usage: "repository link",
+ EnvVar: "DRONE_REPO_LINK",
+ },
+ cli.StringFlag{
+ Name: "repo.avatar",
+ Usage: "repository avatar",
+ EnvVar: "DRONE_REPO_AVATAR",
+ },
+ cli.StringFlag{
+ Name: "repo.branch",
+ Usage: "repository default branch",
+ EnvVar: "DRONE_REPO_BRANCH",
+ },
+ cli.BoolFlag{
+ Name: "repo.private",
+ Usage: "repository is private",
+ EnvVar: "DRONE_REPO_PRIVATE",
+ },
+ cli.BoolFlag{
+ Name: "repo.trusted",
+ Usage: "repository is trusted",
+ EnvVar: "DRONE_REPO_TRUSTED",
+ },
+ cli.StringFlag{
+ Name: "remote.url",
+ Usage: "git remote url",
+ EnvVar: "DRONE_REMOTE_URL",
+ },
+ cli.StringFlag{
+ Name: "commit.sha",
+ Usage: "git commit sha",
+ EnvVar: "DRONE_COMMIT_SHA",
+ },
+ cli.StringFlag{
+ Name: "commit.ref",
+ Value: "refs/heads/master",
+ Usage: "git commit ref",
+ EnvVar: "DRONE_COMMIT_REF",
+ },
+ cli.StringFlag{
+ Name: "commit.branch",
+ Value: "master",
+ Usage: "git commit branch",
+ EnvVar: "DRONE_COMMIT_BRANCH",
+ },
+ cli.StringFlag{
+ Name: "commit.message",
+ Usage: "git commit message",
+ EnvVar: "DRONE_COMMIT_MESSAGE",
+ },
+ cli.StringFlag{
+ Name: "commit.link",
+ Usage: "git commit link",
+ EnvVar: "DRONE_COMMIT_LINK",
+ },
+ cli.StringFlag{
+ Name: "commit.author.name",
+ Usage: "git author name",
+ EnvVar: "DRONE_COMMIT_AUTHOR",
+ },
+ cli.StringFlag{
+ Name: "commit.author.email",
+ Usage: "git author email",
+ EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL",
+ },
+ cli.StringFlag{
+ Name: "commit.author.avatar",
+ Usage: "git author avatar",
+ EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR",
+ },
+ cli.StringFlag{
+ Name: "build.event",
+ Value: "push",
+ Usage: "build event",
+ EnvVar: "DRONE_BUILD_EVENT",
+ },
+ cli.IntFlag{
+ Name: "build.number",
+ Usage: "build number",
+ EnvVar: "DRONE_BUILD_NUMBER",
+ },
+ cli.IntFlag{
+ Name: "build.created",
+ Usage: "build created",
+ EnvVar: "DRONE_BUILD_CREATED",
+ },
+ cli.IntFlag{
+ Name: "build.started",
+ Usage: "build started",
+ EnvVar: "DRONE_BUILD_STARTED",
+ },
+ cli.IntFlag{
+ Name: "build.finished",
+ Usage: "build finished",
+ EnvVar: "DRONE_BUILD_FINISHED",
+ },
+ cli.StringFlag{
+ Name: "build.status",
+ Usage: "build status",
+ Value: "success",
+ EnvVar: "DRONE_BUILD_STATUS",
+ },
+ cli.StringFlag{
+ Name: "build.link",
+ Usage: "build link",
+ EnvVar: "DRONE_BUILD_LINK",
+ },
+ cli.StringFlag{
+ Name: "build.deploy",
+ Usage: "build deployment target",
+ EnvVar: "DRONE_DEPLOY_TO",
+ },
+ cli.BoolTFlag{
+ Name: "yaml.verified",
+ Usage: "build yaml is verified",
+ EnvVar: "DRONE_YAML_VERIFIED",
+ },
+ cli.BoolTFlag{
+ Name: "yaml.signed",
+ Usage: "build yaml is signed",
+ EnvVar: "DRONE_YAML_SIGNED",
+ },
+ cli.IntFlag{
+ Name: "prev.build.number",
+ Usage: "previous build number",
+ EnvVar: "DRONE_PREV_BUILD_NUMBER",
+ },
+ cli.StringFlag{
+ Name: "prev.build.status",
+ Usage: "previous build status",
+ EnvVar: "DRONE_PREV_BUILD_STATUS",
+ },
+ cli.StringFlag{
+ Name: "prev.commit.sha",
+ Usage: "previous build sha",
+ EnvVar: "DRONE_PREV_COMMIT_SHA",
+ },
+
+ cli.StringFlag{
+ Name: "netrc.username",
+ Usage: "previous build sha",
+ EnvVar: "DRONE_NETRC_USERNAME",
+ },
+ cli.StringFlag{
+ Name: "netrc.password",
+ Usage: "previous build sha",
+ EnvVar: "DRONE_NETRC_PASSWORD",
+ },
+ cli.StringFlag{
+ Name: "netrc.machine",
+ Usage: "previous build sha",
+ EnvVar: "DRONE_NETRC_MACHINE",
+ },
+ },
+}
+
+func exec(c *cli.Context) error {
+ sigterm := make(chan os.Signal, 1)
+ cancelc := make(chan bool, 1)
+ signal.Notify(sigterm, os.Interrupt)
+ go func() {
+ <-sigterm
+ cancelc <- true
+ }()
+
+ path := c.Args().First()
+ if path == "" {
+ path = ".drone.yml"
+ }
+ path, _ = filepath.Abs(path)
+ dir := filepath.Dir(path)
+
+ file, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ engine, err := docker.New(
+ c.String("docker-host"),
+ c.String("docker-cert-path"),
+ c.Bool("docker-tls-verify"),
+ )
+ if err != nil {
+ return err
+ }
+
+ a := agent.Agent{
+ Update: agent.NoopUpdateFunc,
+ Logger: agent.TermLoggerFunc,
+ Engine: engine,
+ Timeout: c.Duration("timeout.inactivity"),
+ Platform: "linux/amd64",
+ Namespace: c.String("namespace"),
+ Disable: c.StringSlice("plugin"),
+ Escalate: c.StringSlice("privileged"),
+ Netrc: []string{},
+ Local: dir,
+ Pull: c.Bool("pull"),
+ }
+
+ payload := queue.Work{
+ Yaml: string(file),
+ Verified: c.BoolT("yaml.verified"),
+ Signed: c.BoolT("yaml.signed"),
+ Repo: &model.Repo{
+ FullName: c.String("repo.fullname"),
+ Owner: c.String("repo.owner"),
+ Name: c.String("repo.name"),
+ Kind: c.String("repo.type"),
+ Link: c.String("repo.link"),
+ Branch: c.String("repo.branch"),
+ Avatar: c.String("repo.avatar"),
+ Timeout: int64(c.Duration("timeout").Minutes()),
+ IsPrivate: c.Bool("repo.private"),
+ IsTrusted: c.Bool("repo.trusted"),
+ Clone: c.String("remote.url"),
+ },
+ System: &model.System{
+ Link: c.GlobalString("server"),
+ },
+ Secrets: getSecrets(c),
+ Netrc: &model.Netrc{
+ Login: c.String("netrc.username"),
+ Password: c.String("netrc.password"),
+ Machine: c.String("netrc.machine"),
+ },
+ Build: &model.Build{
+ Commit: c.String("commit.sha"),
+ Branch: c.String("commit.branch"),
+ Ref: c.String("commit.ref"),
+ Link: c.String("commit.link"),
+ Message: c.String("commit.message"),
+ Author: c.String("commit.author.name"),
+ Email: c.String("commit.author.email"),
+ Avatar: c.String("commit.author.avatar"),
+ Number: c.Int("build.number"),
+ Event: c.String("build.event"),
+ Deploy: c.String("build.deploy"),
+ },
+ BuildLast: &model.Build{
+ Number: c.Int("prev.build.number"),
+ Status: c.String("prev.build.status"),
+ Commit: c.String("prev.commit.sha"),
+ },
+ Job: &model.Job{
+ Environment: getMatrix(c),
+ },
+ }
+
+ return a.Run(&payload, cancelc)
+}
+
+// helper function to retrieve matrix variables.
+func getMatrix(c *cli.Context) map[string]string {
+ envs := map[string]string{}
+ for _, s := range c.StringSlice("matrix") {
+ parts := strings.SplitN(s, "=", 2)
+ if len(parts) != 2 {
+ continue
+ }
+ k := parts[0]
+ v := parts[1]
+ envs[k] = v
+ }
+ return envs
+}
+
+// helper function to retrieve secret variables.
+func getSecrets(c *cli.Context) []*model.Secret {
+ var secrets []*model.Secret
+ for _, s := range c.StringSlice("secret") {
+ parts := strings.SplitN(s, "=", 2)
+ if len(parts) != 2 {
+ continue
+ }
+ secret := &model.Secret{
+ Name: parts[0],
+ Value: parts[1],
+ Events: []string{
+ model.EventPull,
+ model.EventPush,
+ model.EventTag,
+ model.EventDeploy,
+ },
+ Images: []string{"*"},
+ }
+ secrets = append(secrets, secret)
+ }
+ return secrets
+}
diff --git a/drone/main.go b/drone/main.go
index fb97b6190..d18470861 100644
--- a/drone/main.go
+++ b/drone/main.go
@@ -34,6 +34,7 @@ func main() {
agent.AgentCmd,
buildCmd,
deployCmd,
+ execCmd,
infoCmd,
secretCmd,
serverCmd,
diff --git a/drone/secert_add.go b/drone/secert_add.go
index 2840e6256..4ab115f83 100644
--- a/drone/secert_add.go
+++ b/drone/secert_add.go
@@ -34,6 +34,10 @@ var secretAddCmd = cli.Command{
Usage: "inject the secret for these image types",
Value: &cli.StringSlice{},
},
+ cli.StringFlag{
+ Name: "input",
+ Usage: "input secret value from a file",
+ },
},
}
@@ -60,8 +64,10 @@ func secretAdd(c *cli.Context) error {
return fmt.Errorf("Please specify the --image parameter")
}
- // allow secret value to come from a file when prefixed with the @ symbol,
- // similar to curl conventions.
+ // TODO(bradrydzewski) below we use an @ symbol to denote that the secret
+ // value should be loaded from a file (inspired by curl). I'd prefer to use
+ // a --input flag to explicitly specify a filepath instead.
+
if strings.HasPrefix(secret.Value, "@") {
path := secret.Value[1:]
out, ferr := ioutil.ReadFile(path)
diff --git a/engine/compiler/builtin/alias.go b/engine/compiler/builtin/alias.go
deleted file mode 100644
index 002cd8fae..000000000
--- a/engine/compiler/builtin/alias.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package builtin
-
-import (
- "fmt"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type aliasOp struct {
- visitor
- index map[string]string
- prefix string
- suffix int
-}
-
-func NewAliasOp(prefix string) Visitor {
- return &aliasOp{
- index: map[string]string{},
- prefix: prefix,
- }
-}
-
-func (v *aliasOp) VisitContainer(node *parse.ContainerNode) error {
- v.suffix++
-
- node.Container.Alias = node.Container.Name
- node.Container.Name = fmt.Sprintf("%s_%d", v.prefix, v.suffix)
- return nil
-}
diff --git a/engine/compiler/builtin/args_test.go b/engine/compiler/builtin/args_test.go
deleted file mode 100644
index 1669d48c7..000000000
--- a/engine/compiler/builtin/args_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_args(t *testing.T) {
-
- g := goblin.Goblin(t)
- g.Describe("plugins arguments", func() {
-
- g.It("should ignore non-plugin containers", func() {
- root := parse.NewRootNode()
- c := root.NewShellNode()
- c.Container = runner.Container{}
- c.Vargs = map[string]interface{}{
- "depth": 50,
- }
-
- ops := NewArgsOp()
- ops.VisitContainer(c)
-
- g.Assert(c.Container.Environment["PLUGIN_DEPTH"]).Equal("")
- })
-
- g.It("should include args as environment variable", func() {
- root := parse.NewRootNode()
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- c.Vargs = map[string]interface{}{
- "depth": 50,
- }
-
- ops := NewArgsOp()
- ops.VisitContainer(c)
-
- g.Assert(c.Container.Environment["PLUGIN_DEPTH"]).Equal("50")
- })
- })
-
-}
diff --git a/engine/compiler/builtin/build.go b/engine/compiler/builtin/build.go
deleted file mode 100644
index 9d4d65163..000000000
--- a/engine/compiler/builtin/build.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package builtin
-
-import (
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-)
-
-// BuildOp is a transform operation that converts the build section of the Yaml
-// to a step in the pipeline responsible for building the Docker image.
-func BuildOp(node parse.Node) error {
- build, ok := node.(*parse.BuildNode)
- if !ok {
- return nil
- }
- if build.Context == "" {
- return nil
- }
-
- root := node.Root()
- builder := root.NewContainerNode()
-
- command := []string{
- "build",
- "--force-rm",
- "-f", build.Dockerfile,
- "-t", root.Image,
- build.Context,
- }
-
- builder.Container = runner.Container{
- Image: "docker:apline",
- Volumes: []string{"/var/run/docker.sock:/var/run/docker.sock"},
- Entrypoint: []string{"/usr/local/bin/docker"},
- Command: command,
- WorkingDir: root.Path,
- }
-
- root.Services = append(root.Services, builder)
- return nil
-}
diff --git a/engine/compiler/builtin/clone.go b/engine/compiler/builtin/clone.go
deleted file mode 100644
index 3b2c79c21..000000000
--- a/engine/compiler/builtin/clone.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package builtin
-
-import (
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-)
-
-type cloneOp struct {
- visitor
- plugin string
- enable bool
-}
-
-// NewCloneOp returns a transformer that configures the default clone plugin.
-func NewCloneOp(plugin string, enable bool) Visitor {
- return &cloneOp{
- enable: enable,
- plugin: plugin,
- }
-}
-
-func (v *cloneOp) VisitContainer(node *parse.ContainerNode) error {
- if node.Type() != parse.NodeClone {
- return nil
- }
- if v.enable == false {
- node.Disabled = true
- return nil
- }
-
- if node.Container.Name == "" {
- node.Container.Name = "clone"
- }
- if node.Container.Image == "" {
- node.Container.Image = v.plugin
- }
-
- // discard any other cache properties except the image name.
- // everything else is discard for security reasons.
- node.Container = runner.Container{
- Name: node.Container.Name,
- Image: node.Container.Image,
- }
- return nil
-}
diff --git a/engine/compiler/builtin/clone_test.go b/engine/compiler/builtin/clone_test.go
deleted file mode 100644
index 98d869936..000000000
--- a/engine/compiler/builtin/clone_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package builtin
-
-// import (
-// "testing"
-
-// "github.com/libcd/libcd"
-// "github.com/libcd/libyaml/parse"
-
-// "github.com/franela/goblin"
-// )
-
-// func Test_clone(t *testing.T) {
-// root := parse.NewRootNode()
-
-// g := goblin.Goblin(t)
-// g.Describe("clone", func() {
-
-// g.It("should use default when nil", func() {
-// op := NewCloneOp("plugins/git:latest")
-
-// op.VisitRoot(root)
-// g.Assert(root.Clone.(*parse.ContainerNode).Container.Image).Equal("plugins/git:latest")
-// })
-
-// g.It("should use user-defined clone plugin", func() {
-// op := NewCloneOp("plugins/git:latest")
-// clone := root.NewCloneNode()
-// clone.Container = libcd.Container{}
-// clone.Container.Image = "custom/hg:latest"
-// root.Clone = clone
-
-// op.VisitRoot(root)
-// g.Assert(clone.Container.Image).Equal("custom/hg:latest")
-// })
-// })
-// }
diff --git a/engine/compiler/builtin/envs.go b/engine/compiler/builtin/envs.go
deleted file mode 100644
index 42fb595e8..000000000
--- a/engine/compiler/builtin/envs.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package builtin
-
-import (
- "os"
- "strings"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-var (
- httpProxy = os.Getenv("HTTP_PROXY")
- httpsProxy = os.Getenv("HTTPS_PROXY")
- noProxy = os.Getenv("NO_PROXY")
-)
-
-type envOp struct {
- visitor
- envs map[string]string
-}
-
-// NewEnvOp returns a transformer that sets default environment variables
-// for each container, service and plugin.
-func NewEnvOp(envs map[string]string) Visitor {
- return &envOp{
- envs: envs,
- }
-}
-
-func (v *envOp) VisitContainer(node *parse.ContainerNode) error {
- if node.Container.Environment == nil {
- node.Container.Environment = map[string]string{}
- }
- v.defaultEnv(node)
- v.defaultEnvProxy(node)
- return nil
-}
-
-func (v *envOp) defaultEnv(node *parse.ContainerNode) {
- for k, v := range v.envs {
- node.Container.Environment[k] = v
- }
-}
-
-func (v *envOp) defaultEnvProxy(node *parse.ContainerNode) {
- if httpProxy != "" {
- node.Container.Environment["HTTP_PROXY"] = httpProxy
- node.Container.Environment["http_proxy"] = strings.ToUpper(httpProxy)
- }
- if httpsProxy != "" {
- node.Container.Environment["HTTPS_PROXY"] = httpsProxy
- node.Container.Environment["https_proxy"] = strings.ToUpper(httpsProxy)
- }
- if noProxy != "" {
- node.Container.Environment["NO_PROXY"] = noProxy
- node.Container.Environment["no_proxy"] = strings.ToUpper(noProxy)
- }
-}
diff --git a/engine/compiler/builtin/envs_test.go b/engine/compiler/builtin/envs_test.go
deleted file mode 100644
index aab72c50a..000000000
--- a/engine/compiler/builtin/envs_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_env(t *testing.T) {
- root := parse.NewRootNode()
-
- g := goblin.Goblin(t)
- g.Describe("environment variables", func() {
-
- g.It("should be copied", func() {
- envs := map[string]string{"CI": "drone"}
-
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- op := NewEnvOp(envs)
-
- op.VisitContainer(c)
- g.Assert(c.Container.Environment["CI"]).Equal("drone")
- })
-
- g.It("should include http proxy variables", func() {
- httpProxy = "foo"
- httpsProxy = "bar"
- noProxy = "baz"
-
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- op := NewEnvOp(map[string]string{})
-
- op.VisitContainer(c)
- g.Assert(c.Container.Environment["HTTP_PROXY"]).Equal("foo")
- g.Assert(c.Container.Environment["HTTPS_PROXY"]).Equal("bar")
- g.Assert(c.Container.Environment["NO_PROXY"]).Equal("baz")
- })
-
- })
-}
diff --git a/engine/compiler/builtin/escalate.go b/engine/compiler/builtin/escalate.go
deleted file mode 100644
index 78a0c1893..000000000
--- a/engine/compiler/builtin/escalate.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package builtin
-
-import (
- "path/filepath"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type escalateOp struct {
- visitor
- plugins []string
-}
-
-// NewEscalateOp returns a transformer that configures plugins to automatically
-// execute in privileged mode. This is intended for plugins running dind.
-func NewEscalateOp(plugins []string) Visitor {
- return &escalateOp{
- plugins: plugins,
- }
-}
-
-func (v *escalateOp) VisitContainer(node *parse.ContainerNode) error {
- for _, pattern := range v.plugins {
- ok, _ := filepath.Match(pattern, node.Container.Image)
- if ok {
- node.Container.Privileged = true
- }
- }
- return nil
-}
diff --git a/engine/compiler/builtin/escalate_test.go b/engine/compiler/builtin/escalate_test.go
deleted file mode 100644
index e1374bedb..000000000
--- a/engine/compiler/builtin/escalate_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_escalate(t *testing.T) {
- root := parse.NewRootNode()
-
- g := goblin.Goblin(t)
- g.Describe("privileged transform", func() {
-
- g.It("should handle matches", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/docker"}
- op := NewEscalateOp([]string{"plugins/docker"})
-
- op.VisitContainer(c)
- g.Assert(c.Container.Privileged).IsTrue()
- })
-
- g.It("should handle glob matches", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/docker"}
- op := NewEscalateOp([]string{"plugins/*"})
-
- op.VisitContainer(c)
- g.Assert(c.Container.Privileged).IsTrue()
- })
-
- g.It("should handle non matches", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/git"}
- op := NewEscalateOp([]string{"plugins/docker"})
-
- op.VisitContainer(c)
- g.Assert(c.Container.Privileged).IsFalse()
- })
-
- g.It("should handle non glob matches", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/docker:develop"}
- op := NewEscalateOp([]string{"plugins/docker"})
-
- op.VisitContainer(c)
- g.Assert(c.Container.Privileged).IsFalse()
- })
- })
-}
diff --git a/engine/compiler/builtin/filter.go b/engine/compiler/builtin/filter.go
deleted file mode 100644
index 88f64283a..000000000
--- a/engine/compiler/builtin/filter.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package builtin
-
-import (
- "path/filepath"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type filterOp struct {
- visitor
- status string
- branch string
- event string
- environ string
- platform string
- matrix map[string]string
-}
-
-// NewFilterOp returns a transformer that filters (ie removes) steps
-// from the process based on conditional logic in the yaml.
-func NewFilterOp(status, branch, event, env string, matrix map[string]string) Visitor {
- return &filterOp{
- status: status,
- branch: branch,
- event: event,
- environ: env,
- matrix: matrix,
- }
-}
-
-func (v *filterOp) VisitContainer(node *parse.ContainerNode) error {
- v.visitStatus(node)
- v.visitBranch(node)
- v.visitEvent(node)
- v.visitMatrix(node)
- v.visitPlatform(node)
- return nil
-}
-
-// visitStatus is a helpfer function that converts an on_change status
-// filter to either success or failure based on the prior build status.
-func (v *filterOp) visitStatus(node *parse.ContainerNode) {
- if len(node.Conditions.Status) == 0 {
- node.Conditions.Status = []string{"success"}
- return
- }
- for _, status := range node.Conditions.Status {
- if status != "change" && status != "changed" && status != "changes" {
- continue
- }
- var want []string
- switch v.status {
- case "success":
- want = append(want, "failure")
- case "failure", "error", "killed":
- want = append(want, "success")
- default:
- want = []string{"success", "failure"}
- }
- node.Conditions.Status = append(node.Conditions.Status, want...)
- break
- }
-}
-
-// visitBranch is a helper function that disables container steps when
-// the branch conditions are not satisfied.
-func (v *filterOp) visitBranch(node *parse.ContainerNode) {
- if len(node.Conditions.Branch) == 0 {
- return
- }
- for _, pattern := range node.Conditions.Branch {
- if ok, _ := filepath.Match(pattern, v.branch); ok {
- return
- }
- }
- node.Disabled = true
-}
-
-// visitEnvironment is a helper function that disables container steps
-// when the deployment environment conditions are not satisfied.
-func (v *filterOp) visitEnvironment(node *parse.ContainerNode) {
- if len(node.Conditions.Environment) == 0 {
- return
- }
- for _, pattern := range node.Conditions.Environment {
- if ok, _ := filepath.Match(pattern, v.environ); ok {
- return
- }
- }
- node.Disabled = true
-}
-
-// visitEvent is a helper function that disables container steps
-// when the build event conditions are not satisfied.
-func (v *filterOp) visitEvent(node *parse.ContainerNode) {
- if len(node.Conditions.Event) == 0 {
- return
- }
- for _, pattern := range node.Conditions.Event {
- if ok, _ := filepath.Match(pattern, v.event); ok {
- return
- }
- }
- node.Disabled = true
-}
-
-func (v *filterOp) visitMatrix(node *parse.ContainerNode) {
- for key, val := range node.Conditions.Matrix {
- if v.matrix[key] != val {
- node.Disabled = true
- break
- }
- }
-}
-
-// visitPlatform is a helper function that disables container steps
-// when the build event conditions are not satisfied.
-func (v *filterOp) visitPlatform(node *parse.ContainerNode) {
- if len(node.Conditions.Platform) == 0 {
- return
- }
- for _, pattern := range node.Conditions.Platform {
- if ok, _ := filepath.Match(pattern, v.platform); ok {
- return
- }
- }
- node.Disabled = true
-}
diff --git a/engine/compiler/builtin/filter_test.go b/engine/compiler/builtin/filter_test.go
deleted file mode 100644
index ae01fa3c5..000000000
--- a/engine/compiler/builtin/filter_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package builtin
-
-// import (
-// "testing"
-
-// "github.com/franela/goblin"
-// )
-
-// func TestFilter(t *testing.T) {
-// g := goblin.Goblin(t)
-// g.Describe("Filters", func() {
-
-// g.It("Should match no branch filter", func() {
-// c := &Container{}
-// FilterBranch("feature/foo")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should match branch", func() {
-// c := &Container{}
-// c.Conditions.Branch.parts = []string{"feature/*"}
-// FilterBranch("feature/foo")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should match branch wildcard", func() {
-// c := &Container{}
-// c.Conditions.Branch.parts = []string{"feature/*"}
-// FilterBranch("feature/foo")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should disable when branch filter doesn't match", func() {
-// c := &Container{}
-// c.Conditions.Branch.parts = []string{"feature/*", "develop"}
-// FilterBranch("master")(nil, c)
-// g.Assert(c.Disabled).IsTrue()
-// })
-
-// g.It("Should match no platform filter", func() {
-// c := &Container{}
-// FilterPlatform("linux_amd64")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should match platform", func() {
-// c := &Container{}
-// c.Conditions.Platform.parts = []string{"linux_amd64"}
-// FilterPlatform("linux_amd64")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should disable when platform filter doesn't match", func() {
-// c := &Container{}
-// c.Conditions.Platform.parts = []string{"linux_arm", "linux_arm64"}
-// FilterPlatform("linux_amd64")(nil, c)
-// g.Assert(c.Disabled).IsTrue()
-// })
-
-// g.It("Should match no environment filter", func() {
-// c := &Container{}
-// FilterEnvironment("production")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should match environment", func() {
-// c := &Container{}
-// c.Conditions.Environment.parts = []string{"production"}
-// FilterEnvironment("production")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should disable when environment filter doesn't match", func() {
-// c := &Container{}
-// c.Conditions.Environment.parts = []string{"develop", "staging"}
-// FilterEnvironment("production")(nil, c)
-// g.Assert(c.Disabled).IsTrue()
-// })
-
-// g.It("Should match no event filter", func() {
-// c := &Container{}
-// FilterEvent("push")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should match event", func() {
-// c := &Container{}
-// c.Conditions.Event.parts = []string{"push"}
-// FilterEvent("push")(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should disable when event filter doesn't match", func() {
-// c := &Container{}
-// c.Conditions.Event.parts = []string{"push", "tag"}
-// FilterEvent("pull_request")(nil, c)
-// g.Assert(c.Disabled).IsTrue()
-// })
-
-// g.It("Should match matrix", func() {
-// c := &Container{}
-// c.Conditions.Matrix = map[string]string{
-// "go": "1.5",
-// "redis": "3.0",
-// }
-// matrix := map[string]string{
-// "go": "1.5",
-// "redis": "3.0",
-// "node": "5.0.0",
-// }
-// FilterMatrix(matrix)(nil, c)
-// g.Assert(c.Disabled).IsFalse()
-// })
-
-// g.It("Should disable when event filter doesn't match", func() {
-// c := &Container{}
-// c.Conditions.Matrix = map[string]string{
-// "go": "1.5",
-// "redis": "3.0",
-// }
-// matrix := map[string]string{
-// "go": "1.4.2",
-// "redis": "3.0",
-// "node": "5.0.0",
-// }
-// FilterMatrix(matrix)(nil, c)
-// g.Assert(c.Disabled).IsTrue()
-// })
-// })
-// }
diff --git a/engine/compiler/builtin/normalize.go b/engine/compiler/builtin/normalize.go
deleted file mode 100644
index 4de12720d..000000000
--- a/engine/compiler/builtin/normalize.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package builtin
-
-import (
- "path/filepath"
- "strings"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type normalizeOp struct {
- visitor
- namespace string
-}
-
-// NewNormalizeOp returns a transformer that normalizes the container image
-// names and plugin names to their fully qualified values.
-func NewNormalizeOp(namespace string) Visitor {
- return &normalizeOp{
- namespace: namespace,
- }
-}
-
-func (v *normalizeOp) VisitContainer(node *parse.ContainerNode) error {
- v.normalizeName(node)
- v.normalizeImage(node)
- switch node.NodeType {
- case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
- v.normalizePlugin(node)
- }
- return nil
-}
-
-// normalize the container image to the fully qualified name.
-func (v *normalizeOp) normalizeImage(node *parse.ContainerNode) {
- if strings.Contains(node.Container.Image, ":") {
- return
- }
- node.Container.Image = node.Container.Image + ":latest"
-}
-
-// normalize the plugin image name, prefixing the configured namespace.
-func (v *normalizeOp) normalizePlugin(node *parse.ContainerNode) {
- if strings.Contains(node.Container.Image, "/") {
- return
- }
- if strings.Contains(node.Container.Image, "_") {
- node.Container.Image = strings.Replace(node.Container.Image, "_", "-", -1)
- }
- node.Container.Image = filepath.Join(v.namespace, node.Container.Image)
-}
-
-// normalize the container name to ensure a value is set.
-func (v *normalizeOp) normalizeName(node *parse.ContainerNode) {
- if node.Container.Name != "" {
- return
- }
-
- parts := strings.Split(node.Container.Image, "/")
- if len(parts) != 0 {
- node.Container.Name = parts[len(parts)-1]
- }
- parts = strings.Split(node.Container.Name, ":")
- if len(parts) != 0 {
- node.Container.Name = parts[0]
- }
-}
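
The normalize pass above boils down to three string rewrites on the image reference. A standalone sketch of the same rules for a plugin image, assuming the plugins namespace used by the tests that follow:

package main

import (
	"fmt"
	"path"
	"strings"
)

// normalizePluginImage mirrors normalizeImage and normalizePlugin above:
// append the latest tag when no tag is given, replace underscores with
// dashes, and prefix the namespace when no registry/owner part is present.
func normalizePluginImage(image, namespace string) string {
	if !strings.Contains(image, ":") {
		image += ":latest"
	}
	if !strings.Contains(image, "/") {
		image = strings.Replace(image, "_", "-", -1)
		image = path.Join(namespace, image)
	}
	return image
}

func main() {
	fmt.Println(normalizePluginImage("gh_pages", "plugins"))                  // plugins/gh-pages:latest
	fmt.Println(normalizePluginImage("index.docker.io/drone/git", "plugins")) // index.docker.io/drone/git:latest
}
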
diff --git a/engine/compiler/builtin/normalize_test.go b/engine/compiler/builtin/normalize_test.go
deleted file mode 100644
index dbb24f2f6..000000000
--- a/engine/compiler/builtin/normalize_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_normalize(t *testing.T) {
- root := parse.NewRootNode()
-
- g := goblin.Goblin(t)
- g.Describe("normalizing", func() {
-
- g.Describe("images", func() {
-
- g.It("should append tag if empty", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{Image: "golang"}
- op := NewNormalizeOp("")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("golang:latest")
- })
-
- g.It("should not override existing tag", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{Image: "golang:1.5"}
- op := NewNormalizeOp("")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("golang:1.5")
- })
- })
-
- g.Describe("plugins", func() {
-
- g.It("should prepend namespace", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "git"}
- op := NewNormalizeOp("plugins")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("plugins/git:latest")
- })
-
- g.It("should not override existing namespace", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "index.docker.io/drone/git"}
- op := NewNormalizeOp("plugins")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("index.docker.io/drone/git:latest")
- })
-
- g.It("should replace underscores with dashes", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "gh_pages"}
- op := NewNormalizeOp("plugins")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("plugins/gh-pages:latest")
- })
-
- g.It("should ignore shell or service types", func() {
- c := root.NewShellNode()
- c.Container = runner.Container{Image: "golang"}
- op := NewNormalizeOp("plugins")
-
- op.VisitContainer(c)
- g.Assert(c.Container.Image).Equal("golang:latest")
- })
- })
- })
-}
diff --git a/engine/compiler/builtin/pod.go b/engine/compiler/builtin/pod.go
deleted file mode 100644
index 791c2a6fd..000000000
--- a/engine/compiler/builtin/pod.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package builtin
-
-import (
- "fmt"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-)
-
-type podOp struct {
- visitor
- name string
-}
-
-// NewPodOp returns a transformer that configures an ambassador container
-// providing shared networking and container volumes.
-func NewPodOp(name string) Visitor {
- return &podOp{
- name: name,
- }
-}
-
-func (v *podOp) VisitContainer(node *parse.ContainerNode) error {
- if node.Container.Network == "" {
- parent := fmt.Sprintf("container:%s", v.name)
- node.Container.Network = parent
- }
- node.Container.VolumesFrom = append(node.Container.VolumesFrom, v.name)
- return nil
-}
-
-func (v *podOp) VisitRoot(node *parse.RootNode) error {
- service := node.NewServiceNode()
- service.Container = runner.Container{
- Name: v.name,
- Alias: "ambassador",
- Image: "busybox:latest",
- Entrypoint: []string{"/bin/sleep"},
- Command: []string{"86400"},
- Volumes: []string{node.Path, node.Base},
- // Entrypoint: []string{"/bin/sh", "-c"},
- // Volumes: []string{node.Base},
- // Command: []string{
- // fmt.Sprintf("mkdir -p %s; sleep 86400", node.Path),
- // },
- }
-
- node.Pod = service
- return nil
-}
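
The pod op above implements the ambassador pattern: one long-sleeping busybox container owns the shared network namespace and workspace volumes, and every other step joins it. A pared-down sketch of the per-step rewiring; the ambassador name here is illustrative:

package main

import "fmt"

// step is a pared-down stand-in for runner.Container, holding only the
// fields the pod transform touches.
type step struct {
	Name        string
	Network     string
	VolumesFrom []string
}

// joinPod mirrors podOp.VisitContainer: a step without an explicit network
// joins the ambassador's network namespace and mounts its volumes.
func joinPod(s *step, pod string) {
	if s.Network == "" {
		s.Network = fmt.Sprintf("container:%s", pod)
	}
	s.VolumesFrom = append(s.VolumesFrom, pod)
}

func main() {
	s := &step{Name: "build"}
	joinPod(s, "ambassador") // the pod name is illustrative
	fmt.Printf("%+v\n", s)   // &{Name:build Network:container:ambassador VolumesFrom:[ambassador]}
}
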
diff --git a/engine/compiler/builtin/pull.go b/engine/compiler/builtin/pull.go
deleted file mode 100644
index 5796b5729..000000000
--- a/engine/compiler/builtin/pull.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package builtin
-
-import (
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type pullOp struct {
- visitor
- pull bool
-}
-
-// NewPullOp returns a transformer that configures plugins to automatically
-// pull the latest images at runtime.
-func NewPullOp(pull bool) Visitor {
- return &pullOp{
- pull: pull,
- }
-}
-
-func (v *pullOp) VisitContainer(node *parse.ContainerNode) error {
- switch node.NodeType {
- case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
- node.Container.Pull = v.pull
- }
- return nil
-}
diff --git a/engine/compiler/builtin/pull_test.go b/engine/compiler/builtin/pull_test.go
deleted file mode 100644
index 882d32103..000000000
--- a/engine/compiler/builtin/pull_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_pull(t *testing.T) {
- root := parse.NewRootNode()
-
- g := goblin.Goblin(t)
- g.Describe("pull image", func() {
-
- g.It("should be enabled for plugins", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- op := NewPullOp(true)
-
- op.VisitContainer(c)
- g.Assert(c.Container.Pull).IsTrue()
- })
-
- g.It("should be disabled for plugins", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- op := NewPullOp(false)
-
- op.VisitContainer(c)
- g.Assert(c.Container.Pull).IsFalse()
- })
-
- g.It("should be disabled for non-plugins", func() {
- c := root.NewShellNode()
- c.Container = runner.Container{}
- op := NewPullOp(true)
-
- op.VisitContainer(c)
- g.Assert(c.Container.Pull).IsFalse()
- })
- })
-}
diff --git a/engine/compiler/builtin/secrets.go b/engine/compiler/builtin/secrets.go
deleted file mode 100644
index d5c418499..000000000
--- a/engine/compiler/builtin/secrets.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package builtin
-
-import (
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/model"
-)
-
-type secretOp struct {
- visitor
- event string
- secrets []*model.Secret
-}
-
-// NewSecretOp returns a transformer that configures plugin secrets.
-func NewSecretOp(event string, secrets []*model.Secret) Visitor {
- return &secretOp{
- event: event,
- secrets: secrets,
- }
-}
-
-func (v *secretOp) VisitContainer(node *parse.ContainerNode) error {
- for _, secret := range v.secrets {
- if !secret.Match(node.Container.Image, v.event) {
- continue
- }
-
- switch secret.Name {
- case "REGISTRY_USERNAME":
- node.Container.AuthConfig.Username = secret.Value
- case "REGISTRY_PASSWORD":
- node.Container.AuthConfig.Password = secret.Value
- case "REGISTRY_EMAIL":
- node.Container.AuthConfig.Email = secret.Value
- case "REGISTRY_TOKEN":
- node.Container.AuthConfig.Token = secret.Value
- default:
- if node.Container.Environment == nil {
- node.Container.Environment = map[string]string{}
- }
- node.Container.Environment[secret.Name] = secret.Value
- }
- }
- return nil
-}
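
The secret op above routes each matching secret either into the registry auth config (for the four REGISTRY_* names) or into the container environment. A standalone sketch with stand-in types; the secret values are illustrative:

package main

import "fmt"

// secret and auth are pared-down stand-ins for model.Secret and runner.Auth.
type secret struct{ Name, Value string }

type auth struct{ Username, Password, Email, Token string }

// applySecret mirrors secretOp.VisitContainer for one matching secret: the
// four REGISTRY_* names feed the registry auth config, anything else becomes
// a plain environment variable.
func applySecret(s secret, a *auth, env map[string]string) {
	switch s.Name {
	case "REGISTRY_USERNAME":
		a.Username = s.Value
	case "REGISTRY_PASSWORD":
		a.Password = s.Value
	case "REGISTRY_EMAIL":
		a.Email = s.Value
	case "REGISTRY_TOKEN":
		a.Token = s.Value
	default:
		env[s.Name] = s.Value
	}
}

func main() {
	a := &auth{}
	env := map[string]string{}
	applySecret(secret{Name: "REGISTRY_USERNAME", Value: "octocat"}, a, env)
	applySecret(secret{Name: "SLACK_TOKEN", Value: "dummy-token"}, a, env)
	fmt.Println(a.Username, env["SLACK_TOKEN"]) // octocat dummy-token
}
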
diff --git a/engine/compiler/builtin/shell.go b/engine/compiler/builtin/shell.go
deleted file mode 100644
index a3dd32068..000000000
--- a/engine/compiler/builtin/shell.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package builtin
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-const (
- Freebsd_amd64 = "freebsd_amd64"
- Linux_adm64 = "linux_amd64"
- Windows_amd64 = "windows_amd64"
-)
-
-type shellOp struct {
- visitor
- platform string
-}
-
-// NewShellOp returns a transformer that converts the shell node to
-// a runnable container.
-func NewShellOp(platform string) Visitor {
- return &shellOp{
- platform: platform,
- }
-}
-
-func (v *shellOp) VisitContainer(node *parse.ContainerNode) error {
- if node.NodeType != parse.NodeShell {
- return nil
- }
-
- node.Container.Entrypoint = []string{
- "/bin/sh", "-c",
- }
- node.Container.Command = []string{
- "echo $DRONE_SCRIPT | base64 -d | /bin/sh -e",
- }
- if node.Container.Environment == nil {
- node.Container.Environment = map[string]string{}
- }
- node.Container.Environment["HOME"] = "/root"
- node.Container.Environment["SHELL"] = "/bin/sh"
- node.Container.Environment["DRONE_SCRIPT"] = toScript(
- node.Root().Path,
- node.Commands,
- )
-
- return nil
-}
-
-func toScript(base string, commands []string) string {
- var buf bytes.Buffer
- for _, command := range commands {
- buf.WriteString(fmt.Sprintf(
- traceScript,
- ""+command+"",
- command,
- ))
- }
-
- script := fmt.Sprintf(
- setupScript,
- buf.String(),
- )
-
- return base64.StdEncoding.EncodeToString([]byte(script))
-}
-
-// setupScript is a helper script that is added to the build to ensure
-// a minimum set of environment variables is set correctly.
-const setupScript = `
-if [ -n "$DRONE_NETRC_MACHINE" ]; then
-cat <<EOF > $HOME/.netrc
-machine $DRONE_NETRC_MACHINE
-login $DRONE_NETRC_USERNAME
-password $DRONE_NETRC_PASSWORD
-EOF
-fi
-
-unset DRONE_NETRC_USERNAME
-unset DRONE_NETRC_PASSWORD
-unset DRONE_SCRIPT
-
-%s
-`
-
-// traceScript is a helper script that is added to the build script
-// to trace a command.
-const traceScript = `
-echo %q
-%s
-`
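
The shell op above serializes the user commands into a traced script and base64-encodes it so it can travel inside the DRONE_SCRIPT environment variable. A standalone sketch of that encoding (the netrc preamble from setupScript is omitted here):

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

// toScript mirrors the helper above, minus the setup preamble: each command
// is echoed before it runs, and the whole script is base64-encoded so it can
// be passed through a single environment variable.
func toScript(commands []string) string {
	var buf bytes.Buffer
	for _, command := range commands {
		buf.WriteString(fmt.Sprintf("\necho %q\n%s\n", command, command))
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes())
}

func main() {
	encoded := toScript([]string{"go build", "go test"})
	decoded, _ := base64.StdEncoding.DecodeString(encoded)
	fmt.Print(string(decoded))
	// echo "go build"
	// go build
	// echo "go test"
	// go test
}
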
diff --git a/engine/compiler/builtin/shell_test.go b/engine/compiler/builtin/shell_test.go
deleted file mode 100644
index bc9dd8291..000000000
--- a/engine/compiler/builtin/shell_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_shell(t *testing.T) {
-
- g := goblin.Goblin(t)
- g.Describe("shell containers", func() {
-
- g.It("should ignore plugin steps", func() {
- root := parse.NewRootNode()
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- ops := NewShellOp(Linux_adm64)
- ops.VisitContainer(c)
-
- g.Assert(len(c.Container.Entrypoint)).Equal(0)
- g.Assert(len(c.Container.Command)).Equal(0)
- g.Assert(c.Container.Environment["DRONE_SCRIPT"]).Equal("")
- })
-
- g.It("should set entrypoint, command and environment variables", func() {
- root := parse.NewRootNode()
- root.Base = "/go"
- root.Path = "/go/src/github.com/octocat/hello-world"
-
- c := root.NewShellNode()
- c.Commands = []string{"go build"}
- ops := NewShellOp(Linux_adm64)
- ops.VisitContainer(c)
-
- g.Assert(c.Container.Entrypoint).Equal([]string{"/bin/sh", "-c"})
- g.Assert(c.Container.Command).Equal([]string{"echo $DRONE_SCRIPT | base64 -d | /bin/sh -e"})
- g.Assert(c.Container.Environment["DRONE_SCRIPT"] != "").IsTrue()
- })
- })
-}
diff --git a/engine/compiler/builtin/validate.go b/engine/compiler/builtin/validate.go
deleted file mode 100644
index ec88953c5..000000000
--- a/engine/compiler/builtin/validate.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package builtin
-
-import (
- "fmt"
- "path/filepath"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type validateOp struct {
- visitor
- plugins []string
- trusted bool
-}
-
-// NewValidateOp returns a linter that checks container configuration.
-func NewValidateOp(trusted bool, plugins []string) Visitor {
- return &validateOp{
- trusted: trusted,
- plugins: plugins,
- }
-}
-
-func (v *validateOp) VisitContainer(node *parse.ContainerNode) error {
- switch node.NodeType {
- case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
- if err := v.validatePlugins(node); err != nil {
- return err
- }
- }
- if node.NodeType == parse.NodePlugin {
- if err := v.validatePluginConfig(node); err != nil {
- return err
- }
- }
- return v.validateConfig(node)
-}
-
-// validate the plugin image and return an error if the plugin
-// image does not match the whitelist.
-func (v *validateOp) validatePlugins(node *parse.ContainerNode) error {
- match := false
- for _, pattern := range v.plugins {
- ok, err := filepath.Match(pattern, node.Container.Image)
- if ok && err == nil {
- match = true
- break
- }
- }
- if !match {
- return fmt.Errorf(
- "Plugin %s is not in the whitelist",
- node.Container.Image,
- )
- }
- return nil
-}
-
-// validate the plugin command and entrypoint and return an error
-// if the user attempts to set or override these values.
-func (v *validateOp) validatePluginConfig(node *parse.ContainerNode) error {
- if len(node.Container.Entrypoint) != 0 {
- return fmt.Errorf("Cannot set plugin Entrypoint")
- }
- if len(node.Container.Command) != 0 {
- return fmt.Errorf("Cannot set plugin Command")
- }
- return nil
-}
-
-// validate the container configuration and return an error if
-// restricted configurations are used.
-func (v *validateOp) validateConfig(node *parse.ContainerNode) error {
- if v.trusted {
- return nil
- }
- if node.Container.Privileged {
- return fmt.Errorf("Insufficient privileges to use privileged mode")
- }
- if len(node.Container.DNS) != 0 {
- return fmt.Errorf("Insufficient privileges to use custom dns")
- }
- if len(node.Container.DNSSearch) != 0 {
- return fmt.Errorf("Insufficient privileges to use dns_search")
- }
- if len(node.Container.Devices) != 0 {
- return fmt.Errorf("Insufficient privileges to use devices")
- }
- if len(node.Container.ExtraHosts) != 0 {
- return fmt.Errorf("Insufficient privileges to use extra_hosts")
- }
- if len(node.Container.Network) != 0 {
- return fmt.Errorf("Insufficient privileges to override the network")
- }
- if node.Container.OomKillDisable {
- return fmt.Errorf("Insufficient privileges to disable oom_kill")
- }
- if len(node.Container.Volumes) != 0 && node.Type() != parse.NodeCache {
- return fmt.Errorf("Insufficient privileges to use volumes")
- }
- if len(node.Container.VolumesFrom) != 0 {
- return fmt.Errorf("Insufficient privileges to use volumes_from")
- }
- return nil
-}
-
-// validate the environment configuration and return an error if
-// an attempt is made to override system environment variables.
-// func (v *validateOp) validateEnvironment(node *parse.ContainerNode) error {
-// for key := range node.Container.Environment {
-// upper := strings.ToUpper(key)
-// switch {
-// case strings.HasPrefix(upper, "DRONE_"):
-// return fmt.Errorf("Cannot set or override DRONE_ environment variables")
-// case strings.HasPrefix(upper, "PLUGIN_"):
-// return fmt.Errorf("Cannot set or override PLUGIN_ environment variables")
-// }
-// }
-// return nil
-// }
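
The plugin whitelist check above is another filepath.Match loop, this time against the image name rather than a build attribute. A standalone sketch of that check:

package main

import (
	"fmt"
	"path/filepath"
)

// whitelisted mirrors validatePlugins above: the plugin image must match at
// least one glob pattern from the whitelist.
func whitelisted(patterns []string, image string) bool {
	for _, pattern := range patterns {
		if ok, err := filepath.Match(pattern, image); ok && err == nil {
			return true
		}
	}
	return false
}

func main() {
	whitelist := []string{"plugins/*"}
	fmt.Println(whitelisted(whitelist, "plugins/git")) // true
	fmt.Println(whitelisted(whitelist, "custom/git"))  // false: compilation fails
}
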
diff --git a/engine/compiler/builtin/validate_test.go b/engine/compiler/builtin/validate_test.go
deleted file mode 100644
index 1744c6283..000000000
--- a/engine/compiler/builtin/validate_test.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/drone/drone/engine/compiler/parse"
- "github.com/drone/drone/engine/runner"
-
- "github.com/franela/goblin"
-)
-
-func Test_validate(t *testing.T) {
- root := parse.NewRootNode()
-
- g := goblin.Goblin(t)
- g.Describe("validating", func() {
-
- g.Describe("privileged attributes", func() {
-
- g.It("should not error when trusted build", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- ops := NewValidateOp(true, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err == nil).IsTrue("error should be nil")
- })
-
-  g.It("should error when privileged mode", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.Privileged = true
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use privileged mode")
- })
-
- g.It("should error when dns configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.DNS = []string{"8.8.8.8"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use custom dns")
- })
-
- g.It("should error when dns_search configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.DNSSearch = []string{"8.8.8.8"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use dns_search")
- })
-
- g.It("should error when devices configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.Devices = []string{"/dev/foo"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use devices")
- })
-
- g.It("should error when extra_hosts configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.ExtraHosts = []string{"1.2.3.4 foo.com"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use extra_hosts")
- })
-
- g.It("should error when network configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.Network = "host"
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to override the network")
- })
-
- g.It("should error when oom_kill_disabled configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.OomKillDisable = true
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to disable oom_kill")
- })
-
- g.It("should error when volumes configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.Volumes = []string{"/:/tmp"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use volumes")
- })
-
- g.It("should error when volumes_from configured", func() {
- c := root.NewContainerNode()
- c.Container = runner.Container{}
- c.Container.VolumesFrom = []string{"drone"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Insufficient privileges to use volumes_from")
- })
- })
-
- g.Describe("plugin configuration", func() {
- g.It("should error when entrypoint is configured", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/git"}
- c.Container.Entrypoint = []string{"/bin/sh"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Cannot set plugin Entrypoint")
- })
-
- g.It("should error when command is configured", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/git"}
- c.Container.Command = []string{"cat", "/proc/1/status"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Cannot set plugin Command")
- })
-
- g.It("should not error when empty entrypoint, command", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{Image: "plugins/git"}
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err == nil).IsTrue("error should be nil")
- })
- })
-
- g.Describe("plugin whitelist", func() {
-
- g.It("should error when no match found", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- c.Container.Image = "custom/git"
-
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
-   g.Assert(err != nil).IsTrue("error should not be nil")
- g.Assert(err.Error()).Equal("Plugin custom/git is not in the whitelist")
- })
-
- g.It("should not error when match found", func() {
- c := root.NewPluginNode()
- c.Container = runner.Container{}
- c.Container.Image = "plugins/git"
-
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err == nil).IsTrue("error should be nil")
- })
-
- g.It("should ignore build images", func() {
- c := root.NewShellNode()
- c.Container = runner.Container{}
- c.Container.Image = "google/golang"
-
- ops := NewValidateOp(false, []string{"plugins/*"})
- err := ops.VisitContainer(c)
-
- g.Assert(err == nil).IsTrue("error should be nil")
- })
- })
- })
-}
diff --git a/engine/compiler/builtin/visitor.go b/engine/compiler/builtin/visitor.go
deleted file mode 100644
index bd84a8f5b..000000000
--- a/engine/compiler/builtin/visitor.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package builtin
-
-import "github.com/drone/drone/engine/compiler/parse"
-
-// Visitor interface for walking the Yaml file.
-type Visitor interface {
- VisitRoot(*parse.RootNode) error
- VisitVolume(*parse.VolumeNode) error
- VisitNetwork(*parse.NetworkNode) error
- VisitBuild(*parse.BuildNode) error
- VisitContainer(*parse.ContainerNode) error
-}
-
-// visitor provides an easy default implementation of a Visitor interface with
-// stubbed methods. This can be embedded in transforms to meet the basic
-// requirements.
-type visitor struct{}
-
-func (visitor) VisitRoot(*parse.RootNode) error { return nil }
-func (visitor) VisitVolume(*parse.VolumeNode) error { return nil }
-func (visitor) VisitNetwork(*parse.NetworkNode) error { return nil }
-func (visitor) VisitBuild(*parse.BuildNode) error { return nil }
-func (visitor) VisitContainer(*parse.ContainerNode) error { return nil }
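
The embedded visitor struct above is what lets each builtin op implement only the visit methods it cares about. A standalone sketch of the same embedding trick with a reduced two-method interface and hypothetical renameOp:

package main

import "fmt"

// node is a pared-down stand-in for parse.ContainerNode.
type node struct{ Name string }

// visitorIface is a reduced two-method version of the interface above.
type visitorIface interface {
	VisitRoot(*node) error
	VisitContainer(*node) error
}

// visitor supplies no-op defaults, exactly like the embedded struct above.
type visitor struct{}

func (visitor) VisitRoot(*node) error      { return nil }
func (visitor) VisitContainer(*node) error { return nil }

// renameOp only cares about containers; the embedded visitor satisfies the
// rest of the interface for free.
type renameOp struct{ visitor }

func (renameOp) VisitContainer(n *node) error {
	n.Name = "renamed_" + n.Name
	return nil
}

func main() {
	var v visitorIface = renameOp{}
	n := &node{Name: "build"}
	v.VisitContainer(n)
	fmt.Println(n.Name) // renamed_build
}
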
diff --git a/engine/compiler/builtin/workspace.go b/engine/compiler/builtin/workspace.go
deleted file mode 100644
index 84256f9cc..000000000
--- a/engine/compiler/builtin/workspace.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package builtin
-
-import (
- "path/filepath"
-
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-type workspaceOp struct {
- visitor
- base string
- path string
-}
-
-// NewWorkspaceOp returns a transformer that provides default workspace paths,
-// including the base path (mounted as a volume) and absolute path where the
-// code is cloned.
-func NewWorkspaceOp(base, path string) Visitor {
- return &workspaceOp{
- base: base,
- path: path,
- }
-}
-
-func (v *workspaceOp) VisitRoot(node *parse.RootNode) error {
- if node.Base == "" {
- node.Base = v.base
- }
- if node.Path == "" {
- node.Path = v.path
- }
- if !filepath.IsAbs(node.Path) {
- node.Path = filepath.Join(
- node.Base,
- node.Path,
- )
- }
- return nil
-}
-
-func (v *workspaceOp) VisitContainer(node *parse.ContainerNode) error {
- if node.NodeType == parse.NodeService {
- // we must not override the default working
- // directory of service containers. All other
-  // containers should launch in the workspace.
- return nil
- }
- root := node.Root()
- node.Container.WorkingDir = root.Path
- return nil
-}
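
The workspace op above fills in defaults and anchors a relative clone path beneath the base volume. A standalone sketch of the path logic:

package main

import (
	"fmt"
	"path/filepath"
)

// workspace mirrors workspaceOp.VisitRoot: apply defaults, then make a
// relative clone path absolute beneath the base volume.
func workspace(base, path, defaultBase, defaultPath string) (string, string) {
	if base == "" {
		base = defaultBase
	}
	if path == "" {
		path = defaultPath
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(base, path)
	}
	return base, path
}

func main() {
	base, path := workspace("", "", "/go", "src/github.com/octocat/hello-world")
	fmt.Println(base, path) // /go /go/src/github.com/octocat/hello-world
}
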
diff --git a/engine/compiler/builtin/workspace_test.go b/engine/compiler/builtin/workspace_test.go
deleted file mode 100644
index 523d2f019..000000000
--- a/engine/compiler/builtin/workspace_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package builtin
-
-import (
- "testing"
-
- "github.com/franela/goblin"
- "github.com/drone/drone/engine/compiler/parse"
-)
-
-func Test_workspace(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("workspace", func() {
-
- var defaultBase = "/go"
- var defaultPath = "src/github.com/octocat/hello-world"
-
- g.It("should not override user paths", func() {
- var base = "/drone"
- var path = "/drone/src/github.com/octocat/hello-world"
-
- op := NewWorkspaceOp(defaultBase, defaultPath)
- root := parse.NewRootNode()
- root.Base = base
- root.Path = path
-
- op.VisitRoot(root)
- g.Assert(root.Base).Equal(base)
- g.Assert(root.Path).Equal(path)
- })
-
- g.It("should convert user paths to absolute", func() {
- var base = "/drone"
- var path = "src/github.com/octocat/hello-world"
- var abs = "/drone/src/github.com/octocat/hello-world"
-
- op := NewWorkspaceOp(defaultBase, defaultPath)
- root := parse.NewRootNode()
- root.Base = base
- root.Path = path
-
- op.VisitRoot(root)
- g.Assert(root.Base).Equal(base)
- g.Assert(root.Path).Equal(abs)
- })
-
- g.It("should set the default path", func() {
- var base = "/go"
- var path = "/go/src/github.com/octocat/hello-world"
-
- op := NewWorkspaceOp(defaultBase, defaultPath)
- root := parse.NewRootNode()
-
- op.VisitRoot(root)
- g.Assert(root.Base).Equal(base)
- g.Assert(root.Path).Equal(path)
- })
-
- g.It("should use workspace as working_dir", func() {
- var base = "/drone"
- var path = "/drone/src/github.com/octocat/hello-world"
-
- root := parse.NewRootNode()
- root.Base = base
- root.Path = path
-
- c := root.NewContainerNode()
-
- op := NewWorkspaceOp(defaultBase, defaultPath)
- op.VisitContainer(c)
- g.Assert(c.Container.WorkingDir).Equal(root.Path)
- })
-
- g.It("should not use workspace as working_dir for services", func() {
- var base = "/drone"
- var path = "/drone/src/github.com/octocat/hello-world"
-
- root := parse.NewRootNode()
- root.Base = base
- root.Path = path
-
- c := root.NewServiceNode()
-
- op := NewWorkspaceOp(defaultBase, defaultPath)
- op.VisitContainer(c)
- g.Assert(c.Container.WorkingDir).Equal("")
- })
- })
-}
diff --git a/engine/compiler/compile.go b/engine/compiler/compile.go
deleted file mode 100644
index da4e56364..000000000
--- a/engine/compiler/compile.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package compiler
-
-import (
- "github.com/drone/drone/engine/runner"
- "github.com/drone/drone/engine/runner/parse"
-
- yaml "github.com/drone/drone/engine/compiler/parse"
-)
-
-// Compiler compiles the Yaml file to the intermediate representation.
-type Compiler struct {
- trans []Transform
-}
-
-func New() *Compiler {
- return &Compiler{}
-}
-
-// Transforms sets the compiler transforms used to transform the intermediate
-// representation during compilation.
-func (c *Compiler) Transforms(trans []Transform) *Compiler {
- c.trans = append(c.trans, trans...)
- return c
-}
-
-// CompileString compiles the Yaml configuration string and returns
-// the intermediate representation for the interpreter.
-func (c *Compiler) CompileString(in string) (*runner.Spec, error) {
- return c.Compile([]byte(in))
-}
-
-// Compile compiles the Yaml configuration file and returns
-// the intermediate representation for the interpreter.
-func (c *Compiler) Compile(in []byte) (*runner.Spec, error) {
- root, err := yaml.Parse(in)
- if err != nil {
- return nil, err
- }
- if err := root.Walk(c.walk); err != nil {
- return nil, err
- }
-
- config := &runner.Spec{}
- tree := parse.NewTree()
-
- // pod section
- if root.Pod != nil {
- node, ok := root.Pod.(*yaml.ContainerNode)
- if ok {
- config.Containers = append(config.Containers, &node.Container)
- tree.Append(parse.NewRunNode().SetName(node.Container.Name).SetDetach(true))
- }
- }
-
- // cache section
- if root.Cache != nil {
- node, ok := root.Cache.(*yaml.ContainerNode)
- if ok && !node.Disabled {
- config.Containers = append(config.Containers, &node.Container)
- tree.Append(parse.NewRunNode().SetName(node.Container.Name))
- }
- }
-
- // clone section
- if root.Clone != nil {
- node, ok := root.Clone.(*yaml.ContainerNode)
- if ok && !node.Disabled {
- config.Containers = append(config.Containers, &node.Container)
- tree.Append(parse.NewRunNode().SetName(node.Container.Name))
- }
- }
-
- // services section
- for _, container := range root.Services {
- node, ok := container.(*yaml.ContainerNode)
- if !ok || node.Disabled {
- continue
- }
-
- config.Containers = append(config.Containers, &node.Container)
- tree.Append(parse.NewRunNode().SetName(node.Container.Name).SetDetach(true))
- }
-
- // pipeline section
- for i, container := range root.Script {
- node, ok := container.(*yaml.ContainerNode)
- if !ok || node.Disabled {
- continue
- }
-
- config.Containers = append(config.Containers, &node.Container)
-
- // step 1: lookahead to see if any status=failure exist
- list := parse.NewListNode()
- for ii, next := range root.Script {
- if i >= ii {
- continue
- }
- node, ok := next.(*yaml.ContainerNode)
- if !ok || node.Disabled || !node.OnFailure() {
- continue
- }
-
- list.Append(
- parse.NewRecoverNode().SetBody(
- parse.NewRunNode().SetName(
- node.Container.Name,
- ),
- ),
- )
- }
- // step 2: if yes, collect these and append to "error" node
- if len(list.Body) == 0 {
- tree.Append(parse.NewRunNode().SetName(node.Container.Name))
- } else {
- errorNode := parse.NewErrorNode()
- errorNode.SetBody(parse.NewRunNode().SetName(node.Container.Name))
- errorNode.SetDefer(list)
- tree.Append(errorNode)
- }
- }
-
- config.Nodes = tree
- return config, nil
-}
-
-func (c *Compiler) walk(node yaml.Node) (err error) {
- for _, trans := range c.trans {
- switch v := node.(type) {
- case *yaml.BuildNode:
- err = trans.VisitBuild(v)
- case *yaml.ContainerNode:
- err = trans.VisitContainer(v)
- case *yaml.NetworkNode:
- err = trans.VisitNetwork(v)
- case *yaml.VolumeNode:
- err = trans.VisitVolume(v)
- case *yaml.RootNode:
- err = trans.VisitRoot(v)
- }
- if err != nil {
- break
- }
- }
- return err
-}
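
Taken together, the removed pieces were driven by chaining the builtin ops as compiler transforms and compiling the Yaml string into a runner spec. The sketch below uses the API exactly as it appears in the removed files (so it no longer builds once this diff lands); the transform order and Yaml snippet are illustrative, not the agent's actual wiring.

package main

import (
	"fmt"

	"github.com/drone/drone/engine/compiler"
	"github.com/drone/drone/engine/compiler/builtin"
)

func main() {
	// Chain a few of the builtin ops removed in this diff as compiler
	// transforms, then compile a Yaml string into the runner specification.
	c := compiler.New()
	c.Transforms([]compiler.Transform{
		builtin.NewWorkspaceOp("/drone", "src/github.com/octocat/hello-world"),
		builtin.NewNormalizeOp("plugins"),
		builtin.NewPullOp(true),
		builtin.NewShellOp(builtin.Linux_adm64),
	})

	spec, err := c.CompileString(`
pipeline:
  build:
    image: golang
    commands:
      - go build
`)
	if err != nil {
		fmt.Println("compile failed:", err)
		return
	}
	fmt.Println("containers:", len(spec.Containers))
}
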
diff --git a/engine/compiler/compile_test.go b/engine/compiler/compile_test.go
deleted file mode 100644
index a20d4fea6..000000000
--- a/engine/compiler/compile_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package compiler
diff --git a/engine/compiler/parse/node.go b/engine/compiler/parse/node.go
deleted file mode 100644
index 6a97159a1..000000000
--- a/engine/compiler/parse/node.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package parse
-
-const (
- NodeBuild = "build"
- NodeCache = "cache"
- NodeClone = "clone"
- NodeContainer = "container"
- NodeNetwork = "network"
- NodePlugin = "plugin"
- NodeRoot = "root"
- NodeService = "service"
- NodeShell = "shell"
- NodeVolume = "volume"
-)
-
-// NodeType identifies the type of parse tree node.
-type NodeType string
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-// String returns the string value of the Node type.
-func (t NodeType) String() string {
- return string(t)
-}
-
-// A Node is an element in the parse tree.
-type Node interface {
- Type() NodeType
- Root() *RootNode
-}
diff --git a/engine/compiler/parse/node_build.go b/engine/compiler/parse/node_build.go
deleted file mode 100644
index 158529b88..000000000
--- a/engine/compiler/parse/node_build.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package parse
-
-// BuildNode represents Docker image build instructions.
-type BuildNode struct {
- NodeType
-
- Context string
- Dockerfile string
- Args map[string]string
-
- root *RootNode
-}
-
-// Root returns the root node.
-func (n *BuildNode) Root() *RootNode { return n.root }
-
-//
-// intermediate types for yaml decoding.
-//
-
-type build struct {
- Context string
- Dockerfile string
- Args map[string]string
-}
-
-func (b *build) UnmarshalYAML(unmarshal func(interface{}) error) error {
- err := unmarshal(&b.Context)
- if err == nil {
- return nil
- }
- out := struct {
- Context string
- Dockerfile string
- Args map[string]string
- }{}
- err = unmarshal(&out)
- b.Context = out.Context
- b.Args = out.Args
- b.Dockerfile = out.Dockerfile
- return err
-}
diff --git a/engine/compiler/parse/node_container.go b/engine/compiler/parse/node_container.go
deleted file mode 100644
index 8ffad21ab..000000000
--- a/engine/compiler/parse/node_container.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package parse
-
-import (
- "fmt"
-
- "github.com/drone/drone/engine/runner"
-
- "gopkg.in/yaml.v2"
-)
-
-type Conditions struct {
- Platform []string
- Environment []string
- Event []string
- Branch []string
- Status []string
- Matrix map[string]string
-}
-
-// ContainerNode represents a Docker container.
-type ContainerNode struct {
- NodeType
-
- // Container represents the container configuration.
- Container runner.Container
- Conditions Conditions
- Disabled bool
- Commands []string
- Vargs map[string]interface{}
-
- root *RootNode
-}
-
-// Root returns the root node.
-func (n *ContainerNode) Root() *RootNode { return n.root }
-
-// OnSuccess returns true if the container should be executed
-// when the exit code of the previous step is 0.
-func (n *ContainerNode) OnSuccess() bool {
- for _, status := range n.Conditions.Status {
- if status == "success" {
- return true
- }
- }
- return false
-}
-
-// OnFailure returns true if the container should be executed
-// even when the exit code of the previous step != 0.
-func (n *ContainerNode) OnFailure() bool {
- for _, status := range n.Conditions.Status {
- if status == "failure" {
- return true
- }
- }
- return false
-}
-
-//
-// intermediate types for yaml decoding.
-//
-
-type container struct {
- Name string `yaml:"name"`
- Image string `yaml:"image"`
- Build string `yaml:"build"`
- Pull bool `yaml:"pull"`
- Privileged bool `yaml:"privileged"`
- Environment mapEqualSlice `yaml:"environment"`
- Entrypoint stringOrSlice `yaml:"entrypoint"`
- Command stringOrSlice `yaml:"command"`
- Commands stringOrSlice `yaml:"commands"`
- ExtraHosts stringOrSlice `yaml:"extra_hosts"`
- Volumes stringOrSlice `yaml:"volumes"`
- VolumesFrom stringOrSlice `yaml:"volumes_from"`
- Devices stringOrSlice `yaml:"devices"`
- Network string `yaml:"network_mode"`
- DNS stringOrSlice `yaml:"dns"`
- DNSSearch stringOrSlice `yaml:"dns_search"`
- MemSwapLimit int64 `yaml:"memswap_limit"`
- MemLimit int64 `yaml:"mem_limit"`
- CPUQuota int64 `yaml:"cpu_quota"`
- CPUShares int64 `yaml:"cpu_shares"`
- CPUSet string `yaml:"cpuset"`
- OomKillDisable bool `yaml:"oom_kill_disable"`
-
- AuthConfig struct {
- Username string `yaml:"username"`
- Password string `yaml:"password"`
- Email string `yaml:"email"`
- Token string `yaml:"registry_token"`
- } `yaml:"auth_config"`
-
- Conditions struct {
- Platform stringOrSlice `yaml:"platform"`
- Environment stringOrSlice `yaml:"environment"`
- Event stringOrSlice `yaml:"event"`
- Branch stringOrSlice `yaml:"branch"`
- Status stringOrSlice `yaml:"status"`
- Matrix map[string]string `yaml:"matrix"`
- } `yaml:"when"`
-
- Vargs map[string]interface{} `yaml:",inline"`
-}
-
-func (c *container) ToContainer() runner.Container {
- return runner.Container{
- Name: c.Name,
- Image: c.Image,
- Pull: c.Pull,
- Privileged: c.Privileged,
- Environment: c.Environment.parts,
- Entrypoint: c.Entrypoint.parts,
- Command: c.Command.parts,
- ExtraHosts: c.ExtraHosts.parts,
- Volumes: c.Volumes.parts,
- VolumesFrom: c.VolumesFrom.parts,
- Devices: c.Devices.parts,
- Network: c.Network,
- DNS: c.DNS.parts,
- DNSSearch: c.DNSSearch.parts,
- MemSwapLimit: c.MemSwapLimit,
- MemLimit: c.MemLimit,
- CPUQuota: c.CPUQuota,
- CPUShares: c.CPUShares,
- CPUSet: c.CPUSet,
- OomKillDisable: c.OomKillDisable,
- AuthConfig: runner.Auth{
- Username: c.AuthConfig.Username,
- Password: c.AuthConfig.Password,
- Email: c.AuthConfig.Email,
- Token: c.AuthConfig.Token,
- },
- }
-}
-
-func (c *container) ToConditions() Conditions {
- return Conditions{
- Platform: c.Conditions.Platform.parts,
- Environment: c.Conditions.Environment.parts,
- Event: c.Conditions.Event.parts,
- Branch: c.Conditions.Branch.parts,
- Status: c.Conditions.Status.parts,
- Matrix: c.Conditions.Matrix,
- }
-}
-
-type containerList struct {
- containers []*container
-}
-
-func (c *containerList) UnmarshalYAML(unmarshal func(interface{}) error) error {
- slice := yaml.MapSlice{}
- err := unmarshal(&slice)
- if err != nil {
- return err
- }
-
- for _, s := range slice {
- cc := container{}
-
- out, err := yaml.Marshal(s.Value)
- if err != nil {
- return err
- }
-
- err = yaml.Unmarshal(out, &cc)
- if err != nil {
- return err
- }
- if cc.Name == "" {
- cc.Name = fmt.Sprintf("%v", s.Key)
- }
- if cc.Image == "" {
- cc.Image = fmt.Sprintf("%v", s.Key)
- }
- c.containers = append(c.containers, &cc)
- }
- return err
-}
diff --git a/engine/compiler/parse/node_root.go b/engine/compiler/parse/node_root.go
deleted file mode 100644
index 0288f5f47..000000000
--- a/engine/compiler/parse/node_root.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package parse
-
-// RootNode is the root node in the parsed Yaml file.
-type RootNode struct {
- NodeType
-
- Platform string
- Base string
- Path string
- Image string
-
- Pod Node
- Build Node
- Cache Node
- Clone Node
- Script []Node
- Volumes []Node
- Networks []Node
- Services []Node
-}
-
-// NewRootNode returns a new root node.
-func NewRootNode() *RootNode {
- return &RootNode{
- NodeType: NodeRoot,
- }
-}
-
-// Root returns the root node.
-func (n *RootNode) Root() *RootNode { return n }
-
-// Returns a new Volume Node.
-func (n *RootNode) NewVolumeNode(name string) *VolumeNode {
- return &VolumeNode{
- NodeType: NodeVolume,
- Name: name,
- root: n,
- }
-}
-
-// Returns a new Network Node.
-func (n *RootNode) NewNetworkNode(name string) *NetworkNode {
- return &NetworkNode{
- NodeType: NodeNetwork,
- Name: name,
- root: n,
- }
-}
-
-// Returns a new Build Node.
-func (n *RootNode) NewBuildNode(context string) *BuildNode {
- return &BuildNode{
- NodeType: NodeBuild,
- Context: context,
- root: n,
- }
-}
-
-// Returns a new Container Plugin Node.
-func (n *RootNode) NewPluginNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodePlugin,
- root: n,
- }
-}
-
-// Returns a new Container Shell Node.
-func (n *RootNode) NewShellNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodeShell,
- root: n,
- }
-}
-
-// Returns a new Container Service Node.
-func (n *RootNode) NewServiceNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodeService,
- root: n,
- }
-}
-
-// Returns a new Container Clone Node.
-func (n *RootNode) NewCloneNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodeClone,
- root: n,
- }
-}
-
-// Returns a new Container Cache Node.
-func (n *RootNode) NewCacheNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodeCache,
- root: n,
- }
-}
-
-// Returns a new Container Node.
-func (n *RootNode) NewContainerNode() *ContainerNode {
- return &ContainerNode{
- NodeType: NodeContainer,
- root: n,
- }
-}
-
-// Walk is a function that walks through all child nodes of the RootNode
-// and invokes the Walk callback function for each Node.
-func (n *RootNode) Walk(fn WalkFunc) (err error) {
- var nodes []Node
- nodes = append(nodes, n)
- nodes = append(nodes, n.Build)
- nodes = append(nodes, n.Cache)
- nodes = append(nodes, n.Clone)
- nodes = append(nodes, n.Script...)
- nodes = append(nodes, n.Volumes...)
- nodes = append(nodes, n.Networks...)
- nodes = append(nodes, n.Services...)
- for _, node := range nodes {
- err = fn(node)
- if err != nil {
- return
- }
- }
- return
-}
-
-type WalkFunc func(Node) error
-
-//
-// intermediate types for yaml decoding.
-//
-
-type root struct {
- Workspace struct {
- Path string
- Base string
- }
- Image string
- Platform string
- Volumes volumeList
- Networks networkList
- Services containerList
- Script containerList `yaml:"pipeline"`
- Cache container
- Clone container
- Build build
-}
diff --git a/engine/compiler/parse/node_root_test.go b/engine/compiler/parse/node_root_test.go
deleted file mode 100644
index f4760109a..000000000
--- a/engine/compiler/parse/node_root_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestRootNode(t *testing.T) {
- g := goblin.Goblin(t)
- r := &RootNode{}
-
- g.Describe("Root Node", func() {
-
- g.It("should return self as root", func() {
- g.Assert(r).Equal(r.Root())
- })
-
- g.It("should create a Volume Node", func() {
- n := r.NewVolumeNode("foo")
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.Name).Equal("foo")
- g.Assert(n.String()).Equal(NodeVolume)
- g.Assert(n.Type()).Equal(NodeType(NodeVolume))
- })
-
- g.It("should create a Network Node", func() {
- n := r.NewNetworkNode("foo")
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.Name).Equal("foo")
- g.Assert(n.String()).Equal(NodeNetwork)
- g.Assert(n.Type()).Equal(NodeType(NodeNetwork))
- })
-
- g.It("should create a Plugin Node", func() {
- n := r.NewPluginNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodePlugin)
- g.Assert(n.Type()).Equal(NodeType(NodePlugin))
- })
-
- g.It("should create a Shell Node", func() {
- n := r.NewShellNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodeShell)
- g.Assert(n.Type()).Equal(NodeType(NodeShell))
- })
-
- g.It("should create a Service Node", func() {
- n := r.NewServiceNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodeService)
- g.Assert(n.Type()).Equal(NodeType(NodeService))
- })
-
- g.It("should create a Build Node", func() {
- n := r.NewBuildNode(".")
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.Context).Equal(".")
- g.Assert(n.String()).Equal(NodeBuild)
- g.Assert(n.Type()).Equal(NodeType(NodeBuild))
- })
-
- g.It("should create a Cache Node", func() {
- n := r.NewCacheNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodeCache)
- g.Assert(n.Type()).Equal(NodeType(NodeCache))
- })
-
- g.It("should create a Clone Node", func() {
- n := r.NewCloneNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodeClone)
- g.Assert(n.Type()).Equal(NodeType(NodeClone))
- })
-
- g.It("should create a Container Node", func() {
- n := r.NewContainerNode()
- g.Assert(n.Root()).Equal(r)
- g.Assert(n.String()).Equal(NodeContainer)
- g.Assert(n.Type()).Equal(NodeType(NodeContainer))
- })
- })
-}
diff --git a/engine/compiler/parse/parse.go b/engine/compiler/parse/parse.go
deleted file mode 100644
index a3be5ed32..000000000
--- a/engine/compiler/parse/parse.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package parse
-
-import (
- "gopkg.in/yaml.v2"
-)
-
-// Parse parses a Yaml file and returns a Tree structure.
-func Parse(in []byte) (*RootNode, error) {
- out := root{}
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- return nil, err
- }
-
- root := NewRootNode()
- root.Platform = out.Platform
- root.Path = out.Workspace.Path
- root.Base = out.Workspace.Base
- root.Image = out.Image
-
- // append volume nodes to tree
- for _, v := range out.Volumes.volumes {
- vv := root.NewVolumeNode(v.Name)
- vv.Driver = v.Driver
- vv.DriverOpts = v.DriverOpts
- root.Volumes = append(root.Volumes, vv)
- }
-
- // append network nodes to tree
- for _, n := range out.Networks.networks {
- nn := root.NewNetworkNode(n.Name)
- nn.Driver = n.Driver
- nn.DriverOpts = n.DriverOpts
- root.Networks = append(root.Networks, nn)
- }
-
- // add the build section
- if out.Build.Context != "" {
- root.Build = &BuildNode{
- NodeType: NodeBuild,
- Context: out.Build.Context,
- Dockerfile: out.Build.Dockerfile,
- Args: out.Build.Args,
- root: root,
- }
- }
-
- // add the cache section
- {
- cc := root.NewCacheNode()
- cc.Container = out.Cache.ToContainer()
- cc.Conditions = out.Cache.ToConditions()
- cc.Container.Name = "cache"
- cc.Vargs = out.Cache.Vargs
- root.Cache = cc
- }
-
- // add the clone section
- {
- cc := root.NewCloneNode()
- cc.Conditions = out.Clone.ToConditions()
- cc.Container = out.Clone.ToContainer()
- cc.Container.Name = "clone"
- cc.Vargs = out.Clone.Vargs
- root.Clone = cc
- }
-
- // append services
- for _, c := range out.Services.containers {
- if c.Build != "" {
- continue
- }
- cc := root.NewServiceNode()
- cc.Conditions = c.ToConditions()
- cc.Container = c.ToContainer()
- root.Services = append(root.Services, cc)
- }
-
- // append scripts
- for _, c := range out.Script.containers {
- var cc *ContainerNode
- if len(c.Commands.parts) == 0 {
- cc = root.NewPluginNode()
- } else {
- cc = root.NewShellNode()
- }
- cc.Commands = c.Commands.parts
- cc.Vargs = c.Vargs
- cc.Container = c.ToContainer()
- cc.Conditions = c.ToConditions()
- root.Script = append(root.Script, cc)
- }
-
- return root, nil
-}
-
-// ParseString parses a Yaml string and returns a Tree structure.
-func ParseString(in string) (*RootNode, error) {
- return Parse([]byte(in))
-}
diff --git a/engine/compiler/parse/parse_test.go b/engine/compiler/parse/parse_test.go
deleted file mode 100644
index 02d17af93..000000000
--- a/engine/compiler/parse/parse_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestParse(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Parser", func() {
- g.Describe("given a yaml file", func() {
-
- g.It("should unmarshal a string", func() {
- out, err := ParseString(sampleYaml)
- if err != nil {
- g.Fail(err)
- }
- g.Assert(out.Image).Equal("hello-world")
- g.Assert(out.Base).Equal("/go")
- g.Assert(out.Path).Equal("src/github.com/octocat/hello-world")
- g.Assert(out.Build.(*BuildNode).Context).Equal(".")
- g.Assert(out.Build.(*BuildNode).Dockerfile).Equal("Dockerfile")
- g.Assert(out.Cache.(*ContainerNode).Vargs["mount"]).Equal("node_modules")
- g.Assert(out.Clone.(*ContainerNode).Container.Image).Equal("git")
- g.Assert(out.Clone.(*ContainerNode).Vargs["depth"]).Equal(1)
- g.Assert(out.Volumes[0].(*VolumeNode).Name).Equal("custom")
- g.Assert(out.Volumes[0].(*VolumeNode).Driver).Equal("blockbridge")
- g.Assert(out.Networks[0].(*NetworkNode).Name).Equal("custom")
- g.Assert(out.Networks[0].(*NetworkNode).Driver).Equal("overlay")
- g.Assert(out.Services[0].(*ContainerNode).Container.Name).Equal("database")
- g.Assert(out.Services[0].(*ContainerNode).Container.Image).Equal("mysql")
- g.Assert(out.Script[0].(*ContainerNode).Container.Name).Equal("test")
- g.Assert(out.Script[0].(*ContainerNode).Container.Image).Equal("golang")
- g.Assert(out.Script[0].(*ContainerNode).Commands).Equal([]string{"go install", "go test"})
- g.Assert(out.Script[0].(*ContainerNode).String()).Equal(NodeShell)
- g.Assert(out.Script[1].(*ContainerNode).Container.Name).Equal("build")
- g.Assert(out.Script[1].(*ContainerNode).Container.Image).Equal("golang")
- g.Assert(out.Script[1].(*ContainerNode).Commands).Equal([]string{"go build"})
- g.Assert(out.Script[1].(*ContainerNode).String()).Equal(NodeShell)
- g.Assert(out.Script[2].(*ContainerNode).Container.Name).Equal("notify")
- g.Assert(out.Script[2].(*ContainerNode).Container.Image).Equal("slack")
- g.Assert(out.Script[2].(*ContainerNode).String()).Equal(NodePlugin)
- })
- })
- })
-}
-
-var sampleYaml = `
-image: hello-world
-build:
- context: .
- dockerfile: Dockerfile
-
-workspace:
- path: src/github.com/octocat/hello-world
- base: /go
-
-clone:
- image: git
- depth: 1
-
-cache:
- mount: node_modules
-
-pipeline:
- test:
- image: golang
- commands:
- - go install
- - go test
- build:
- image: golang
- commands:
- - go build
- when:
- event: push
- notify:
- image: slack
- channel: dev
- when:
- event: failure
-
-services:
- database:
- image: mysql
-
-networks:
- custom:
- driver: overlay
-
-volumes:
- custom:
- driver: blockbridge
-`
diff --git a/engine/compiler/parse/types.go b/engine/compiler/parse/types.go
deleted file mode 100644
index cf4596886..000000000
--- a/engine/compiler/parse/types.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package parse
-
-import "strings"
-
-// mapEqualSlice represents a map[string]string or a slice of
-// strings in key=value format.
-type mapEqualSlice struct {
- parts map[string]string
-}
-
-func (s *mapEqualSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
- s.parts = map[string]string{}
- err := unmarshal(&s.parts)
- if err == nil {
- return nil
- }
-
- var slice []string
- err = unmarshal(&slice)
- if err != nil {
- return err
- }
- for _, v := range slice {
- parts := strings.SplitN(v, "=", 2)
- if len(parts) == 2 {
- key := parts[0]
- val := parts[1]
- s.parts[key] = val
- }
- }
- return nil
-}
-
-// stringOrSlice represents a string or an array of strings.
-type stringOrSlice struct {
- parts []string
-}
-
-func (s *stringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var sliceType []string
- err := unmarshal(&sliceType)
- if err == nil {
- s.parts = sliceType
- return nil
- }
-
- var stringType string
- err = unmarshal(&stringType)
- if err == nil {
- sliceType = make([]string, 0, 1)
- s.parts = append(sliceType, string(stringType))
- return nil
- }
- return err
-}
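
The two Yaml helper types above accept either scalar or list syntax for the same field. A standalone sketch of the stringOrSlice half against gopkg.in/yaml.v2, reimplemented outside the removed package:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// stringOrSlice mirrors the helper above: the Yaml value may be written as a
// single string or as a list of strings.
type stringOrSlice struct {
	parts []string
}

func (s *stringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var slice []string
	if err := unmarshal(&slice); err == nil {
		s.parts = slice
		return nil
	}
	var str string
	if err := unmarshal(&str); err != nil {
		return err
	}
	s.parts = []string{str}
	return nil
}

func main() {
	var out struct {
		Commands stringOrSlice `yaml:"commands"`
	}
	if err := yaml.Unmarshal([]byte("commands: go build"), &out); err == nil {
		fmt.Println(out.Commands.parts) // [go build]
	}
	if err := yaml.Unmarshal([]byte("commands: [go build, go test]"), &out); err == nil {
		fmt.Println(out.Commands.parts) // [go build go test]
	}
}
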
diff --git a/engine/compiler/parse/types_test.go b/engine/compiler/parse/types_test.go
deleted file mode 100644
index 463a72c75..000000000
--- a/engine/compiler/parse/types_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
- "gopkg.in/yaml.v2"
-)
-
-func TestTypes(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Yaml types", func() {
- g.Describe("given a yaml file", func() {
-
- g.It("should unmarshal a string", func() {
- in := []byte("foo")
- out := stringOrSlice{}
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- g.Fail(err)
- }
- g.Assert(len(out.parts)).Equal(1)
- g.Assert(out.parts[0]).Equal("foo")
- })
-
- g.It("should unmarshal a string slice", func() {
- in := []byte("[ foo ]")
- out := stringOrSlice{}
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- g.Fail(err)
- }
- g.Assert(len(out.parts)).Equal(1)
- g.Assert(out.parts[0]).Equal("foo")
- })
-
- g.It("should throw error when invalid string slice", func() {
- in := []byte("{ }") // string value should fail parse
- out := stringOrSlice{}
- err := yaml.Unmarshal(in, &out)
- g.Assert(err != nil).IsTrue("expects error")
- })
-
- g.It("should unmarshal a map", func() {
- in := []byte("foo: bar")
- out := mapEqualSlice{}
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- g.Fail(err)
- }
- g.Assert(len(out.parts)).Equal(1)
- g.Assert(out.parts["foo"]).Equal("bar")
- })
-
- g.It("should unmarshal a map equal slice", func() {
- in := []byte("[ foo=bar ]")
- out := mapEqualSlice{}
- err := yaml.Unmarshal(in, &out)
- if err != nil {
- g.Fail(err)
- }
- g.Assert(len(out.parts)).Equal(1)
- g.Assert(out.parts["foo"]).Equal("bar")
- })
-
- g.It("should throw error when invalid map equal slice", func() {
- in := []byte("foo") // string value should fail parse
- out := mapEqualSlice{}
- err := yaml.Unmarshal(in, &out)
- g.Assert(err != nil).IsTrue("expects error")
- })
- })
- })
-}
diff --git a/engine/compiler/transform.go b/engine/compiler/transform.go
deleted file mode 100644
index a61087e58..000000000
--- a/engine/compiler/transform.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package compiler
-
-import "github.com/drone/drone/engine/compiler/parse"
-
-// Transform is used to transform nodes from the parsed Yaml file during the
-// compilation process. A Transform may be used to add, disable or alter nodes.
-type Transform interface {
- VisitRoot(*parse.RootNode) error
- VisitVolume(*parse.VolumeNode) error
- VisitNetwork(*parse.NetworkNode) error
- VisitBuild(*parse.BuildNode) error
- VisitContainer(*parse.ContainerNode) error
-}
diff --git a/engine/runner/container.go b/engine/runner/container.go
deleted file mode 100644
index e901e3b19..000000000
--- a/engine/runner/container.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package runner
-
-import "fmt"
-
-// Container defines the container configuration.
-type Container struct {
- Name string `json:"name"`
- Alias string `json:"alias"`
- Image string `json:"image"`
- Pull bool `json:"pull,omitempty"`
- AuthConfig Auth `json:"auth_config,omitempty"`
- Privileged bool `json:"privileged,omitempty"`
- WorkingDir string `json:"working_dir,omitempty"`
- Environment map[string]string `json:"environment,omitempty"`
- Entrypoint []string `json:"entrypoint,omitempty"`
- Command []string `json:"command,omitempty"`
- ExtraHosts []string `json:"extra_hosts,omitempty"`
- Volumes []string `json:"volumes,omitempty"`
- VolumesFrom []string `json:"volumes_from,omitempty"`
- Devices []string `json:"devices,omitempty"`
- Network string `json:"network_mode,omitempty"`
- DNS []string `json:"dns,omitempty"`
- DNSSearch []string `json:"dns_search,omitempty"`
- MemSwapLimit int64 `json:"memswap_limit,omitempty"`
- MemLimit int64 `json:"mem_limit,omitempty"`
- CPUQuota int64 `json:"cpu_quota,omitempty"`
- CPUShares int64 `json:"cpu_shares,omitempty"`
- CPUSet string `json:"cpuset,omitempty"`
- OomKillDisable bool `json:"oom_kill_disable,omitempty"`
-}
-
-// Validate validates the container configuration details and returns an error
-// if the validation fails.
-func (c *Container) Validate() error {
- switch {
-
- case c.Name == "":
- return fmt.Errorf("Missing container name")
- case c.Image == "":
- return fmt.Errorf("Missing container image")
- default:
- return nil
- }
-
-}
-
-// Auth provides authentication parameters to authenticate to a remote
-// container registry for image download.
-type Auth struct {
- Username string `json:"username,omitempty"`
- Password string `json:"password,omitempty"`
- Email string `json:"email,omitempty"`
- Token string `json:"registry_token,omitempty"`
-}
-
-// Volume defines a container volume.
-type Volume struct {
- Name string `json:"name,omitempty"`
- Alias string `json:"alias,omitempty"`
- Driver string `json:"driver,omitempty"`
- DriverOpts map[string]string `json:"driver_opts,omitempty"`
- External bool `json:"external,omitempty"`
-}
-
-// Network defines a container network.
-type Network struct {
- Name string `json:"name,omitempty"`
- Alias string `json:"alias,omitempty"`
- Driver string `json:"driver,omitempty"`
- DriverOpts map[string]string `json:"driver_opts,omitempty"`
- External bool `json:"external,omitempty"`
-}
diff --git a/engine/runner/container_test.go b/engine/runner/container_test.go
deleted file mode 100644
index 6fab60ee2..000000000
--- a/engine/runner/container_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package runner
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestContainer(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Container validation", func() {
-
- g.It("fails with an invalid name", func() {
- c := Container{
- Image: "golang:1.5",
- }
- err := c.Validate()
- g.Assert(err != nil).IsTrue()
- g.Assert(err.Error()).Equal("Missing container name")
- })
-
- g.It("fails with an invalid image", func() {
- c := Container{
- Name: "container_0",
- }
- err := c.Validate()
- g.Assert(err != nil).IsTrue()
- g.Assert(err.Error()).Equal("Missing container image")
- })
-
- g.It("passes with valid attributes", func() {
- c := Container{
- Name: "container_0",
- Image: "golang:1.5",
- }
- g.Assert(c.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/docker/context.go b/engine/runner/docker/context.go
deleted file mode 100644
index e19ef84b2..000000000
--- a/engine/runner/docker/context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package docker
-
-import (
- "github.com/drone/drone/engine/runner"
- "golang.org/x/net/context"
-)
-
-const key = "docker"
-
-// Setter defines a context that enables setting values.
-type Setter interface {
- Set(string, interface{})
-}
-
-// FromContext returns the Engine associated with this context.
-func FromContext(c context.Context) runner.Engine {
- return c.Value(key).(runner.Engine)
-}
-
-// ToContext adds the Engine to this context if it supports the
-// Setter interface.
-func ToContext(c Setter, d runner.Engine) {
- c.Set(key, d)
-}
diff --git a/engine/runner/docker/helper.go b/engine/runner/docker/helper.go
deleted file mode 100644
index 25b77f955..000000000
--- a/engine/runner/docker/helper.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package docker
-
-import (
- "os"
-
- "github.com/drone/drone/engine/runner"
- "github.com/samalba/dockerclient"
-)
-
-var (
- dockerHost = os.Getenv("DOCKER_HOST")
- dockerCert = os.Getenv("DOCKER_CERT_PATH")
- dockerTLS = os.Getenv("DOCKER_TLS_VERIFY")
-)
-
-func init() {
- if dockerHost == "" {
- dockerHost = "unix:///var/run/docker.sock"
- }
-}
-
-// New returns a new Docker engine using the provided Docker client.
-func New(client dockerclient.Client) runner.Engine {
- return &dockerEngine{client}
-}
-
-// NewEnv returns a new Docker engine from the DOCKER_HOST and DOCKER_CERT_PATH
-// environment variables.
-func NewEnv() (runner.Engine, error) {
- config, err := dockerclient.TLSConfigFromCertPath(dockerCert)
- if err == nil && dockerTLS != "1" {
- config.InsecureSkipVerify = true
- }
- client, err := dockerclient.NewDockerClient(dockerHost, config)
- if err != nil {
- return nil, err
- }
- return New(client), nil
-}
-
-// MustEnv returns a new Docker engine from the DOCKER_HOST and DOCKER_CERT_PATH
-// environment variables. Errors creating the Docker engine will panic.
-func MustEnv() runner.Engine {
- engine, err := NewEnv()
- if err != nil {
- panic(err)
- }
- return engine
-}
diff --git a/engine/runner/engine.go b/engine/runner/engine.go
deleted file mode 100644
index 5f24cc324..000000000
--- a/engine/runner/engine.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package runner
-
-//go:generate mockery -name Engine -output mock -case=underscore
-
-import "io"
-
-// Engine defines the container runtime engine.
-type Engine interface {
- // VolumeCreate(*Volume) (string, error)
- // VolumeRemove(string) error
- ContainerStart(*Container) (string, error)
- ContainerStop(string) error
- ContainerRemove(string) error
- ContainerWait(string) (*State, error)
- ContainerLogs(string) (io.ReadCloser, error)
-}
-
-// State defines the state of the container.
-type State struct {
- ExitCode int // container exit code
- OOMKilled bool // container exited due to oom error
-}
diff --git a/engine/runner/helper.go b/engine/runner/helper.go
deleted file mode 100644
index 1b49caf22..000000000
--- a/engine/runner/helper.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package runner
-
-import (
- "encoding/json"
- "io/ioutil"
-)
-
-// Parse parses a raw file containing a JSON encoded format of an intermediate
-// representation of the pipeline.
-func Parse(data []byte) (*Spec, error) {
- v := &Spec{}
- err := json.Unmarshal(data, v)
- return v, err
-}
-
-// ParseFile parses a file containing a JSON encoded format of an intermediate
-// representation of the pipeline.
-func ParseFile(filename string) (*Spec, error) {
- out, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return Parse(out)
-}
diff --git a/engine/runner/helper_test.go b/engine/runner/helper_test.go
deleted file mode 100644
index 2a60efc2a..000000000
--- a/engine/runner/helper_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package runner
-
-import (
- "io/ioutil"
- "os"
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestHelper(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Parsing", func() {
-
- g.It("should unmarhsal file []byte", func() {
- res, err := Parse(sample)
- if err != nil {
- t.Error(err)
- return
- }
- g.Assert(err == nil).IsTrue("expect file parsed")
- g.Assert(len(res.Containers)).Equal(2)
- g.Assert(len(res.Volumes)).Equal(1)
- })
-
- g.It("should unmarshal from file", func() {
- temp, _ := ioutil.TempFile("", "spec_")
- defer os.Remove(temp.Name())
-
- ioutil.WriteFile(temp.Name(), sample, 0700)
-
- _, err := ParseFile(temp.Name())
- if err != nil {
- t.Error(err)
- return
- }
- g.Assert(err == nil).IsTrue("expect file parsed")
- })
-
- g.It("should error when file not found", func() {
- _, err := ParseFile("/tmp/foo/bar/dummy/file.json")
- g.Assert(err == nil).IsFalse("expect file not found error")
- })
- })
-}
-
-// invalid json representation, simulate parsing error
-var invalid = []byte(`[]`)
-
-// valid json representation, verify parsing
-var sample = []byte(`{
- "containers": [
- {
- "name": "container_0",
- "image": "node:latest"
- },
- {
- "name": "container_1",
- "image": "golang:latest"
- }
- ],
- "volumes": [
- {
- "name": "volume_0"
- }
- ],
- "program": {
- "type": "list",
- "body": [
- {
- "type": "defer",
- "body": {
- "type": "recover",
- "body": {
- "type": "run",
- "name": "container_0"
- }
- },
- "defer": {
- "type": "parallel",
- "body": [
- {
- "type": "run",
- "name": "container_1"
- },
- {
- "type": "run",
- "name": "container_1"
- }
- ],
- "limit": 2
- }
- }
- ]
- }
-}`)
diff --git a/engine/runner/parse/node.go b/engine/runner/parse/node.go
deleted file mode 100644
index 0c8b7050b..000000000
--- a/engine/runner/parse/node.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package parse
-
-const (
- NodeList = "list"
- NodeDefer = "defer"
- NodeError = "error"
- NodeRecover = "recover"
- NodeParallel = "parallel"
- NodeRun = "run"
-)
-
-// NodeType identifies the type of a parse tree node.
-type NodeType string
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-// String returns the string value of the Node type.
-func (t NodeType) String() string {
- return string(t)
-}
-
-// A Node is an element in the parse tree.
-type Node interface {
- Type() NodeType
- Validate() error
-}
diff --git a/engine/runner/parse/node_defer.go b/engine/runner/parse/node_defer.go
deleted file mode 100644
index bc6935f2a..000000000
--- a/engine/runner/parse/node_defer.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package parse
-
-import "fmt"
-
-// DeferNode executes the child node, and then executes the deffered node.
-// The deffered node is guaranteed to execute, even when the child node fails.
-type DeferNode struct {
- NodeType `json:"type"`
-
- Body Node `json:"body"` // evaluate node
- Defer Node `json:"defer"` // defer evaluation of node.
-}
-
-// NewDeferNode returns a new DeferNode.
-func NewDeferNode() *DeferNode {
- return &DeferNode{NodeType: NodeDefer}
-}
-
-func (n *DeferNode) SetBody(node Node) *DeferNode {
- n.Body = node
- return n
-}
-
-func (n *DeferNode) SetDefer(node Node) *DeferNode {
- n.Defer = node
- return n
-}
-
-func (n *DeferNode) Validate() error {
- switch {
- case n.NodeType != NodeDefer:
- return fmt.Errorf("Defer Node uses an invalid type")
- case n.Body == nil:
- return fmt.Errorf("Defer Node body is empty")
- case n.Defer == nil:
- return fmt.Errorf("Defer Node defer is empty")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_defer_test.go b/engine/runner/parse/node_defer_test.go
deleted file mode 100644
index 9de1bf886..000000000
--- a/engine/runner/parse/node_defer_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestDeferNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("DeferNode", func() {
- g.It("should set body and defer node", func() {
- node0 := NewRunNode()
- node1 := NewRunNode()
-
- defer0 := NewDeferNode()
- defer1 := defer0.SetBody(node0)
- defer2 := defer0.SetDefer(node1)
- g.Assert(defer0.Type().String()).Equal(NodeDefer)
- g.Assert(defer0.Body).Equal(node0)
- g.Assert(defer0.Defer).Equal(node1)
- g.Assert(defer0).Equal(defer1)
- g.Assert(defer0).Equal(defer2)
- })
-
- g.It("should fail validation when invalid type", func() {
- defer0 := DeferNode{}
- err := defer0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Defer Node uses an invalid type")
- })
-
- g.It("should fail validation when empty body", func() {
- defer0 := NewDeferNode()
- err := defer0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Defer Node body is empty")
- })
-
- g.It("should fail validation when empty defer", func() {
- defer0 := NewDeferNode()
- defer0.SetBody(NewRunNode())
- err := defer0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Defer Node defer is empty")
- })
-
- g.It("should pass validation", func() {
- defer0 := NewDeferNode()
- defer0.SetBody(NewRunNode())
- defer0.SetDefer(NewRunNode())
- g.Assert(defer0.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/node_error.go b/engine/runner/parse/node_error.go
deleted file mode 100644
index cb3f55e7f..000000000
--- a/engine/runner/parse/node_error.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package parse
-
-import "fmt"
-
-// ErrorNode executes the body node, and then executes the error node if
-// the body node errors. This is similar to defer but only executes on error.
-type ErrorNode struct {
- NodeType `json:"type"`
-
- Body Node `json:"body"` // evaluate node
- Defer Node `json:"defer"` // defer evaluation of node on error.
-}
-
-// NewErrorNode returns a new ErrorNode.
-func NewErrorNode() *ErrorNode {
- return &ErrorNode{NodeType: NodeError}
-}
-
-func (n *ErrorNode) SetBody(node Node) *ErrorNode {
- n.Body = node
- return n
-}
-
-func (n *ErrorNode) SetDefer(node Node) *ErrorNode {
- n.Defer = node
- return n
-}
-
-func (n *ErrorNode) Validate() error {
- switch {
- case n.NodeType != NodeError:
- return fmt.Errorf("Error Node uses an invalid type")
- case n.Body == nil:
- return fmt.Errorf("Error Node body is empty")
- case n.Defer == nil:
- return fmt.Errorf("Error Node defer is empty")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_error_test.go b/engine/runner/parse/node_error_test.go
deleted file mode 100644
index f68cce858..000000000
--- a/engine/runner/parse/node_error_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestErrorNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("ErrorNode", func() {
- g.It("should set body and error node", func() {
- node0 := NewRunNode()
- node1 := NewRunNode()
-
- error0 := NewErrorNode()
- error1 := error0.SetBody(node0)
- error2 := error0.SetDefer(node1)
- g.Assert(error0.Type().String()).Equal(NodeError)
- g.Assert(error0.Body).Equal(node0)
- g.Assert(error0.Defer).Equal(node1)
- g.Assert(error0).Equal(error1)
- g.Assert(error0).Equal(error2)
- })
-
- g.It("should fail validation when invalid type", func() {
- error0 := ErrorNode{}
- err := error0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Error Node uses an invalid type")
- })
-
- g.It("should fail validation when empty body", func() {
- error0 := NewErrorNode()
- err := error0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Error Node body is empty")
- })
-
- g.It("should fail validation when empty error", func() {
- error0 := NewErrorNode()
- error0.SetBody(NewRunNode())
- err := error0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Error Node defer is empty")
- })
-
- g.It("should pass validation", func() {
- error0 := NewErrorNode()
- error0.SetBody(NewRunNode())
- error0.SetDefer(NewRunNode())
- g.Assert(error0.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/node_list.go b/engine/runner/parse/node_list.go
deleted file mode 100644
index 514cd7bae..000000000
--- a/engine/runner/parse/node_list.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package parse
-
-import "fmt"
-
-// ListNode serially executes a list of child nodes.
-type ListNode struct {
- NodeType `json:"type"`
-
- // Body is the list of child nodes
- Body []Node `json:"body"`
-}
-
-// NewListNode returns a new ListNode.
-func NewListNode() *ListNode {
- return &ListNode{NodeType: NodeList}
-}
-
-// Append appens a child node to the list.
-func (n *ListNode) Append(node Node) *ListNode {
- n.Body = append(n.Body, node)
- return n
-}
-
-func (n *ListNode) Validate() error {
- switch {
- case n.NodeType != NodeList:
- return fmt.Errorf("List Node uses an invalid type")
- case len(n.Body) == 0:
- return fmt.Errorf("List Node body is empty")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_list_test.go b/engine/runner/parse/node_list_test.go
deleted file mode 100644
index 5c0ad3281..000000000
--- a/engine/runner/parse/node_list_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestListNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("ListNode", func() {
- g.It("should append nodes", func() {
- node := NewRunNode()
-
- list0 := NewListNode()
- list1 := list0.Append(node)
- g.Assert(list0.Type().String()).Equal(NodeList)
- g.Assert(list0.Body[0]).Equal(node)
- g.Assert(list0).Equal(list1)
- })
-
- g.It("should fail validation when invalid type", func() {
- list := ListNode{}
- err := list.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("List Node uses an invalid type")
- })
-
- g.It("should fail validation when empty body", func() {
- list := NewListNode()
- err := list.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("List Node body is empty")
- })
-
- g.It("should pass validation", func() {
- node := NewRunNode()
- list := NewListNode()
- list.Append(node)
- g.Assert(list.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/node_parallel.go b/engine/runner/parse/node_parallel.go
deleted file mode 100644
index a587235e5..000000000
--- a/engine/runner/parse/node_parallel.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package parse
-
-import "fmt"
-
-// ParallelNode executes a list of child nodes in parallel.
-type ParallelNode struct {
- NodeType `json:"type"`
-
- Body []Node `json:"body"` // nodes for parallel evaluation.
- Limit int `json:"limit"` // limit for parallel evaluation.
-}
-
-func NewParallelNode() *ParallelNode {
- return &ParallelNode{NodeType: NodeParallel}
-}
-
-func (n *ParallelNode) Append(node Node) *ParallelNode {
- n.Body = append(n.Body, node)
- return n
-}
-
-func (n *ParallelNode) SetLimit(limit int) *ParallelNode {
- n.Limit = limit
- return n
-}
-
-func (n *ParallelNode) Validate() error {
- switch {
- case n.NodeType != NodeParallel:
- return fmt.Errorf("Parallel Node uses an invalid type")
- case len(n.Body) == 0:
- return fmt.Errorf("Parallel Node body is empty")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_parallel_test.go b/engine/runner/parse/node_parallel_test.go
deleted file mode 100644
index 9c0f0fb74..000000000
--- a/engine/runner/parse/node_parallel_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestParallelNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("ParallelNode", func() {
- g.It("should append nodes", func() {
- node := NewRunNode()
-
- parallel0 := NewParallelNode()
- parallel1 := parallel0.Append(node)
- g.Assert(parallel0.Type().String()).Equal(NodeParallel)
- g.Assert(parallel0.Body[0]).Equal(node)
- g.Assert(parallel0).Equal(parallel1)
- })
-
- g.It("should fail validation when invalid type", func() {
- node := ParallelNode{}
- err := node.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Parallel Node uses an invalid type")
- })
-
- g.It("should fail validation when empty body", func() {
- node := NewParallelNode()
- err := node.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Parallel Node body is empty")
- })
-
- g.It("should pass validation", func() {
- node := NewParallelNode().Append(NewRunNode())
- g.Assert(node.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/node_recover.go b/engine/runner/parse/node_recover.go
deleted file mode 100644
index 9cac51a12..000000000
--- a/engine/runner/parse/node_recover.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package parse
-
-import "fmt"
-
-type RecoverNode struct {
- NodeType `json:"type"`
-
- Body Node `json:"body"` // evaluate node and catch all errors.
-}
-
-func NewRecoverNode() *RecoverNode {
- return &RecoverNode{NodeType: NodeRecover}
-}
-
-func (n *RecoverNode) SetBody(node Node) *RecoverNode {
- n.Body = node
- return n
-}
-
-func (n *RecoverNode) Validate() error {
- switch {
- case n.NodeType != NodeRecover:
- return fmt.Errorf("Recover Node uses an invalid type")
- case n.Body == nil:
- return fmt.Errorf("Recover Node body is empty")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_recover_test.go b/engine/runner/parse/node_recover_test.go
deleted file mode 100644
index 20248655e..000000000
--- a/engine/runner/parse/node_recover_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestRecoverNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("RecoverNode", func() {
- g.It("should set body", func() {
- node0 := NewRunNode()
-
- recover0 := NewRecoverNode()
- recover1 := recover0.SetBody(node0)
- g.Assert(recover0.Type().String()).Equal(NodeRecover)
- g.Assert(recover0.Body).Equal(node0)
- g.Assert(recover0).Equal(recover1)
- })
-
- g.It("should fail validation when invalid type", func() {
- recover0 := RecoverNode{}
- err := recover0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Recover Node uses an invalid type")
- })
-
- g.It("should fail validation when empty body", func() {
- recover0 := NewRecoverNode()
- err := recover0.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Recover Node body is empty")
- })
-
- g.It("should pass validation", func() {
- recover0 := NewRecoverNode()
- recover0.SetBody(NewRunNode())
- g.Assert(recover0.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/node_run.go b/engine/runner/parse/node_run.go
deleted file mode 100644
index dedc90731..000000000
--- a/engine/runner/parse/node_run.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package parse
-
-import "fmt"
-
-type RunNode struct {
- NodeType `json:"type"`
-
- Name string `json:"name"`
- Detach bool `json:"detach,omitempty"`
- Silent bool `json:"silent,omitempty"`
-}
-
-func (n *RunNode) SetName(name string) *RunNode {
- n.Name = name
- return n
-}
-
-func (n *RunNode) SetDetach(detach bool) *RunNode {
- n.Detach = detach
- return n
-}
-
-func (n *RunNode) SetSilent(silent bool) *RunNode {
- n.Silent = silent
- return n
-}
-
-func NewRunNode() *RunNode {
- return &RunNode{NodeType: NodeRun}
-}
-
-func (n *RunNode) Validate() error {
- switch {
- case n.NodeType != NodeRun:
- return fmt.Errorf("Run Node uses an invalid type")
- case n.Name == "":
- return fmt.Errorf("Run Node has an invalid name")
- default:
- return nil
- }
-}
diff --git a/engine/runner/parse/node_run_test.go b/engine/runner/parse/node_run_test.go
deleted file mode 100644
index 9051249db..000000000
--- a/engine/runner/parse/node_run_test.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package parse
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestRunNode(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("RunNode", func() {
- g.It("should set container name for lookup", func() {
- node0 := NewRunNode()
- node1 := node0.SetName("foo")
-
- g.Assert(node0.Type().String()).Equal(NodeRun)
- g.Assert(node0.Name).Equal("foo")
- g.Assert(node0).Equal(node1)
- })
-
- g.It("should fail validation when invalid type", func() {
- node := RunNode{}
- err := node.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Run Node uses an invalid type")
- })
-
- g.It("should fail validation when invalid name", func() {
- node := NewRunNode()
- err := node.Validate()
- g.Assert(err == nil).IsFalse()
- g.Assert(err.Error()).Equal("Run Node has an invalid name")
- })
-
- g.It("should pass validation", func() {
- node := NewRunNode().SetName("foo")
- g.Assert(node.Validate() == nil).IsTrue()
- })
- })
-}
diff --git a/engine/runner/parse/parse.go b/engine/runner/parse/parse.go
deleted file mode 100644
index b027cff34..000000000
--- a/engine/runner/parse/parse.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package parse
-
-import "encoding/json"
-
-// Tree is the intermediate representation of a pipeline.
-type Tree struct {
- *ListNode // top-level Tree node
-}
-
-// New allocates a new Tree.
-func NewTree() *Tree {
- return &Tree{
- NewListNode(),
- }
-}
-
-// Parse parses a JSON encoded Tree.
-func Parse(data []byte) (*Tree, error) {
- tree := &Tree{}
- err := tree.UnmarshalJSON(data)
- return tree, err
-}
-
-// MarshalJSON implements the Marshaler interface and returns
-// a JSON encoded representation of the Tree.
-func (t *Tree) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.ListNode)
-}
-
-// UnmarshalJSON implements the Unmarshaler interface and returns
-// a Tree from a JSON representation.
-func (t *Tree) UnmarshalJSON(data []byte) error {
- block, err := decodeList(data)
- if err != nil {
- return nil
- }
- t.ListNode = block.(*ListNode)
- return nil
-}
-
-//
-// below are custom decoding functions. We cannot use the default json
-// decoder because the tree structure uses interfaces and the json decoder
-// has difficulty ascertaining the interface type when decoding.
-//
-
-func decodeNode(data []byte) (Node, error) {
- node := &nodeType{}
-
- err := json.Unmarshal(data, node)
- if err != nil {
- return nil, err
- }
- switch node.Type {
- case NodeList:
- return decodeList(data)
- case NodeDefer:
- return decodeDefer(data)
- case NodeError:
- return decodeError(data)
- case NodeRecover:
- return decodeRecover(data)
- case NodeParallel:
- return decodeParallel(data)
- case NodeRun:
- return decodeRun(data)
- }
- return nil, nil
-}
-
-func decodeNodes(data []json.RawMessage) ([]Node, error) {
- var nodes []Node
- for _, d := range data {
- node, err := decodeNode(d)
- if err != nil {
- return nil, err
- }
- nodes = append(nodes, node)
- }
- return nodes, nil
-}
-
-func decodeList(data []byte) (Node, error) {
- v := &nodeList{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- b, err := decodeNodes(v.Body)
- if err != nil {
- return nil, err
- }
- n := NewListNode()
- n.Body = b
- return n, nil
-}
-
-func decodeDefer(data []byte) (Node, error) {
- v := &nodeDefer{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- b, err := decodeNode(v.Body)
- if err != nil {
- return nil, err
- }
- d, err := decodeNode(v.Defer)
- if err != nil {
- return nil, err
- }
- n := NewDeferNode()
- n.Body = b
- n.Defer = d
- return n, nil
-}
-
-func decodeError(data []byte) (Node, error) {
- v := &nodeError{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- b, err := decodeNode(v.Body)
- if err != nil {
- return nil, err
- }
- d, err := decodeNode(v.Defer)
- if err != nil {
- return nil, err
- }
- n := NewErrorNode()
- n.Body = b
- n.Defer = d
- return n, nil
-}
-
-func decodeRecover(data []byte) (Node, error) {
- v := &nodeRecover{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- b, err := decodeNode(v.Body)
- if err != nil {
- return nil, err
- }
- n := NewRecoverNode()
- n.Body = b
- return n, nil
-}
-
-func decodeParallel(data []byte) (Node, error) {
- v := &nodeParallel{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- b, err := decodeNodes(v.Body)
- if err != nil {
- return nil, err
- }
- n := NewParallelNode()
- n.Body = b
- n.Limit = v.Limit
- return n, nil
-}
-
-func decodeRun(data []byte) (Node, error) {
- v := &nodeRun{}
- err := json.Unmarshal(data, v)
- if err != nil {
- return nil, err
- }
- return &RunNode{NodeRun, v.Name, v.Detach, v.Silent}, nil
-}
-
-//
-// below are intermediate representations of the node structures
-// since we cannot simply encode / decode using the built-in json
-// encoding and decoder.
-//
-
-type nodeType struct {
- Type NodeType `json:"type"`
-}
-
-type nodeDefer struct {
- Type NodeType `json:"type"`
- Body json.RawMessage `json:"body"`
- Defer json.RawMessage `json:"defer"`
-}
-
-type nodeError struct {
- Type NodeType `json:"type"`
- Body json.RawMessage `json:"body"`
- Defer json.RawMessage `json:"defer"`
-}
-
-type nodeList struct {
- Type NodeType `json:"type"`
- Body []json.RawMessage `json:"body"`
-}
-
-type nodeRecover struct {
- Type NodeType `json:"type"`
- Body json.RawMessage `json:"body"`
-}
-
-type nodeParallel struct {
- Type NodeType `json:"type"`
- Body []json.RawMessage `json:"body"`
- Limit int `json:"limit"`
-}
-
-type nodeRun struct {
- Type NodeType `json:"type"`
- Name string `json:"name"`
- Detach bool `json:"detach,omitempty"`
- Silent bool `json:"silent,omitempty"`
-}
diff --git a/engine/runner/parse/parse_test.go b/engine/runner/parse/parse_test.go
deleted file mode 100644
index b384882d1..000000000
--- a/engine/runner/parse/parse_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package parse
-
-import (
- "bytes"
- "encoding/json"
- "reflect"
- "testing"
-)
-
-func TestUnmarshal(t *testing.T) {
-
- node1 := NewRunNode().SetName("foo")
- node2 := NewRecoverNode().SetBody(node1)
-
- node3 := NewRunNode().SetName("bar")
- node4 := NewRunNode().SetName("bar")
-
- node5 := NewParallelNode().
- Append(node3).
- Append(node4).
- SetLimit(2)
-
- node6 := NewDeferNode().
- SetBody(node2).
- SetDefer(node5)
-
- tree := NewTree()
- tree.Append(node6)
-
- encoded, err := json.MarshalIndent(tree, "", "\t")
- if err != nil {
- t.Error(err)
- }
-
- if !bytes.Equal(encoded, sample) {
- t.Errorf("Want to marshal Tree to %s, got %s",
- string(sample),
- string(encoded),
- )
- }
-
- parsed, err := Parse(encoded)
- if err != nil {
- t.Error(err)
- }
-
- if !reflect.DeepEqual(tree, parsed) {
- t.Errorf("Want to marsnal and then unmarshal Tree")
- }
-}
-
-var sample = []byte(`{
- "type": "list",
- "body": [
- {
- "type": "defer",
- "body": {
- "type": "recover",
- "body": {
- "type": "run",
- "name": "foo"
- }
- },
- "defer": {
- "type": "parallel",
- "body": [
- {
- "type": "run",
- "name": "bar"
- },
- {
- "type": "run",
- "name": "bar"
- }
- ],
- "limit": 2
- }
- }
- ]
-}`)
diff --git a/engine/runner/pipe.go b/engine/runner/pipe.go
deleted file mode 100644
index d49654297..000000000
--- a/engine/runner/pipe.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package runner
-
-import "fmt"
-
-// Pipe returns a buffered pipe that is connected to the console output.
-type Pipe struct {
- lines chan *Line
- eof chan bool
-}
-
-// Next returns the next Line of console output.
-func (p *Pipe) Next() *Line {
- select {
- case line := <-p.lines:
- return line
- case <-p.eof:
- return nil
- }
-}
-
-// Close closes the pipe of console output.
-func (p *Pipe) Close() {
- go func() {
- p.eof <- true
- }()
-}
-
-func newPipe(buffer int) *Pipe {
- return &Pipe{
- lines: make(chan *Line, buffer),
- eof: make(chan bool),
- }
-}
-
-// Line is a line of console output.
-type Line struct {
- Proc string `json:"proc,omitempty"`
- Time int64 `json:"time,omitempty"`
- Type int `json:"type,omitempty"`
- Pos int `json:"pos,omityempty"`
- Out string `json:"out,omitempty"`
-}
-
-func (l *Line) String() string {
- return fmt.Sprintf("[%s:L%v:%vs] %s", l.Proc, l.Pos, l.Time, l.Out)
-}
-
-// TODO(bradrydzewski) consider an alternate buffer impelmentation based on the
-// x.crypto ssh buffer https://github.com/golang/crypto/blob/master/ssh/buffer.go
diff --git a/engine/runner/pipe_test.go b/engine/runner/pipe_test.go
deleted file mode 100644
index d7be32945..000000000
--- a/engine/runner/pipe_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package runner
-
-import (
- "sync"
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestPipe(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Pipe", func() {
- g.It("should get next line from buffer", func() {
- line := &Line{
- Proc: "redis",
- Pos: 1,
- Out: "starting redis server",
- }
- pipe := newPipe(10)
- pipe.lines <- line
- next := pipe.Next()
- g.Assert(next).Equal(line)
- })
-
- g.It("should get null line on buffer closed", func() {
- pipe := newPipe(10)
-
- var wg sync.WaitGroup
- wg.Add(1)
-
- go func() {
- next := pipe.Next()
- g.Assert(next == nil).IsTrue("line should be nil")
- wg.Done()
- }()
-
- pipe.Close()
- wg.Wait()
- })
-
- g.Describe("Line output", func() {
- g.It("should prefix string() with metadata", func() {
- line := Line{
- Proc: "redis",
- Time: 60,
- Pos: 1,
- Out: "starting redis server",
- }
- g.Assert(line.String()).Equal("[redis:L1:60s] starting redis server")
- })
- })
- })
-}
diff --git a/engine/runner/runner.go b/engine/runner/runner.go
deleted file mode 100644
index 7bad01840..000000000
--- a/engine/runner/runner.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package runner
-
-import (
- "bufio"
- "fmt"
- "time"
-
- "github.com/drone/drone/engine/runner/parse"
-
- "golang.org/x/net/context"
-)
-
-// NoContext is the default context you should supply if not using your own
-// context.Context
-var NoContext = context.TODO()
-
-// Tracer defines a tracing function that is invoked prior to creating and
-// running the container.
-type Tracer func(c *Container) error
-
-// Config defines the configuration for creating the Runner.
-type Config struct {
- Tracer Tracer
- Engine Engine
-
- // Buffer defines the size of the buffer for the channel to which the
- // console output is streamed.
- Buffer uint
-}
-
-// Runner creates a build Runner using the specific configuration for the given
-// Context and Specification.
-func (c *Config) Runner(ctx context.Context, spec *Spec) *Runner {
-
- // TODO(bradyrdzewski) we should make a copy of the configuration parameters
- // instead of a direct reference. This helps avoid any race conditions or
- //unexpected behavior if the Config changes.
- return &Runner{
- ctx: ctx,
- conf: c,
- spec: spec,
- errc: make(chan error),
- pipe: newPipe(int(c.Buffer) + 1),
- }
-}
-
-type Runner struct {
- ctx context.Context
- conf *Config
- spec *Spec
- pipe *Pipe
- errc chan (error)
-
- containers []string
- volumes []string
- networks []string
-}
-
-// Run starts the build runner but does not wait for it to complete. The Wait
-// method will return the exit code and release associated resources once the
-// running containers exit.
-func (r *Runner) Run() {
-
- go func() {
- r.setup()
- err := r.exec(r.spec.Nodes.ListNode)
- r.pipe.Close()
- r.cancel()
- r.teardown()
- r.errc <- err
- }()
-
- go func() {
- <-r.ctx.Done()
- r.cancel()
- }()
-}
-
-// Wait waits for the runner to exit.
-func (r *Runner) Wait() error {
- return <-r.errc
-}
-
-// Pipe returns a Pipe that is connected to the console output stream.
-func (r *Runner) Pipe() *Pipe {
- return r.pipe
-}
-
-func (r *Runner) exec(node parse.Node) error {
- switch v := node.(type) {
- case *parse.ListNode:
- return r.execList(v)
- case *parse.DeferNode:
- return r.execDefer(v)
- case *parse.ErrorNode:
- return r.execError(v)
- case *parse.RecoverNode:
- return r.execRecover(v)
- case *parse.ParallelNode:
- return r.execParallel(v)
- case *parse.RunNode:
- return r.execRun(v)
- }
- return fmt.Errorf("runner: unexepected node %s", node)
-}
-
-func (r *Runner) execList(node *parse.ListNode) error {
- for _, n := range node.Body {
- err := r.exec(n)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *Runner) execDefer(node *parse.DeferNode) error {
- err1 := r.exec(node.Body)
- err2 := r.exec(node.Defer)
- if err1 != nil {
- return err1
- }
- return err2
-}
-
-func (r *Runner) execError(node *parse.ErrorNode) error {
- err := r.exec(node.Body)
- if err != nil {
- r.exec(node.Defer)
- }
- return err
-}
-
-func (r *Runner) execRecover(node *parse.RecoverNode) error {
- r.exec(node.Body)
- return nil
-}
-
-func (r *Runner) execParallel(node *parse.ParallelNode) error {
- errc := make(chan error)
-
- for _, n := range node.Body {
- go func(node parse.Node) {
- errc <- r.exec(node)
- }(n)
- }
-
- var err error
- for i := 0; i < len(node.Body); i++ {
- select {
- case cerr := <-errc:
- if cerr != nil {
- err = cerr
- }
- }
- }
-
- return err
-}
-
-func (r *Runner) execRun(node *parse.RunNode) error {
- container, err := r.spec.lookupContainer(node.Name)
- if err != nil {
- return err
- }
- if r.conf.Tracer != nil {
- err := r.conf.Tracer(container)
- switch {
- case err == ErrSkip:
- return nil
- case err != nil:
- return err
- }
- }
- // TODO(bradrydzewski) there is potential here for a race condition where
- // the context is cancelled just after this line, resulting in the container
- // still being started.
- if r.ctx.Err() != nil {
- return err
- }
-
- name, err := r.conf.Engine.ContainerStart(container)
- if err != nil {
- return err
- }
- r.containers = append(r.containers, name)
-
- go func() {
- if node.Silent {
- return
- }
- rc, err := r.conf.Engine.ContainerLogs(name)
- if err != nil {
- return
- }
- defer rc.Close()
-
- num := 0
- now := time.Now().UTC()
- scanner := bufio.NewScanner(rc)
- for scanner.Scan() {
- r.pipe.lines <- &Line{
- Proc: container.Alias,
- Time: int64(time.Since(now).Seconds()),
- Pos: num,
- Out: scanner.Text(),
- }
- num++
- }
- }()
-
- // exit when running container in detached mode in background
- if node.Detach {
- return nil
- }
-
- state, err := r.conf.Engine.ContainerWait(name)
- if err != nil {
- return err
- }
- if state.OOMKilled {
- return &OomError{name}
- } else if state.ExitCode != 0 {
- return &ExitError{name, state.ExitCode}
- }
- return nil
-}
-
-func (r *Runner) setup() {
- // this is where we will setup network and volumes
-}
-
-func (r *Runner) teardown() {
- // TODO(bradrydzewski) this is not yet thread safe.
- for _, container := range r.containers {
- r.conf.Engine.ContainerRemove(container)
- }
-}
-
-func (r *Runner) cancel() {
- // TODO(bradrydzewski) this is not yet thread safe.
- for _, container := range r.containers {
- r.conf.Engine.ContainerStop(container)
- }
-}
diff --git a/engine/runner/runner_test.go b/engine/runner/runner_test.go
deleted file mode 100644
index 09a3ecd67..000000000
--- a/engine/runner/runner_test.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package runner
-
-import "testing"
-
-func TestRunner(t *testing.T) {
- t.Skip()
-}
diff --git a/engine/runner/spec.go b/engine/runner/spec.go
deleted file mode 100644
index 1f78a001b..000000000
--- a/engine/runner/spec.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package runner
-
-import (
- "fmt"
-
- "github.com/drone/drone/engine/runner/parse"
-)
-
-// Spec defines the pipeline configuration and exeuction.
-type Spec struct {
- // Volumes defines a list of all container volumes.
- Volumes []*Volume `json:"volumes,omitempty"`
-
- // Networks defines a list of all container networks.
- Networks []*Network `json:"networks,omitempty"`
-
- // Containers defines a list of all containers in the pipeline.
- Containers []*Container `json:"containers,omitempty"`
-
- // Nodes defines the container execution tree.
- Nodes *parse.Tree `json:"program,omitempty"`
-}
-
-// lookupContainer is a helper funciton that returns the named container from
-// the slice of containers.
-func (s *Spec) lookupContainer(name string) (*Container, error) {
- for _, container := range s.Containers {
- if container.Name == name {
- return container, nil
- }
- }
- return nil, fmt.Errorf("runner: unknown container %s", name)
-}
diff --git a/engine/runner/spec_test.go b/engine/runner/spec_test.go
deleted file mode 100644
index ba627000f..000000000
--- a/engine/runner/spec_test.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package runner
-
-import (
- "testing"
-
- "github.com/franela/goblin"
-)
-
-func TestSpec(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Spec file", func() {
-
- g.Describe("when looking up a container", func() {
-
- spec := Spec{}
- spec.Containers = append(spec.Containers, &Container{
- Name: "golang",
- })
-
- g.It("should find and return the container", func() {
- c, err := spec.lookupContainer("golang")
- g.Assert(err == nil).IsTrue("error should be nil")
- g.Assert(c).Equal(spec.Containers[0])
- })
-
- g.It("should return an error when not found", func() {
- c, err := spec.lookupContainer("node")
- g.Assert(err == nil).IsFalse("should return error")
- g.Assert(c == nil).IsTrue("should return nil container")
- })
-
- })
- })
-}
diff --git a/model/job.go b/model/job.go
index 607d690f7..b8d2bbd12 100644
--- a/model/job.go
+++ b/model/job.go
@@ -6,7 +6,7 @@ type Job struct {
BuildID int64 `json:"-" meddler:"job_build_id"`
NodeID int64 `json:"-" meddler:"job_node_id"`
Number int `json:"number" meddler:"job_number"`
- Error string `json:"error" meddler:"-"`
+ Error string `json:"error" meddler:"job_error"`
Status string `json:"status" meddler:"job_status"`
ExitCode int `json:"exit_code" meddler:"job_exit_code"`
Enqueued int64 `json:"enqueued_at" meddler:"job_enqueued"`
diff --git a/server/hook.go b/server/hook.go
index a2f293f58..a026d88c4 100644
--- a/server/hook.go
+++ b/server/hook.go
@@ -152,7 +152,7 @@ func PostHook(c *gin.Context) {
// verify the branches can be built vs skipped
branches := yaml.ParseBranch(raw)
- if !branches.Matches(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
+ if !branches.Match(build.Branch) && build.Event != model.EventTag && build.Event != model.EventDeploy {
c.String(200, "Branch does not match restrictions defined in yaml")
return
}
diff --git a/server/queue.go b/server/queue.go
index 2230125a1..e8afd8e6f 100644
--- a/server/queue.go
+++ b/server/queue.go
@@ -91,6 +91,7 @@ func Update(c *gin.Context) {
job.Finished = work.Job.Finished
job.Status = work.Job.Status
job.ExitCode = work.Job.ExitCode
+ job.Error = work.Job.Error
if build.Status == model.StatusPending {
build.Status = model.StatusRunning
diff --git a/store/datastore/ddl/mysql/4.sql b/store/datastore/ddl/mysql/4.sql
new file mode 100644
index 000000000..ea4fc4576
--- /dev/null
+++ b/store/datastore/ddl/mysql/4.sql
@@ -0,0 +1,9 @@
+-- +migrate Up
+
+ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
+
+UPDATE jobs SET job_error = '' WHERE job_error IS NULL;
+
+-- +migrate Down
+
+ALTER TABLE jobs DROP COLUMN job_error;
diff --git a/store/datastore/ddl/postgres/4.sql b/store/datastore/ddl/postgres/4.sql
new file mode 100644
index 000000000..b50f43cb1
--- /dev/null
+++ b/store/datastore/ddl/postgres/4.sql
@@ -0,0 +1,9 @@
+-- +migrate Up
+
+ALTER TABLE jobs ADD COLUMN job_error VARCHAR(500);
+
+UPDATE jobs SET job_error = '';
+
+-- +migrate Down
+
+ALTER TABLE jobs DROP COLUMN job_error;
diff --git a/store/datastore/ddl/sqlite3/4.sql b/store/datastore/ddl/sqlite3/4.sql
new file mode 100644
index 000000000..a57b6b103
--- /dev/null
+++ b/store/datastore/ddl/sqlite3/4.sql
@@ -0,0 +1,9 @@
+-- +migrate Up
+
+ALTER TABLE jobs ADD COLUMN job_error TEXT;
+
+UPDATE jobs SET job_error = '';
+
+-- +migrate Down
+
+ALTER TABLE jobs DROP COLUMN job_error;
diff --git a/template/amber/build.amber b/template/amber/build.amber
index 55a117c8b..32161bafc 100644
--- a/template/amber/build.amber
+++ b/template/amber/build.amber
@@ -59,11 +59,11 @@ block content
| pending assignment to a worker
div[class="msg-running"]
.hidden ? $job.Status != "running"
- | started
+ | started
span[data-livestamp=$job.Started]
div[class="msg-finished"]
.hidden ? $job.Finished == 0
- | finished
+ | finished
span[data-livestamp=$job.Finished]
div[class="msg-exited"]
.hidden ? $job.Finished == 0
@@ -75,9 +75,12 @@ block content
button.btn.btn-info.hidden#cancel cancel
div.col-md-8
- pre#output
- button.tail#tail
- i.material-icons expand_more
+ if Job.Error != ""
+ div.alert.alert-danger #{Job.Error}
+ else
+ pre#output
+ button.tail#tail
+ i.material-icons expand_more
block append scripts
script
@@ -88,4 +91,3 @@ block append scripts
var status = #{json(Job.Status)};
var view = new JobViewModel(repo, build, job, status);
-
diff --git a/yaml/branch.go b/yaml/branch.go
index ae426313a..dbb04fd01 100644
--- a/yaml/branch.go
+++ b/yaml/branch.go
@@ -1,77 +1,18 @@
package yaml
-import (
- "path/filepath"
-
- "gopkg.in/yaml.v2"
-)
-
-type Branch struct {
- Include []string `yaml:"include"`
- Exclude []string `yaml:"exclude"`
-}
+import "gopkg.in/yaml.v2"
// ParseBranch parses the branch section of the Yaml document.
-func ParseBranch(in []byte) *Branch {
- return parseBranch(in)
+func ParseBranch(in []byte) Constraint {
+ out := struct {
+ Constraint Constraint `yaml:"branches"`
+ }{}
+
+ yaml.Unmarshal(in, &out)
+ return out.Constraint
}
// ParseBranchString parses the branch section of the Yaml document.
-func ParseBranchString(in string) *Branch {
+func ParseBranchString(in string) Constraint {
return ParseBranch([]byte(in))
}
-
-// Matches returns true if the branch matches the include patterns and
-// does not match any of the exclude patterns.
-func (b *Branch) Matches(branch string) bool {
- // when no includes or excludes automatically match
- if len(b.Include) == 0 && len(b.Exclude) == 0 {
- return true
- }
-
- // exclusions are processed first. So we can include everything and
- // then selectively exclude certain sub-patterns.
- for _, pattern := range b.Exclude {
- if pattern == branch {
- return false
- }
- if ok, _ := filepath.Match(pattern, branch); ok {
- return false
- }
- }
-
- for _, pattern := range b.Include {
- if pattern == branch {
- return true
- }
- if ok, _ := filepath.Match(pattern, branch); ok {
- return true
- }
- }
-
- return false
-}
-
-func parseBranch(in []byte) *Branch {
- out1 := struct {
- Branch struct {
- Include stringOrSlice `yaml:"include"`
- Exclude stringOrSlice `yaml:"exclude"`
- } `yaml:"branches"`
- }{}
-
- out2 := struct {
- Include stringOrSlice `yaml:"branches"`
- }{}
-
- yaml.Unmarshal(in, &out1)
- yaml.Unmarshal(in, &out2)
-
- return &Branch{
- Exclude: out1.Branch.Exclude.Slice(),
- Include: append(
- out1.Branch.Include.Slice(),
- out2.Include.Slice()...,
- ),
- }
-}
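
A minimal usage sketch for the rewritten helpers above (illustrative only; the sample branch document and the printed results are assumptions, not part of the patch). ParseBranchString now returns a Constraint, and Match replaces the removed Branch.Matches call used by server/hook.go:

package main

import (
	"fmt"

	"github.com/drone/drone/yaml"
)

func main() {
	// shorthand branches section; ParseBranchString now yields a
	// yaml.Constraint instead of the removed *Branch type.
	branches := yaml.ParseBranchString(`branches: [ master, "feature/*" ]`)

	fmt.Println(branches.Match("master"))      // true, exact include
	fmt.Println(branches.Match("feature/foo")) // true, glob include
	fmt.Println(branches.Match("develop"))     // false, not in the include list
}
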
diff --git a/yaml/branch_test.go b/yaml/branch_test.go
index 9525ad30c..32055bbac 100644
--- a/yaml/branch_test.go
+++ b/yaml/branch_test.go
@@ -13,62 +13,32 @@ func TestBranch(t *testing.T) {
g.It("Should parse and match emtpy", func() {
branch := ParseBranchString("")
- g.Assert(branch.Matches("master")).IsTrue()
+ g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match", func() {
branch := ParseBranchString("branches: { include: [ master, develop ] }")
- g.Assert(branch.Matches("master")).IsTrue()
+ g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match shortand", func() {
branch := ParseBranchString("branches: [ master, develop ]")
- g.Assert(branch.Matches("master")).IsTrue()
+ g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match shortand string", func() {
branch := ParseBranchString("branches: master")
- g.Assert(branch.Matches("master")).IsTrue()
+ g.Assert(branch.Match("master")).IsTrue()
})
g.It("Should parse and match exclude", func() {
branch := ParseBranchString("branches: { exclude: [ master, develop ] }")
- g.Assert(branch.Matches("master")).IsFalse()
+ g.Assert(branch.Match("master")).IsFalse()
})
g.It("Should parse and match exclude shorthand", func() {
branch := ParseBranchString("branches: { exclude: master }")
- g.Assert(branch.Matches("master")).IsFalse()
- })
-
- g.It("Should match include", func() {
- b := Branch{}
- b.Include = []string{"master"}
- g.Assert(b.Matches("master")).IsTrue()
- })
-
- g.It("Should match include pattern", func() {
- b := Branch{}
- b.Include = []string{"feature/*"}
- g.Assert(b.Matches("feature/foo")).IsTrue()
- })
-
- g.It("Should fail to match include pattern", func() {
- b := Branch{}
- b.Include = []string{"feature/*"}
- g.Assert(b.Matches("master")).IsFalse()
- })
-
- g.It("Should match exclude", func() {
- b := Branch{}
- b.Exclude = []string{"master"}
- g.Assert(b.Matches("master")).IsFalse()
- })
-
- g.It("Should match exclude pattern", func() {
- b := Branch{}
- b.Exclude = []string{"feature/*"}
- g.Assert(b.Matches("feature/foo")).IsFalse()
+ g.Assert(branch.Match("master")).IsFalse()
})
})
}
diff --git a/yaml/build.go b/yaml/build.go
new file mode 100644
index 000000000..ec3892a07
--- /dev/null
+++ b/yaml/build.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Build represents Docker image build instructions.
+type Build struct {
+ Context string
+ Dockerfile string
+ Args map[string]string
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (b *Build) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ err := unmarshal(&b.Context)
+ if err == nil {
+ return nil
+ }
+ out := struct {
+ Context string
+ Dockerfile string
+ Args map[string]string
+ }{}
+ err = unmarshal(&out)
+ b.Context = out.Context
+ b.Args = out.Args
+ b.Dockerfile = out.Dockerfile
+ return err
+}
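
A brief sketch of the two document shapes the unmarshaler above accepts (illustrative only; the build-arg name and value are made up). The short form sets only the context, while the long form fills context, dockerfile and args:

package main

import (
	"fmt"

	droneyaml "github.com/drone/drone/yaml"
	"gopkg.in/yaml.v2"
)

func main() {
	// short form: the scalar value becomes the build context.
	short := droneyaml.Build{}
	if err := yaml.Unmarshal([]byte(`.`), &short); err != nil {
		panic(err)
	}
	fmt.Println(short.Context) // "."

	// long form: context, dockerfile and args decode into their fields.
	full := droneyaml.Build{}
	doc := []byte(`{ context: ., dockerfile: Dockerfile, args: { VERSION: "1.0" } }`)
	if err := yaml.Unmarshal(doc, &full); err != nil {
		panic(err)
	}
	fmt.Println(full.Context, full.Dockerfile, full.Args["VERSION"])
}
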
diff --git a/engine/compiler/parse/node_build_test.go b/yaml/build_test.go
similarity index 88%
rename from engine/compiler/parse/node_build_test.go
rename to yaml/build_test.go
index 223edbedd..69c9a1fb9 100644
--- a/engine/compiler/parse/node_build_test.go
+++ b/yaml/build_test.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"testing"
@@ -7,7 +7,7 @@ import (
"gopkg.in/yaml.v2"
)
-func TestBuildNode(t *testing.T) {
+func TestBuild(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Build", func() {
@@ -15,7 +15,7 @@ func TestBuildNode(t *testing.T) {
g.It("should unmarshal", func() {
in := []byte(".")
- out := build{}
+ out := Build{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
@@ -25,7 +25,7 @@ func TestBuildNode(t *testing.T) {
g.It("should unmarshal shorthand", func() {
in := []byte("{ context: ., dockerfile: Dockerfile }")
- out := build{}
+ out := Build{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
diff --git a/yaml/config.go b/yaml/config.go
new file mode 100644
index 000000000..7f05cab55
--- /dev/null
+++ b/yaml/config.go
@@ -0,0 +1,67 @@
+package yaml
+
+import "gopkg.in/yaml.v2"
+
+// Workspace represents the build workspace.
+type Workspace struct {
+ Base string
+ Path string
+}
+
+// Config represents the build configuration Yaml document.
+type Config struct {
+ Image string
+ Build *Build
+ Workspace *Workspace
+ Pipeline []*Container
+ Services []*Container
+ Volumes []*Volume
+ Networks []*Network
+}
+
+// ParseString parses the Yaml configuration document.
+func ParseString(data string) (*Config, error) {
+ return Parse([]byte(data))
+}
+
+// Parse parses the Yaml configuration document.
+func Parse(data []byte) (*Config, error) {
+ v := struct {
+ Image string
+ Build *Build
+ Workspace *Workspace
+ Services containerList
+ Pipeline containerList
+ Networks networkList
+ Volumes volumeList
+ }{}
+
+ err := yaml.Unmarshal(data, &v)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, c := range v.Services.containers {
+ c.Detached = true
+ }
+
+ return &Config{
+ Image: v.Image,
+ Build: v.Build,
+ Workspace: v.Workspace,
+ Services: v.Services.containers,
+ Pipeline: v.Pipeline.containers,
+ Networks: v.Networks.networks,
+ Volumes: v.Volumes.volumes,
+ }, nil
+}
+
+type config struct {
+ Image string
+ Build *Build
+ Workspace *Workspace
+ Services containerList
+ Pipeline containerList
+ Networks networkList
+ Volumes volumeList
+}
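
The test file that follows covers most of the parser; one behavior it does not assert is that Parse marks every service container as detached. A small illustrative sketch (the pipeline content is made up):

package main

import (
	"fmt"

	"github.com/drone/drone/yaml"
)

func main() {
	conf, err := yaml.ParseString(`
pipeline:
  test:
    image: golang
    commands:
      - go test

services:
  database:
    image: mysql
`)
	if err != nil {
		panic(err)
	}

	// pipeline steps run in the foreground, services are flagged detached.
	fmt.Println(conf.Pipeline[0].Name, conf.Pipeline[0].Detached) // test false
	fmt.Println(conf.Services[0].Name, conf.Services[0].Detached) // database true
}
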
diff --git a/yaml/config_test.go b/yaml/config_test.go
new file mode 100644
index 000000000..5e5e780cc
--- /dev/null
+++ b/yaml/config_test.go
@@ -0,0 +1,83 @@
+package yaml
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestParse(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Parser", func() {
+ g.Describe("Given a yaml file", func() {
+
+ g.It("Should unmarshal a string", func() {
+ out, err := ParseString(sampleYaml)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(out.Image).Equal("hello-world")
+ g.Assert(out.Workspace.Base).Equal("/go")
+ g.Assert(out.Workspace.Path).Equal("src/github.com/octocat/hello-world")
+ g.Assert(out.Build.Context).Equal(".")
+ g.Assert(out.Build.Dockerfile).Equal("Dockerfile")
+ g.Assert(out.Volumes[0].Name).Equal("custom")
+ g.Assert(out.Volumes[0].Driver).Equal("blockbridge")
+ g.Assert(out.Networks[0].Name).Equal("custom")
+ g.Assert(out.Networks[0].Driver).Equal("overlay")
+ g.Assert(out.Services[0].Name).Equal("database")
+ g.Assert(out.Services[0].Image).Equal("mysql")
+ g.Assert(out.Pipeline[0].Name).Equal("test")
+ g.Assert(out.Pipeline[0].Image).Equal("golang")
+ g.Assert(out.Pipeline[0].Commands).Equal([]string{"go install", "go test"})
+ g.Assert(out.Pipeline[1].Name).Equal("build")
+ g.Assert(out.Pipeline[1].Image).Equal("golang")
+ g.Assert(out.Pipeline[1].Commands).Equal([]string{"go build"})
+ g.Assert(out.Pipeline[2].Name).Equal("notify")
+ g.Assert(out.Pipeline[2].Image).Equal("slack")
+ })
+ })
+ })
+}
+
+var sampleYaml = `
+image: hello-world
+build:
+ context: .
+ dockerfile: Dockerfile
+
+workspace:
+ path: src/github.com/octocat/hello-world
+ base: /go
+
+pipeline:
+ test:
+ image: golang
+ commands:
+ - go install
+ - go test
+ build:
+ image: golang
+ commands:
+ - go build
+ when:
+ event: push
+ notify:
+ image: slack
+ channel: dev
+ when:
+ event: failure
+
+services:
+ database:
+ image: mysql
+
+networks:
+ custom:
+ driver: overlay
+
+volumes:
+ custom:
+ driver: blockbridge
+`
diff --git a/yaml/constraint.go b/yaml/constraint.go
new file mode 100644
index 000000000..9c71fc2b6
--- /dev/null
+++ b/yaml/constraint.go
@@ -0,0 +1,152 @@
+package yaml
+
+import (
+ "path/filepath"
+
+ "github.com/drone/drone/yaml/types"
+)
+
+// Constraints define constraints for container execution.
+type Constraints struct {
+ Platform Constraint
+ Environment Constraint
+ Event Constraint
+ Branch Constraint
+ Status Constraint
+ Matrix ConstraintMap
+}
+
+// Match returns true if all constraints match the given input. If any single
+// constraint fails, false is returned.
+func (c *Constraints) Match(arch, target, event, branch, status string, matrix map[string]string) bool {
+ return c.Platform.Match(arch) &&
+ c.Environment.Match(target) &&
+ c.Event.Match(event) &&
+ c.Branch.Match(branch) &&
+ c.Status.Match(status) &&
+ c.Matrix.Match(matrix)
+}
+
+// Constraint defines an individual constraint.
+type Constraint struct {
+ Include []string
+ Exclude []string
+}
+
+// Match returns true if the string matches the include patterns and does not
+// match any of the exclude patterns.
+func (c *Constraint) Match(v string) bool {
+ if c.Excludes(v) {
+ return false
+ }
+ if c.Includes(v) {
+ return true
+ }
+ if len(c.Include) == 0 {
+ return true
+ }
+ return false
+}
+
+// Includes returns true if the string matches the include patterns.
+func (c *Constraint) Includes(v string) bool {
+ for _, pattern := range c.Include {
+ if ok, _ := filepath.Match(pattern, v); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Excludes returns true if the string matches the exclude patterns.
+func (c *Constraint) Excludes(v string) bool {
+ for _, pattern := range c.Exclude {
+ if ok, _ := filepath.Match(pattern, v); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (c *Constraint) UnmarshalYAML(unmarshal func(interface{}) error) error {
+
+ var out1 = struct {
+ Include types.StringOrSlice
+ Exclude types.StringOrSlice
+ }{}
+
+ var out2 types.StringOrSlice
+
+ unmarshal(&out1)
+ unmarshal(&out2)
+
+ c.Exclude = out1.Exclude.Slice()
+ c.Include = append(
+ out1.Include.Slice(),
+ out2.Slice()...,
+ )
+ return nil
+}
+
+// ConstraintMap defines an individual constraint for key value structures.
+type ConstraintMap struct {
+ Include map[string]string
+ Exclude map[string]string
+}
+
+// Match returns true if the params match the include key values and do not
+// match any of the exclude key values.
+func (c *ConstraintMap) Match(params map[string]string) bool {
+ // when no includes or excludes automatically match
+ if len(c.Include) == 0 && len(c.Exclude) == 0 {
+ return true
+ }
+
+ // exclusions are processed first, so we can include everything and then
+ // selectively exclude certain keys.
+ if len(c.Exclude) != 0 {
+ var matches int
+
+ for key, val := range c.Exclude {
+ if params[key] == val {
+ matches++
+ }
+ }
+ if matches == len(c.Exclude) {
+ return false
+ }
+ }
+
+ for key, val := range c.Include {
+ if params[key] != val {
+ return false
+ }
+ }
+
+ return true
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (c *ConstraintMap) UnmarshalYAML(unmarshal func(interface{}) error) error {
+
+ out1 := struct {
+ Include map[string]string
+ Exclude map[string]string
+ }{
+ Include: map[string]string{},
+ Exclude: map[string]string{},
+ }
+
+ out2 := map[string]string{}
+
+ unmarshal(&out1)
+ unmarshal(&out2)
+
+ c.Include = out1.Include
+ c.Exclude = out1.Exclude
+ for k, v := range out2 {
+ c.Include[k] = v
+ }
+ return nil
+}
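
The tests below exercise Constraint and ConstraintMap individually; the aggregate Constraints.Match is easiest to see with a short illustrative sketch (all field values here are assumptions):

package main

import (
	"fmt"

	"github.com/drone/drone/yaml"
)

func main() {
	when := yaml.Constraints{
		Event:  yaml.Constraint{Include: []string{"push", "tag"}},
		Branch: yaml.Constraint{Include: []string{"master"}},
		Matrix: yaml.ConstraintMap{Include: map[string]string{"go": "1.6"}},
	}

	axis := map[string]string{"go": "1.6"}

	// every individual constraint must match; the zero-value Platform,
	// Environment and Status constraints match any input.
	fmt.Println(when.Match("linux/amd64", "", "push", "master", "success", axis)) // true

	// a single mismatch (event not in the include list) fails the whole set.
	fmt.Println(when.Match("linux/amd64", "", "pull_request", "master", "success", axis)) // false
}
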
diff --git a/yaml/constraint_test.go b/yaml/constraint_test.go
new file mode 100644
index 000000000..22630de68
--- /dev/null
+++ b/yaml/constraint_test.go
@@ -0,0 +1,142 @@
+package yaml
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestConstraint(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Constraint", func() {
+
+ g.It("Should parse and match emtpy", func() {
+ c := parseConstraint("")
+ g.Assert(c.Match("master")).IsTrue()
+ })
+
+ g.It("Should parse and match", func() {
+ c := parseConstraint("{ include: [ master, develop ] }")
+ g.Assert(c.Include[0]).Equal("master")
+ g.Assert(c.Include[1]).Equal("develop")
+ g.Assert(c.Match("master")).IsTrue()
+ })
+
+ g.It("Should parse and match shortand", func() {
+ c := parseConstraint("[ master, develop ]")
+ g.Assert(c.Include[0]).Equal("master")
+ g.Assert(c.Include[1]).Equal("develop")
+ g.Assert(c.Match("master")).IsTrue()
+ })
+
+ g.It("Should parse and match shortand string", func() {
+ c := parseConstraint("master")
+ g.Assert(c.Include[0]).Equal("master")
+ g.Assert(c.Match("master")).IsTrue()
+ })
+
+ g.It("Should parse and match exclude", func() {
+ c := parseConstraint("{ exclude: [ master, develop ] }")
+ g.Assert(c.Exclude[0]).Equal("master")
+ g.Assert(c.Exclude[1]).Equal("develop")
+ g.Assert(c.Match("master")).IsFalse()
+ })
+
+ g.It("Should parse and match exclude shorthand", func() {
+ c := parseConstraint("{ exclude: master }")
+ g.Assert(c.Exclude[0]).Equal("master")
+ g.Assert(c.Match("master")).IsFalse()
+ })
+
+ g.It("Should match include", func() {
+ b := Constraint{}
+ b.Include = []string{"master"}
+ g.Assert(b.Match("master")).IsTrue()
+ })
+
+ g.It("Should match include pattern", func() {
+ b := Constraint{}
+ b.Include = []string{"feature/*"}
+ g.Assert(b.Match("feature/foo")).IsTrue()
+ })
+
+ g.It("Should fail to match include pattern", func() {
+ b := Constraint{}
+ b.Include = []string{"feature/*"}
+ g.Assert(b.Match("master")).IsFalse()
+ })
+
+ g.It("Should match exclude", func() {
+ b := Constraint{}
+ b.Exclude = []string{"master"}
+ g.Assert(b.Match("master")).IsFalse()
+ })
+
+ g.It("Should match exclude pattern", func() {
+ b := Constraint{}
+ b.Exclude = []string{"feature/*"}
+ g.Assert(b.Match("feature/foo")).IsFalse()
+ })
+
+ g.It("Should match when eclude patterns mismatch", func() {
+ b := Constraint{}
+ b.Exclude = []string{"foo"}
+ g.Assert(b.Match("bar")).IsTrue()
+ })
+ })
+}
+
+func TestConstraintMap(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Constraint Map", func() {
+ g.It("Should parse and match emtpy", func() {
+ p := map[string]string{"golang": "1.5", "redis": "3.2"}
+ c := parseConstraintMap("")
+ g.Assert(c.Match(p)).IsTrue()
+ })
+
+ g.It("Should parse and match", func() {
+ p := map[string]string{"golang": "1.5", "redis": "3.2"}
+ c := parseConstraintMap("{ include: { golang: 1.5 } }")
+ g.Assert(c.Include["golang"]).Equal("1.5")
+ g.Assert(c.Match(p)).IsTrue()
+ })
+
+ g.It("Should parse and match shortand", func() {
+ p := map[string]string{"golang": "1.5", "redis": "3.2"}
+ c := parseConstraintMap("{ golang: 1.5 }")
+ g.Assert(c.Include["golang"]).Equal("1.5")
+ g.Assert(c.Match(p)).IsTrue()
+ })
+
+ g.It("Should parse and match exclude", func() {
+ p := map[string]string{"golang": "1.5", "redis": "3.2"}
+ c := parseConstraintMap("{ exclude: { golang: 1.5 } }")
+ g.Assert(c.Exclude["golang"]).Equal("1.5")
+ g.Assert(c.Match(p)).IsFalse()
+ })
+
+ g.It("Should parse and mismatch exclude", func() {
+ p := map[string]string{"golang": "1.5", "redis": "3.2"}
+ c := parseConstraintMap("{ exclude: { golang: 1.5, redis: 2.8 } }")
+ g.Assert(c.Exclude["golang"]).Equal("1.5")
+ g.Assert(c.Exclude["redis"]).Equal("2.8")
+ g.Assert(c.Match(p)).IsTrue()
+ })
+ })
+}
+
+func parseConstraint(s string) *Constraint {
+ c := &Constraint{}
+ yaml.Unmarshal([]byte(s), c)
+ return c
+}
+
+func parseConstraintMap(s string) *ConstraintMap {
+ c := &ConstraintMap{}
+ yaml.Unmarshal([]byte(s), c)
+ return c
+}
diff --git a/yaml/container.go b/yaml/container.go
new file mode 100644
index 000000000..012be8a0f
--- /dev/null
+++ b/yaml/container.go
@@ -0,0 +1,158 @@
+package yaml
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/yaml/types"
+ "gopkg.in/yaml.v2"
+)
+
+// Auth defines Docker authentication credentials.
+type Auth struct {
+ Username string
+ Password string
+ Email string
+}
+
+// Container defines a Docker container.
+type Container struct {
+ ID string
+ Name string
+ Image string
+ Build string
+ Pull bool
+ AuthConfig Auth
+ Detached bool
+ Disabled bool
+ Privileged bool
+ WorkingDir string
+ Environment map[string]string
+ Entrypoint []string
+ Command []string
+ Commands []string
+ ExtraHosts []string
+ Volumes []string
+ VolumesFrom []string
+ Devices []string
+ Network string
+ DNS []string
+ DNSSearch []string
+ MemSwapLimit int64
+ MemLimit int64
+ CPUQuota int64
+ CPUShares int64
+ CPUSet string
+ OomKillDisable bool
+ Constraints Constraints
+
+ Vargs map[string]interface{}
+}
+
+// container is an intermediate type used for decoding a container in a format
+// compatible with docker-compose.yml.
+//
+// The docker-compose format relies on several custom Yaml types that are
+// awkward to work with directly, so containers are decoded into this
+// intermediate type first and then converted to the Container type above.
+type container struct {
+ Name string `yaml:"name"`
+ Image string `yaml:"image"`
+ Build string `yaml:"build"`
+ Pull bool `yaml:"pull"`
+ Privileged bool `yaml:"privileged"`
+ Environment types.MapEqualSlice `yaml:"environment"`
+ Entrypoint types.StringOrSlice `yaml:"entrypoint"`
+ Command types.StringOrSlice `yaml:"command"`
+ Commands types.StringOrSlice `yaml:"commands"`
+ ExtraHosts types.StringOrSlice `yaml:"extra_hosts"`
+ Volumes types.StringOrSlice `yaml:"volumes"`
+ VolumesFrom types.StringOrSlice `yaml:"volumes_from"`
+ Devices types.StringOrSlice `yaml:"devices"`
+ Network string `yaml:"network_mode"`
+ DNS types.StringOrSlice `yaml:"dns"`
+ DNSSearch types.StringOrSlice `yaml:"dns_search"`
+ MemSwapLimit int64 `yaml:"memswap_limit"`
+ MemLimit int64 `yaml:"mem_limit"`
+ CPUQuota int64 `yaml:"cpu_quota"`
+ CPUShares int64 `yaml:"cpu_shares"`
+ CPUSet string `yaml:"cpuset"`
+ OomKillDisable bool `yaml:"oom_kill_disable"`
+
+ AuthConfig struct {
+ Username string `yaml:"username"`
+ Password string `yaml:"password"`
+ Email string `yaml:"email"`
+ Token string `yaml:"registry_token"`
+ } `yaml:"auth_config"`
+
+ Constraints Constraints `yaml:"when"`
+
+ Vargs map[string]interface{} `yaml:",inline"`
+}
+
+// containerList is an intermediate type used for decoding a slice of containers
+// in a format compatible with docker-compose.yml
+type containerList struct {
+ containers []*Container
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (c *containerList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ slice := yaml.MapSlice{}
+ err := unmarshal(&slice)
+ if err != nil {
+ return err
+ }
+
+ for _, s := range slice {
+ cc := container{}
+
+ out, merr := yaml.Marshal(s.Value)
+ if merr != nil {
+ return merr
+ }
+
+ err = yaml.Unmarshal(out, &cc)
+ if err != nil {
+ return err
+ }
+ if cc.Name == "" {
+ cc.Name = fmt.Sprintf("%v", s.Key)
+ }
+ if cc.Image == "" {
+ cc.Image = fmt.Sprintf("%v", s.Key)
+ }
+ c.containers = append(c.containers, &Container{
+ Name: cc.Name,
+ Image: cc.Image,
+ Build: cc.Build,
+ Pull: cc.Pull,
+ Privileged: cc.Privileged,
+ Environment: cc.Environment.Map(),
+ Entrypoint: cc.Entrypoint.Slice(),
+ Command: cc.Command.Slice(),
+ Commands: cc.Commands.Slice(),
+ ExtraHosts: cc.ExtraHosts.Slice(),
+ Volumes: cc.Volumes.Slice(),
+ VolumesFrom: cc.VolumesFrom.Slice(),
+ Devices: cc.Devices.Slice(),
+ Network: cc.Network,
+ DNS: cc.DNS.Slice(),
+ DNSSearch: cc.DNSSearch.Slice(),
+ MemSwapLimit: cc.MemSwapLimit,
+ MemLimit: cc.MemLimit,
+ CPUQuota: cc.CPUQuota,
+ CPUShares: cc.CPUShares,
+ CPUSet: cc.CPUSet,
+ OomKillDisable: cc.OomKillDisable,
+ Vargs: cc.Vargs,
+ AuthConfig: Auth{
+ Username: cc.AuthConfig.Username,
+ Password: cc.AuthConfig.Password,
+ Email: cc.AuthConfig.Email,
+ },
+ Constraints: cc.Constraints,
+ })
+ }
+ return err
+}
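+
+// exampleContainerList is an illustrative sketch (not used by the package)
+// showing how a docker-compose style mapping decodes: the map key becomes the
+// container name and, when no image is given, the image as well.
+func exampleContainerList() ([]*Container, error) {
+	data := []byte(`
+build:
+  image: golang:1.6
+  commands:
+    - go build
+    - go test
+`)
+	list := containerList{}
+	if err := yaml.Unmarshal(data, &list); err != nil {
+		return nil, err
+	}
+	// list.containers[0].Name == "build", Image == "golang:1.6"
+	return list.containers, nil
+}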
diff --git a/engine/compiler/parse/node_container_test.go b/yaml/container_test.go
similarity index 81%
rename from engine/compiler/parse/node_container_test.go
rename to yaml/container_test.go
index 352e98099..6d0af800b 100644
--- a/engine/compiler/parse/node_container_test.go
+++ b/yaml/container_test.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"testing"
@@ -28,15 +28,15 @@ func TestContainerNode(t *testing.T) {
g.Assert(c.Build).Equal(".")
g.Assert(c.Pull).Equal(true)
g.Assert(c.Privileged).Equal(true)
- g.Assert(c.Entrypoint.parts).Equal([]string{"/bin/sh"})
- g.Assert(c.Command.parts).Equal([]string{"yes"})
- g.Assert(c.Commands.parts).Equal([]string{"whoami"})
- g.Assert(c.ExtraHosts.parts).Equal([]string{"foo.com"})
- g.Assert(c.Volumes.parts).Equal([]string{"/foo:/bar"})
- g.Assert(c.VolumesFrom.parts).Equal([]string{"foo"})
- g.Assert(c.Devices.parts).Equal([]string{"/dev/tty0"})
+ g.Assert(c.Entrypoint).Equal([]string{"/bin/sh"})
+ g.Assert(c.Command).Equal([]string{"yes"})
+ g.Assert(c.Commands).Equal([]string{"whoami"})
+ g.Assert(c.ExtraHosts).Equal([]string{"foo.com"})
+ g.Assert(c.Volumes).Equal([]string{"/foo:/bar"})
+ g.Assert(c.VolumesFrom).Equal([]string{"foo"})
+ g.Assert(c.Devices).Equal([]string{"/dev/tty0"})
g.Assert(c.Network).Equal("bridge")
- g.Assert(c.DNS.parts).Equal([]string{"8.8.8.8"})
+ g.Assert(c.DNS).Equal([]string{"8.8.8.8"})
g.Assert(c.MemSwapLimit).Equal(int64(1))
g.Assert(c.MemLimit).Equal(int64(2))
g.Assert(c.CPUQuota).Equal(int64(3))
diff --git a/engine/compiler/parse/node_network.go b/yaml/network.go
similarity index 56%
rename from engine/compiler/parse/node_network.go
rename to yaml/network.go
index b78a4bb7e..d49e86e3e 100644
--- a/engine/compiler/parse/node_network.go
+++ b/yaml/network.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"fmt"
@@ -6,26 +6,8 @@ import (
"gopkg.in/yaml.v2"
)
-// NetworkNode represents a Docker network.
-type NetworkNode struct {
- NodeType
- root *RootNode
-
- Name string
- Driver string
- DriverOpts map[string]string
-}
-
-// Root returns the root node.
-func (n *NetworkNode) Root() *RootNode { return n.root }
-
-//
-// intermediate types for yaml decoding.
-//
-
-// network is an intermediate type used for decoding a networks in a format
-// compatible with docker-compose.yml
-type network struct {
+// Network defines a Docker network.
+type Network struct {
Name string
Driver string
DriverOpts map[string]string `yaml:"driver_opts"`
@@ -34,9 +16,10 @@ type network struct {
// networkList is an intermediate type used for decoding a slice of networks
// in a format compatible with docker-compose.yml
type networkList struct {
- networks []*network
+ networks []*Network
}
+// UnmarshalYAML implements custom Yaml unmarshaling.
func (n *networkList) UnmarshalYAML(unmarshal func(interface{}) error) error {
slice := yaml.MapSlice{}
err := unmarshal(&slice)
@@ -45,11 +28,11 @@ func (n *networkList) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
for _, s := range slice {
- nn := network{}
+ nn := Network{}
- out, err := yaml.Marshal(s.Value)
- if err != nil {
- return err
+ out, merr := yaml.Marshal(s.Value)
+ if merr != nil {
+ return merr
}
err = yaml.Unmarshal(out, &nn)
diff --git a/engine/compiler/parse/node_network_test.go b/yaml/network_test.go
similarity index 85%
rename from engine/compiler/parse/node_network_test.go
rename to yaml/network_test.go
index c4b1ca4fe..4fe3c8636 100644
--- a/engine/compiler/parse/node_network_test.go
+++ b/yaml/network_test.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"testing"
@@ -7,7 +7,7 @@ import (
"gopkg.in/yaml.v2"
)
-func TestNetworkNode(t *testing.T) {
+func TestNetworks(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Networks", func() {
@@ -38,13 +38,13 @@ func TestNetworkNode(t *testing.T) {
g.It("should unmarshal and use default driver", func() {
in := []byte("foo: { name: bar }")
- out := volumeList{}
+ out := networkList{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
- g.Assert(len(out.volumes)).Equal(1)
- g.Assert(out.volumes[0].Driver).Equal("local")
+ g.Assert(len(out.networks)).Equal(1)
+ g.Assert(out.networks[0].Driver).Equal("bridge")
})
})
})
diff --git a/yaml/transform/clone.go b/yaml/transform/clone.go
new file mode 100644
index 000000000..b3e20f430
--- /dev/null
+++ b/yaml/transform/clone.go
@@ -0,0 +1,22 @@
+package transform
+
+import "github.com/drone/drone/yaml"
+
+const clone = "clone"
+
+// Clone transforms the Yaml to include a clone step.
+func Clone(c *yaml.Config, plugin string) error {
+ for _, p := range c.Pipeline {
+ if p.Name == clone {
+ return nil
+ }
+ }
+
+ s := &yaml.Container{
+ Image: plugin,
+ Name: clone,
+ }
+
+ c.Pipeline = append([]*yaml.Container{s}, c.Pipeline...)
+ return nil
+}
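+
+// exampleClone is an illustrative sketch (not part of the transform): given a
+// pipeline without a clone step, Clone prepends one that runs the configured
+// clone plugin. The plugin image below is only an example value.
+func exampleClone() *yaml.Config {
+	conf := &yaml.Config{
+		Pipeline: []*yaml.Container{
+			{Name: "build", Image: "golang"},
+		},
+	}
+	Clone(conf, "plugins/git")
+	// conf.Pipeline[0] is now the injected {Name: "clone", Image: "plugins/git"} step.
+	return conf
+}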
diff --git a/yaml/transform/clone_test.go b/yaml/transform/clone_test.go
new file mode 100644
index 000000000..5796f91c6
--- /dev/null
+++ b/yaml/transform/clone_test.go
@@ -0,0 +1 @@
+package transform
diff --git a/yaml/transform/command.go b/yaml/transform/command.go
new file mode 100644
index 000000000..fc9ce0208
--- /dev/null
+++ b/yaml/transform/command.go
@@ -0,0 +1,82 @@
+package transform
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "github.com/drone/drone/yaml"
+)
+
+// CommandTransform transforms the custom shell commands in the Yaml pipeline
+// into a container ENTRYPOINT and CMD for execution.
+func CommandTransform(c *yaml.Config) error {
+ for _, p := range c.Pipeline {
+
+ if isPlugin(p) {
+ continue
+ }
+
+ p.Entrypoint = []string{
+ "/bin/sh", "-c",
+ }
+ p.Command = []string{
+ "echo $DRONE_SCRIPT | base64 -d | /bin/sh -e",
+ }
+ if p.Environment == nil {
+ p.Environment = map[string]string{}
+ }
+ p.Environment["HOME"] = "/root"
+ p.Environment["SHELL"] = "/bin/sh"
+ p.Environment["DRONE_SCRIPT"] = toScript(
+ p.Commands,
+ )
+ }
+ return nil
+}
+
+func toScript(commands []string) string {
+ var buf bytes.Buffer
+ for _, command := range commands {
+ escaped := fmt.Sprintf("%q", command)
+ escaped = strings.Replace(escaped, "$", `$\`, -1)
+ buf.WriteString(fmt.Sprintf(
+ traceScript,
+ escaped,
+ command,
+ ))
+ }
+
+ script := fmt.Sprintf(
+ setupScript,
+ buf.String(),
+ )
+
+ return base64.StdEncoding.EncodeToString([]byte(script))
+}
+
+// setupScript is a helper script that is added to the build to ensure
+// a minimum set of environment variables are set correctly.
+const setupScript = `
+if [ -n "$DRONE_NETRC_MACHINE" ]; then
+cat <<EOF > $HOME/.netrc
+machine $DRONE_NETRC_MACHINE
+login $DRONE_NETRC_USERNAME
+password $DRONE_NETRC_PASSWORD
+EOF
+fi
+
+unset DRONE_NETRC_USERNAME
+unset DRONE_NETRC_PASSWORD
+unset DRONE_SCRIPT
+
+%s
+`
+
+// traceScript is a helper script that is added to the build script
+// to trace a command.
+const traceScript = `
+echo + %s
+%s
+`
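+
+// exampleScript is an illustrative sketch (not used by the build) showing how
+// the generated payload can be inspected: DRONE_SCRIPT is the base64 encoded
+// setup script with one traced entry per command.
+func exampleScript() (string, error) {
+	encoded := toScript([]string{"go build", "go test"})
+	decoded, err := base64.StdEncoding.DecodeString(encoded)
+	if err != nil {
+		return "", err
+	}
+	// the decoded script writes ~/.netrc when configured, unsets the
+	// credentials, then echoes and runs each command in order.
+	return string(decoded), nil
+}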
diff --git a/yaml/transform/command_test.go b/yaml/transform/command_test.go
new file mode 100644
index 000000000..791e64be0
--- /dev/null
+++ b/yaml/transform/command_test.go
@@ -0,0 +1,47 @@
+package transform
+
+import (
+ "testing"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/franela/goblin"
+)
+
+func Test_command(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Command genration", func() {
+
+ g.It("should ignore plugin steps", func() {
+ c := newConfig(&yaml.Container{
+ Commands: []string{
+ "go build",
+ "go test",
+ },
+ Vargs: map[string]interface{}{
+ "depth": 50,
+ },
+ })
+
+ CommandTransform(c)
+ g.Assert(len(c.Pipeline[0].Entrypoint)).Equal(0)
+ g.Assert(len(c.Pipeline[0].Command)).Equal(0)
+ g.Assert(c.Pipeline[0].Environment["DRONE_SCRIPT"]).Equal("")
+ })
+
+ g.It("should set entrypoint, command and environment variables", func() {
+ c := newConfig(&yaml.Container{
+ Commands: []string{
+ "go build",
+ "go test",
+ },
+ })
+
+ CommandTransform(c)
+ g.Assert(c.Pipeline[0].Entrypoint).Equal([]string{"/bin/sh", "-c"})
+ g.Assert(c.Pipeline[0].Command).Equal([]string{"echo $DRONE_SCRIPT | base64 -d | /bin/sh -e"})
+ g.Assert(c.Pipeline[0].Environment["DRONE_SCRIPT"] != "").IsTrue()
+ })
+ })
+}
diff --git a/yaml/transform/environ.go b/yaml/transform/environ.go
new file mode 100644
index 000000000..47e1e6e4d
--- /dev/null
+++ b/yaml/transform/environ.go
@@ -0,0 +1,20 @@
+package transform
+
+import "github.com/drone/drone/yaml"
+
+// Environ transforms the steps in the Yaml pipeline to include runtime
+// environment variables.
+func Environ(c *yaml.Config, envs map[string]string) error {
+ for _, p := range c.Pipeline {
+ if p.Environment == nil {
+ p.Environment = map[string]string{}
+ }
+ for k, v := range envs {
+ if v == "" {
+ continue
+ }
+ p.Environment[k] = v
+ }
+ }
+ return nil
+}
diff --git a/yaml/transform/environ_test.go b/yaml/transform/environ_test.go
new file mode 100644
index 000000000..903e31eff
--- /dev/null
+++ b/yaml/transform/environ_test.go
@@ -0,0 +1,27 @@
+package transform
+
+import (
+ "testing"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/franela/goblin"
+)
+
+func Test_env(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("environment variables", func() {
+
+ g.It("should be copied", func() {
+ envs := map[string]string{"CI": "drone"}
+
+ c := newConfig(&yaml.Container{
+ Environment: map[string]string{},
+ })
+
+ Environ(c, envs)
+ g.Assert(c.Pipeline[0].Environment["CI"]).Equal("drone")
+ })
+ })
+}
diff --git a/yaml/transform/filter.go b/yaml/transform/filter.go
new file mode 100644
index 000000000..9a36245e9
--- /dev/null
+++ b/yaml/transform/filter.go
@@ -0,0 +1,58 @@
+package transform
+
+import (
+ "github.com/drone/drone/model"
+ "github.com/drone/drone/yaml"
+)
+
+// DefaultFilter is a transform function that applies default Filters to each
+// step in the Yaml specification file.
+func DefaultFilter(conf *yaml.Config) {
+ for _, step := range conf.Pipeline {
+ defaultStatus(step)
+ defaultEvent(step)
+ }
+}
+
+// defaultStatus sets default status conditions.
+func defaultStatus(c *yaml.Container) {
+ if !isEmpty(c.Constraints.Status) {
+ return
+ }
+ c.Constraints.Status.Include = []string{
+ model.StatusSuccess,
+ }
+}
+
+// defaultEvent sets default event conditions.
+func defaultEvent(c *yaml.Container) {
+ if !isEmpty(c.Constraints.Event) {
+ return
+ }
+
+ if isPlugin(c) && !isClone(c) {
+ c.Constraints.Event.Exclude = []string{
+ model.EventPull,
+ }
+ }
+}
+
+// helper function returns true if the constraint has no include or exclude rules.
+func isEmpty(c yaml.Constraint) bool {
+ return len(c.Include) == 0 && len(c.Exclude) == 0
+}
+
+// helper function returns true if the step is a plugin step.
+func isPlugin(c *yaml.Container) bool {
+ return len(c.Commands) == 0 || len(c.Vargs) != 0
+}
+
+// helper function returns true if the step is a command step.
+func isCommand(c *yaml.Container) bool {
+ return len(c.Commands) != 0
+}
+
+// helper function returns true if the step is a clone step.
+func isClone(c *yaml.Container) bool {
+ return c.Name == "clone"
+}
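+
+// exampleDefaultFilter is an illustrative sketch (not used by the transform)
+// of the defaults applied above: a step runs only on success unless it sets a
+// status constraint, and plugin steps other than clone are excluded from pull
+// request events.
+func exampleDefaultFilter() *yaml.Config {
+	conf := &yaml.Config{
+		Pipeline: []*yaml.Container{
+			{Name: "notify", Image: "plugins/slack"},
+		},
+	}
+	DefaultFilter(conf)
+	// conf.Pipeline[0].Constraints.Status.Include == [model.StatusSuccess]
+	// conf.Pipeline[0].Constraints.Event.Exclude == [model.EventPull]
+	return conf
+}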
diff --git a/yaml/transform/identifier.go b/yaml/transform/identifier.go
new file mode 100644
index 000000000..b2df68bc7
--- /dev/null
+++ b/yaml/transform/identifier.go
@@ -0,0 +1,30 @@
+package transform
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/gorilla/securecookie"
+)
+
+// Identifier transforms the container steps in the Yaml and assigns a unique
+// container identifier.
+func Identifier(c *yaml.Config) error {
+
+ // creates a random prefix for the build
+ rand := base64.RawURLEncoding.EncodeToString(
+ securecookie.GenerateRandomKey(8),
+ )
+
+ for i, step := range c.Services {
+ step.ID = fmt.Sprintf("drone_%s_%d", rand, i)
+ }
+
+ for i, step := range c.Pipeline {
+ step.ID = fmt.Sprintf("drone_%s_%d", rand, i+len(c.Services))
+ }
+
+ return nil
+}
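+
+// exampleIdentifier is an illustrative sketch (not used by the transform):
+// services and pipeline steps receive identifiers of the form
+// drone_<random>_<index>, with services numbered before pipeline steps.
+func exampleIdentifier() *yaml.Config {
+	conf := &yaml.Config{
+		Services: []*yaml.Container{{Name: "database", Image: "mysql"}},
+		Pipeline: []*yaml.Container{{Name: "build", Image: "golang"}},
+	}
+	Identifier(conf)
+	// conf.Services[0].ID == "drone_<random>_0"
+	// conf.Pipeline[0].ID == "drone_<random>_1"
+	return conf
+}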
diff --git a/yaml/transform/image.go b/yaml/transform/image.go
new file mode 100644
index 000000000..5caff74fd
--- /dev/null
+++ b/yaml/transform/image.go
@@ -0,0 +1,69 @@
+package transform
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/drone/drone/yaml"
+)
+
+// ImagePull transforms the Yaml to automatically pull the latest image.
+func ImagePull(conf *yaml.Config, pull bool) error {
+ for _, plugin := range conf.Pipeline {
+ if !isPlugin(plugin) {
+ continue
+ }
+ plugin.Pull = pull
+ }
+ return nil
+}
+
+// ImageTag transforms the Yaml to use the :latest image tag when empty.
+func ImageTag(conf *yaml.Config) error {
+ for _, image := range conf.Pipeline {
+ if !strings.Contains(image.Image, ":") {
+ image.Image = image.Image + ":latest"
+ }
+ }
+ for _, image := range conf.Services {
+ if !strings.Contains(image.Image, ":") {
+ image.Image = image.Image + ":latest"
+ }
+ }
+ return nil
+}
+
+// ImageName transforms the Yaml to replace underscores with dashes.
+func ImageName(conf *yaml.Config) error {
+ for _, image := range conf.Pipeline {
+ image.Image = strings.Replace(image.Image, "_", "-", -1)
+ }
+ return nil
+}
+
+// ImageNamespace transforms the Yaml to use a default namespace for plugins.
+func ImageNamespace(conf *yaml.Config, namespace string) error {
+ for _, image := range conf.Pipeline {
+ if strings.Contains(image.Image, "/") {
+ continue
+ }
+ if !isPlugin(image) {
+ continue
+ }
+ image.Image = filepath.Join(namespace, image.Image)
+ }
+ return nil
+}
+
+// ImageEscalate transforms the Yaml to automatically enable privileged mode
+// for a subset of white-listed plugins matching the given patterns.
+func ImageEscalate(conf *yaml.Config, patterns []string) error {
+ for _, c := range conf.Pipeline {
+ for _, pattern := range patterns {
+ if ok, _ := filepath.Match(pattern, c.Image); ok {
+ c.Privileged = true
+ }
+ }
+ }
+ return nil
+}
diff --git a/yaml/transform/image_test.go b/yaml/transform/image_test.go
new file mode 100644
index 000000000..67ff8dd0d
--- /dev/null
+++ b/yaml/transform/image_test.go
@@ -0,0 +1,151 @@
+package transform
+
+import (
+ "testing"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/franela/goblin"
+)
+
+func Test_pull(t *testing.T) {
+ g := goblin.Goblin(t)
+ g.Describe("pull image", func() {
+
+ g.It("should be enabled for plugins", func() {
+ c := newConfig(&yaml.Container{})
+
+ ImagePull(c, true)
+ g.Assert(c.Pipeline[0].Pull).IsTrue()
+ })
+
+ g.It("should be disabled for plugins", func() {
+ c := newConfig(&yaml.Container{})
+
+ ImagePull(c, false)
+ g.Assert(c.Pipeline[0].Pull).IsFalse()
+ })
+
+ g.It("should not apply to commands", func() {
+ c := newConfig(&yaml.Container{
+ Commands: []string{
+ "go build",
+ "go test",
+ },
+ })
+
+ ImagePull(c, true)
+ g.Assert(c.Pipeline[0].Pull).IsFalse()
+ })
+
+ g.It("should not apply to services", func() {
+ c := newConfigService(&yaml.Container{
+ Image: "mysql",
+ })
+
+ ImagePull(c, true)
+ g.Assert(c.Services[0].Pull).IsFalse()
+ })
+ })
+}
+
+func Test_escalate(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("privileged transform", func() {
+
+ g.It("should handle matches", func() {
+ c := newConfig(&yaml.Container{
+ Image: "plugins/docker",
+ })
+
+ ImageEscalate(c, []string{"plugins/docker"})
+ g.Assert(c.Pipeline[0].Privileged).IsTrue()
+ })
+
+ g.It("should handle glob matches", func() {
+ c := newConfig(&yaml.Container{
+ Image: "plugins/docker:latest",
+ })
+
+ ImageEscalate(c, []string{"plugins/docker:*"})
+ g.Assert(c.Pipeline[0].Privileged).IsTrue()
+ })
+
+ g.It("should handle non matches", func() {
+ c := newConfig(&yaml.Container{
+ Image: "plugins/git:latest",
+ })
+
+ ImageEscalate(c, []string{"plugins/docker:*"})
+ g.Assert(c.Pipeline[0].Privileged).IsFalse()
+ })
+
+ g.It("should handle non glob matches", func() {
+ c := newConfig(&yaml.Container{
+ Image: "plugins/docker:latest",
+ })
+
+ ImageEscalate(c, []string{"plugins/docker"})
+ g.Assert(c.Pipeline[0].Privileged).IsFalse()
+ })
+ })
+}
+
+func Test_normalize(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("normalizing", func() {
+
+ g.Describe("images", func() {
+
+ g.It("should append tag if empty", func() {
+ c := newConfig(&yaml.Container{
+ Image: "golang",
+ })
+
+ ImageTag(c)
+ g.Assert(c.Pipeline[0].Image).Equal("golang:latest")
+ })
+
+ g.It("should not override existing tag", func() {
+ c := newConfig(&yaml.Container{
+ Image: "golang:1.5",
+ })
+
+ ImageTag(c)
+ g.Assert(c.Pipeline[0].Image).Equal("golang:1.5")
+ })
+ })
+
+ g.Describe("plugins", func() {
+
+ g.It("should prepend namespace", func() {
+ c := newConfig(&yaml.Container{
+ Image: "slack",
+ })
+
+ ImageNamespace(c, "plugins")
+ g.Assert(c.Pipeline[0].Image).Equal("plugins/slack")
+ })
+
+ g.It("should not override existing namespace", func() {
+ c := newConfig(&yaml.Container{
+ Image: "index.docker.io/drone/git",
+ })
+
+ ImageNamespace(c, "plugins")
+ g.Assert(c.Pipeline[0].Image).Equal("index.docker.io/drone/git")
+ })
+
+ g.It("should replace underscores with dashes", func() {
+ c := newConfig(&yaml.Container{
+ Image: "gh_pages",
+ })
+
+ ImageName(c)
+ g.Assert(c.Pipeline[0].Image).Equal("gh-pages")
+ })
+ })
+ })
+}
diff --git a/yaml/transform/plugin.go b/yaml/transform/plugin.go
new file mode 100644
index 000000000..be0557bd5
--- /dev/null
+++ b/yaml/transform/plugin.go
@@ -0,0 +1,47 @@
+package transform
+
+import (
+ "path/filepath"
+
+ "github.com/drone/drone/yaml"
+)
+
+// PluginDisable is a transform function that alters the Yaml configuration to
+// disable plugins. This is intended for use when executing the pipeline
+// locally on your own computer.
+func PluginDisable(conf *yaml.Config, patterns []string) error {
+ for _, container := range conf.Pipeline {
+ if len(container.Commands) != 0 { // skip build steps
+ continue
+ }
+ var match bool
+ for _, pattern := range patterns {
+ if ok, _ := filepath.Match(pattern, container.Name); ok {
+ match = true
+ break
+ }
+ }
+ if !match {
+ container.Disabled = true
+ }
+ }
+ return nil
+}
+
+// PluginParams is a transform function that alters the Yaml configuration to
+// include plugin vargs parameters as environment variables.
+func PluginParams(conf *yaml.Config) error {
+ for _, container := range conf.Pipeline {
+ if len(container.Vargs) == 0 {
+ continue
+ }
+ if container.Environment == nil {
+ container.Environment = map[string]string{}
+ }
+ err := argsToEnv(container.Vargs, container.Environment)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
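+
+// examplePluginParams is an illustrative sketch (not used by the transform)
+// showing how plugin vargs become environment variables; the exact key naming
+// is left to argsToEnv and is not asserted here.
+func examplePluginParams() (map[string]string, error) {
+	conf := &yaml.Config{
+		Pipeline: []*yaml.Container{
+			{
+				Name:  "publish",
+				Image: "plugins/docker",
+				Vargs: map[string]interface{}{
+					"repo": "octocat/hello-world",
+					"tags": []string{"latest", "1.0"},
+				},
+			},
+		},
+	}
+	if err := PluginParams(conf); err != nil {
+		return nil, err
+	}
+	// slice values are flattened to a comma separated string by argsToEnv.
+	return conf.Pipeline[0].Environment, nil
+}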
diff --git a/yaml/transform/plugin_test.go b/yaml/transform/plugin_test.go
new file mode 100644
index 000000000..5796f91c6
--- /dev/null
+++ b/yaml/transform/plugin_test.go
@@ -0,0 +1 @@
+package transform
diff --git a/yaml/transform/pod.go b/yaml/transform/pod.go
new file mode 100644
index 000000000..ae734cd6a
--- /dev/null
+++ b/yaml/transform/pod.go
@@ -0,0 +1,61 @@
+package transform
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/gorilla/securecookie"
+)
+
+// Pod transforms the containers in the Yaml to use pod-style networking, where
+// every container joins the ambassador's network namespace and can reach the
+// others over localhost.
+func Pod(c *yaml.Config) error {
+
+ rand := base64.RawURLEncoding.EncodeToString(
+ securecookie.GenerateRandomKey(8),
+ )
+
+ ambassador := &yaml.Container{
+ ID: fmt.Sprintf("drone_ambassador_%s", rand),
+ Name: "ambassador",
+ Image: "busybox:latest",
+ Detached: true,
+ Entrypoint: []string{"/bin/sleep"},
+ Command: []string{"86400"},
+ Volumes: []string{c.Workspace.Path, c.Workspace.Base},
+ Environment: map[string]string{},
+ }
+ network := fmt.Sprintf("container:%s", ambassador.ID)
+
+ var containers []*yaml.Container
+ containers = append(containers, c.Pipeline...)
+ containers = append(containers, c.Services...)
+
+ for _, container := range containers {
+ container.VolumesFrom = append(container.VolumesFrom, ambassador.ID)
+ if container.Network == "" {
+ container.Network = network
+ }
+ }
+
+ c.Services = append([]*yaml.Container{ambassador}, c.Services...)
+ return nil
+}
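+
+// examplePod is an illustrative sketch (not used by the transform) of the
+// resulting layout: an ambassador service is prepended and every other
+// container joins its network namespace and shares its volumes. Pod reads the
+// workspace, so a workspace must be configured first.
+func examplePod() *yaml.Config {
+	conf := &yaml.Config{
+		Workspace: &yaml.Workspace{Base: "/drone", Path: "/drone/src"},
+		Pipeline: []*yaml.Container{
+			{Name: "build", Image: "golang"},
+		},
+	}
+	Pod(conf)
+	// conf.Services[0] is the ambassador; the build step now has
+	// Network "container:<ambassador id>" and VolumesFrom [<ambassador id>].
+	return conf
+}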
+
+// func (v *podOp) VisitContainer(node *parse.ContainerNode) error {
+// if node.Container.Network == "" {
+// parent := fmt.Sprintf("container:%s", v.name)
+// node.Container.Network = parent
+// }
+// node.Container.VolumesFrom = append(node.Container.VolumesFrom, v.name)
+// return nil
+// }
+//
+// func (v *podOp) VisitRoot(node *parse.RootNode) error {
+//
+//
+// node.Pod = service
+// return nil
+// }
diff --git a/yaml/transform/secret.go b/yaml/transform/secret.go
new file mode 100644
index 000000000..69054c6ce
--- /dev/null
+++ b/yaml/transform/secret.go
@@ -0,0 +1,39 @@
+package transform
+
+import (
+ "github.com/drone/drone/model"
+ "github.com/drone/drone/yaml"
+)
+
+// ImageSecrets transforms the Yaml to inject registry credentials and secret
+// environment variables into matching pipeline and service containers.
+func ImageSecrets(c *yaml.Config, secrets []*model.Secret, event string) error {
+ var images []*yaml.Container
+ images = append(images, c.Pipeline...)
+ images = append(images, c.Services...)
+
+ for _, image := range images {
+ imageSecrets(image, secrets, event)
+ }
+ return nil
+}
+
+func imageSecrets(c *yaml.Container, secrets []*model.Secret, event string) {
+ for _, secret := range secrets {
+ if !secret.Match(c.Image, event) {
+ continue
+ }
+
+ switch secret.Name {
+ case "REGISTRY_USERNAME":
+ c.AuthConfig.Username = secret.Value
+ case "REGISTRY_PASSWORD":
+ c.AuthConfig.Password = secret.Value
+ case "REGISTRY_EMAIL":
+ c.AuthConfig.Email = secret.Value
+ default:
+ if c.Environment == nil {
+ c.Environment = map[string]string{}
+ }
+ c.Environment[secret.Name] = secret.Value
+ }
+ }
+}
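+
+// exampleImageSecrets is an illustrative sketch (not used by the transform):
+// registry secrets are copied into the container auth config while any other
+// matching secret becomes an environment variable. The values are placeholders
+// and the wildcard image and event lists are assumed to match everything.
+func exampleImageSecrets() *yaml.Container {
+	container := &yaml.Container{Image: "plugins/docker"}
+	secrets := []*model.Secret{
+		{Name: "REGISTRY_PASSWORD", Value: "<password>", Images: []string{"*"}, Events: []string{"*"}},
+		{Name: "DOCKER_TOKEN", Value: "<token>", Images: []string{"*"}, Events: []string{"*"}},
+	}
+	imageSecrets(container, secrets, "push")
+	// container.AuthConfig.Password == "<password>"
+	// container.Environment["DOCKER_TOKEN"] == "<token>"
+	return container
+}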
diff --git a/yaml/transform/secret_test.go b/yaml/transform/secret_test.go
new file mode 100644
index 000000000..5796f91c6
--- /dev/null
+++ b/yaml/transform/secret_test.go
@@ -0,0 +1 @@
+package transform
diff --git a/yaml/transform/transform.go b/yaml/transform/transform.go
new file mode 100644
index 000000000..61d735282
--- /dev/null
+++ b/yaml/transform/transform.go
@@ -0,0 +1,6 @@
+package transform
+
+import "github.com/drone/drone/yaml"
+
+// TransformFunc defines an operation for transforming the Yaml file.
+type TransformFunc func(*yaml.Config) error
diff --git a/engine/compiler/builtin/args.go b/yaml/transform/util.go
similarity index 51%
rename from engine/compiler/builtin/args.go
rename to yaml/transform/util.go
index 835a1ed48..910390b63 100644
--- a/engine/compiler/builtin/args.go
+++ b/yaml/transform/util.go
@@ -1,4 +1,4 @@
-package builtin
+package transform
import (
"fmt"
@@ -6,35 +6,10 @@ import (
"strconv"
"strings"
- "github.com/drone/drone/engine/compiler/parse"
-
json "github.com/ghodss/yaml"
"gopkg.in/yaml.v2"
)
-type argsOps struct {
- visitor
-}
-
-// NewArgsOp returns a transformer that provides the plugin node
-// with the custom arguments from the Yaml file.
-func NewArgsOp() Visitor {
- return &argsOps{}
-}
-
-func (v *argsOps) VisitContainer(node *parse.ContainerNode) error {
- switch node.NodeType {
- case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
- break // no-op
- default:
- return nil
- }
- if node.Container.Environment == nil {
- node.Container.Environment = map[string]string{}
- }
- return argsToEnv(node.Vargs, node.Container.Environment)
-}
-
// argsToEnv uses reflection to convert a map[string]interface to a list
// of environment variables.
func argsToEnv(from map[string]interface{}, to map[string]string) error {
@@ -58,28 +33,25 @@ func argsToEnv(from map[string]interface{}, to map[string]string) error {
case reflect.Float32, reflect.Float64:
to[k] = fmt.Sprintf("%v", vv.Float())
- // case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
- // to[k] = strconv.FormatInt(vv.Int(), 16)
-
- // case reflect.Float32, reflect.Float64:
- // to[k] = strconv.FormatFloat(vv.Float(), 'E', -1, 64)
-
case reflect.Map:
yml, _ := yaml.Marshal(vv.Interface())
out, _ := json.YAMLToJSON(yml)
to[k] = string(out)
case reflect.Slice:
- out, _ := yaml.Marshal(vv.Interface())
+ out, err := yaml.Marshal(vv.Interface())
+ if err != nil {
+ return err
+ }
in := []string{}
- err := yaml.Unmarshal(out, &in)
+ err = yaml.Unmarshal(out, &in)
if err == nil {
to[k] = strings.Join(in, ",")
} else {
out, err = json.YAMLToJSON(out)
if err != nil {
- // return err TODO(bradrydzewski) unit test coverage for possible errors
+ return err
}
to[k] = string(out)
}
diff --git a/yaml/transform/validate.go b/yaml/transform/validate.go
new file mode 100644
index 000000000..28471e013
--- /dev/null
+++ b/yaml/transform/validate.go
@@ -0,0 +1,79 @@
+package transform
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/yaml"
+)
+
+// Check validates the Yaml configuration and returns an error when a pipeline
+// step sets a plugin entrypoint or command, or when an untrusted pipeline or
+// service container uses restricted settings.
+func Check(c *yaml.Config, trusted bool) error {
+ for _, image := range c.Pipeline {
+ if err := CheckEntrypoint(image); err != nil {
+ return err
+ }
+ if trusted {
+ continue
+ }
+ if err := CheckTrusted(image); err != nil {
+ return err
+ }
+ }
+ for _, image := range c.Services {
+ if trusted {
+ continue
+ }
+ if err := CheckTrusted(image); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CheckEntrypoint validates the plugin command and entrypoint and returns an
+// error if the user attempts to set or override these values.
+func CheckEntrypoint(c *yaml.Container) error {
+ if len(c.Entrypoint) != 0 {
+ return fmt.Errorf("Cannot set plugin Entrypoint")
+ }
+ if len(c.Command) != 0 {
+ return fmt.Errorf("Cannot set plugin Command")
+ }
+ return nil
+}
+
+// CheckTrusted validates the container configuration and returns an error if
+// restricted settings are used.
+func CheckTrusted(c *yaml.Container) error {
+ if c.Privileged {
+ return fmt.Errorf("Insufficient privileges to use privileged mode")
+ }
+ if len(c.DNS) != 0 {
+ return fmt.Errorf("Insufficient privileges to use custom dns")
+ }
+ if len(c.DNSSearch) != 0 {
+ return fmt.Errorf("Insufficient privileges to use dns_search")
+ }
+ if len(c.Devices) != 0 {
+ return fmt.Errorf("Insufficient privileges to use devices")
+ }
+ if len(c.ExtraHosts) != 0 {
+ return fmt.Errorf("Insufficient privileges to use extra_hosts")
+ }
+ if len(c.Network) != 0 {
+ return fmt.Errorf("Insufficient privileges to override the network")
+ }
+ if c.OomKillDisable {
+ return fmt.Errorf("Insufficient privileges to disable oom_kill")
+ }
+ if len(c.Volumes) != 0 {
+ return fmt.Errorf("Insufficient privileges to use volumes")
+ }
+ if len(c.VolumesFrom) != 0 {
+ return fmt.Errorf("Insufficient privileges to use volumes_from")
+ }
+ return nil
+}
diff --git a/yaml/transform/validate_test.go b/yaml/transform/validate_test.go
new file mode 100644
index 000000000..eddbcdf24
--- /dev/null
+++ b/yaml/transform/validate_test.go
@@ -0,0 +1,154 @@
+package transform
+
+import (
+ "testing"
+
+ "github.com/drone/drone/yaml"
+
+ "github.com/franela/goblin"
+)
+
+func Test_validate(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("validating", func() {
+
+ g.Describe("privileged attributes", func() {
+
+ g.It("should not error when trusted build", func() {
+ c := newConfig(&yaml.Container{Privileged: true})
+ err := Check(c, true)
+
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+
+ g.It("should error when privleged mode", func() {
+ c := newConfig(&yaml.Container{
+ Privileged: true,
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use privileged mode")
+ })
+
+ g.It("should error when privleged service container", func() {
+ c := newConfigService(&yaml.Container{
+ Privileged: true,
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use privileged mode")
+ })
+
+ g.It("should error when dns configured", func() {
+ c := newConfig(&yaml.Container{
+ DNS: []string{"8.8.8.8"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use custom dns")
+ })
+
+ g.It("should error when dns_search configured", func() {
+ c := newConfig(&yaml.Container{
+ DNSSearch: []string{"8.8.8.8"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use dns_search")
+ })
+
+ g.It("should error when devices configured", func() {
+ c := newConfig(&yaml.Container{
+ Devices: []string{"/dev/foo"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use devices")
+ })
+
+ g.It("should error when extra_hosts configured", func() {
+ c := newConfig(&yaml.Container{
+ ExtraHosts: []string{"1.2.3.4 foo.com"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use extra_hosts")
+ })
+
+ g.It("should error when network configured", func() {
+ c := newConfig(&yaml.Container{
+ Network: "host",
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to override the network")
+ })
+
+ g.It("should error when oom_kill_disabled configured", func() {
+ c := newConfig(&yaml.Container{
+ OomKillDisable: true,
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to disable oom_kill")
+ })
+
+ g.It("should error when volumes configured", func() {
+ c := newConfig(&yaml.Container{
+ Volumes: []string{"/:/tmp"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use volumes")
+ })
+
+ g.It("should error when volumes_from configured", func() {
+ c := newConfig(&yaml.Container{
+ VolumesFrom: []string{"drone"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use volumes_from")
+ })
+ })
+
+ g.Describe("plugin configuration", func() {
+ g.It("should error when entrypoint is configured", func() {
+ c := newConfig(&yaml.Container{
+ Entrypoint: []string{"/bin/sh"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Cannot set plugin Entrypoint")
+ })
+
+ g.It("should error when command is configured", func() {
+ c := newConfig(&yaml.Container{
+ Command: []string{"cat", "/proc/1/status"},
+ })
+ err := Check(c, false)
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Cannot set plugin Command")
+ })
+
+ g.It("should not error when empty entrypoint, command", func() {
+ c := newConfig(&yaml.Container{})
+ err := Check(c, false)
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+ })
+ })
+}
+
+func newConfig(container *yaml.Container) *yaml.Config {
+ return &yaml.Config{
+ Pipeline: []*yaml.Container{container},
+ }
+}
+
+func newConfigService(container *yaml.Container) *yaml.Config {
+ return &yaml.Config{
+ Services: []*yaml.Container{container},
+ }
+}
diff --git a/yaml/transform/volume.go b/yaml/transform/volume.go
new file mode 100644
index 000000000..e42a9ee69
--- /dev/null
+++ b/yaml/transform/volume.go
@@ -0,0 +1,20 @@
+package transform
+
+import "github.com/drone/drone/yaml"
+
+// ImageVolume transforms the Yaml to mount the given volumes in every
+// pipeline and service container.
+func ImageVolume(conf *yaml.Config, volumes []string) error {
+
+ if len(volumes) == 0 {
+ return nil
+ }
+
+ var containers []*yaml.Container
+ containers = append(containers, conf.Pipeline...)
+ containers = append(containers, conf.Services...)
+
+ for _, container := range containers {
+ container.Volumes = append(container.Volumes, volumes...)
+ }
+
+ return nil
+}
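+
+// exampleImageVolume is an illustrative sketch (not used by the transform)
+// showing how extra volumes passed to the transform are appended to every
+// pipeline and service container.
+func exampleImageVolume() *yaml.Config {
+	conf := &yaml.Config{
+		Pipeline: []*yaml.Container{
+			{Name: "build", Image: "golang"},
+		},
+	}
+	ImageVolume(conf, []string{"/var/run/docker.sock:/var/run/docker.sock"})
+	// conf.Pipeline[0].Volumes now includes the docker socket mount.
+	return conf
+}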
diff --git a/yaml/transform/workspace.go b/yaml/transform/workspace.go
new file mode 100644
index 000000000..caba299cd
--- /dev/null
+++ b/yaml/transform/workspace.go
@@ -0,0 +1,32 @@
+package transform
+
+import (
+ "path/filepath"
+
+ "github.com/drone/drone/yaml"
+)
+
+// WorkspaceTransform transforms the Yaml to set the default workspace and to
+// use the workspace path as the working directory for every pipeline step.
+func WorkspaceTransform(c *yaml.Config, base, path string) error {
+ if c.Workspace == nil {
+ c.Workspace = &yaml.Workspace{}
+ }
+
+ if c.Workspace.Base == "" {
+ c.Workspace.Base = base
+ }
+ if c.Workspace.Path == "" {
+ c.Workspace.Path = path
+ }
+ if !filepath.IsAbs(c.Workspace.Path) {
+ c.Workspace.Path = filepath.Join(
+ c.Workspace.Base,
+ c.Workspace.Path,
+ )
+ }
+
+ for _, p := range c.Pipeline {
+ p.WorkingDir = c.Workspace.Path
+ }
+ return nil
+}
diff --git a/yaml/transform/workspace_test.go b/yaml/transform/workspace_test.go
new file mode 100644
index 000000000..c16c1f41e
--- /dev/null
+++ b/yaml/transform/workspace_test.go
@@ -0,0 +1,99 @@
+package transform
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+
+ "github.com/drone/drone/yaml"
+)
+
+func TestWorkspace(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("workspace", func() {
+
+ defaultBase := "/go"
+ defaultPath := "src/github.com/octocat/hello-world"
+
+ g.It("should not override user paths", func() {
+ base := "/drone"
+ path := "/drone/src/github.com/octocat/hello-world"
+
+ conf := &yaml.Config{
+ Workspace: &yaml.Workspace{
+ Base: base,
+ Path: path,
+ },
+ }
+
+ WorkspaceTransform(conf, defaultBase, defaultPath)
+ g.Assert(conf.Workspace.Base).Equal(base)
+ g.Assert(conf.Workspace.Path).Equal(path)
+ })
+
+ g.It("should convert user paths to absolute", func() {
+ base := "/drone"
+ path := "src/github.com/octocat/hello-world"
+ abs := "/drone/src/github.com/octocat/hello-world"
+
+ conf := &yaml.Config{
+ Workspace: &yaml.Workspace{
+ Base: base,
+ Path: path,
+ },
+ }
+
+ WorkspaceTransform(conf, defaultBase, defaultPath)
+ g.Assert(conf.Workspace.Base).Equal(base)
+ g.Assert(conf.Workspace.Path).Equal(abs)
+ })
+
+ g.It("should set the default path", func() {
+ var base = "/go"
+ var path = "/go/src/github.com/octocat/hello-world"
+
+ conf := &yaml.Config{}
+
+ WorkspaceTransform(conf, defaultBase, defaultPath)
+ g.Assert(conf.Workspace.Base).Equal(base)
+ g.Assert(conf.Workspace.Path).Equal(path)
+ })
+
+ g.It("should use workspace as working_dir", func() {
+ var base = "/drone"
+ var path = "/drone/src/github.com/octocat/hello-world"
+
+ conf := &yaml.Config{
+ Workspace: &yaml.Workspace{
+ Base: base,
+ Path: path,
+ },
+ Pipeline: []*yaml.Container{
+ {},
+ },
+ }
+
+ WorkspaceTransform(conf, defaultBase, defaultPath)
+ g.Assert(conf.Pipeline[0].WorkingDir).Equal(path)
+ })
+
+ g.It("should not use workspace as working_dir for services", func() {
+ var base = "/drone"
+ var path = "/drone/src/github.com/octocat/hello-world"
+
+ conf := &yaml.Config{
+ Workspace: &yaml.Workspace{
+ Base: base,
+ Path: path,
+ },
+ Services: []*yaml.Container{
+ {},
+ },
+ }
+
+ WorkspaceTransform(conf, defaultBase, defaultPath)
+ g.Assert(conf.Services[0].WorkingDir).Equal("")
+ })
+ })
+}
diff --git a/yaml/types.go b/yaml/types.go
deleted file mode 100644
index 9c1eefa56..000000000
--- a/yaml/types.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package yaml
-
-// stringOrSlice represents a string or an array of strings.
-type stringOrSlice struct {
- parts []string
-}
-
-func (s *stringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var sliceType []string
- err := unmarshal(&sliceType)
- if err == nil {
- s.parts = sliceType
- return nil
- }
-
- var stringType string
- err = unmarshal(&stringType)
- if err == nil {
- sliceType = make([]string, 0, 1)
- s.parts = append(sliceType, string(stringType))
- return nil
- }
- return err
-}
-
-func (s stringOrSlice) Slice() []string {
- return s.parts
-}
diff --git a/yaml/types/map.go b/yaml/types/map.go
new file mode 100644
index 000000000..b74498d8d
--- /dev/null
+++ b/yaml/types/map.go
@@ -0,0 +1,43 @@
+package types
+
+import "strings"
+
+// MapEqualSlice is a custom Yaml type that can hold a map or slice of strings
+// in key=value format.
+type MapEqualSlice struct {
+ parts map[string]string
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (s *MapEqualSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ s.parts = map[string]string{}
+ err := unmarshal(&s.parts)
+ if err == nil {
+ return nil
+ }
+
+ var slice []string
+ err = unmarshal(&slice)
+ if err != nil {
+ return err
+ }
+ for _, v := range slice {
+ parts := strings.SplitN(v, "=", 2)
+ if len(parts) == 2 {
+ key := parts[0]
+ val := parts[1]
+ s.parts[key] = val
+ }
+ }
+ return nil
+}
+
+// Map returns the Yaml information as a map.
+func (s *MapEqualSlice) Map() map[string]string {
+ return s.parts
+}
+
+// NewMapEqualSlice returns a new MapEqualSlice.
+func NewMapEqualSlice(from map[string]string) *MapEqualSlice {
+ return &MapEqualSlice{from}
+}
diff --git a/yaml/types/map_test.go b/yaml/types/map_test.go
new file mode 100644
index 000000000..7ed1e1c50
--- /dev/null
+++ b/yaml/types/map_test.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestMapEqualSlice(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Yaml map equal slice", func() {
+
+ g.It("should unmarshal a map", func() {
+ in := []byte("foo: bar")
+ out := MapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.Map())).Equal(1)
+ g.Assert(out.Map()["foo"]).Equal("bar")
+ })
+
+ g.It("should unmarshal a map equal slice", func() {
+ in := []byte("[ foo=bar ]")
+ out := MapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts["foo"]).Equal("bar")
+ })
+
+ g.It("should throw error when invalid map equal slice", func() {
+ in := []byte("foo") // string value should fail parse
+ out := MapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ g.Assert(err != nil).IsTrue("expects error")
+ })
+ })
+}
diff --git a/yaml/types/slice.go b/yaml/types/slice.go
new file mode 100644
index 000000000..b39e43212
--- /dev/null
+++ b/yaml/types/slice.go
@@ -0,0 +1,35 @@
+package types
+
+// StringOrSlice is a custom Yaml type that can hold a string or slice of strings.
+type StringOrSlice struct {
+ parts []string
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling.
+func (s *StringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var sliceType []string
+ err := unmarshal(&sliceType)
+ if err == nil {
+ s.parts = sliceType
+ return nil
+ }
+
+ var stringType string
+ err = unmarshal(&stringType)
+ if err == nil {
+ sliceType = make([]string, 0, 1)
+ s.parts = append(sliceType, string(stringType))
+ return nil
+ }
+ return err
+}
+
+// Slice returns the slice of strings.
+func (s StringOrSlice) Slice() []string {
+ return s.parts
+}
+
+// NewStringOrSlice returns a new StringOrSlice.
+func NewStringOrSlice(from []string) *StringOrSlice {
+ return &StringOrSlice{from}
+}
diff --git a/yaml/types_test.go b/yaml/types/slice_test.go
similarity index 74%
rename from yaml/types_test.go
rename to yaml/types/slice_test.go
index 8d095223f..dcf9a25f6 100644
--- a/yaml/types_test.go
+++ b/yaml/types/slice_test.go
@@ -1,4 +1,4 @@
-package yaml
+package types
import (
"testing"
@@ -7,26 +7,26 @@ import (
"gopkg.in/yaml.v2"
)
-func TestTypes(t *testing.T) {
+func TestStringSlice(t *testing.T) {
g := goblin.Goblin(t)
- g.Describe("Yaml types", func() {
+ g.Describe("Yaml string slice", func() {
g.Describe("given a yaml file", func() {
g.It("should unmarshal a string", func() {
in := []byte("foo")
- out := stringOrSlice{}
+ out := StringOrSlice{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
}
- g.Assert(len(out.parts)).Equal(1)
- g.Assert(out.parts[0]).Equal("foo")
+ g.Assert(len(out.Slice())).Equal(1)
+ g.Assert(out.Slice()[0]).Equal("foo")
})
g.It("should unmarshal a string slice", func() {
in := []byte("[ foo ]")
- out := stringOrSlice{}
+ out := StringOrSlice{}
err := yaml.Unmarshal(in, &out)
if err != nil {
g.Fail(err)
@@ -37,7 +37,7 @@ func TestTypes(t *testing.T) {
g.It("should throw error when invalid string slice", func() {
in := []byte("{ }") // string value should fail parse
- out := stringOrSlice{}
+ out := StringOrSlice{}
err := yaml.Unmarshal(in, &out)
g.Assert(err != nil).IsTrue("expects error")
})
diff --git a/engine/compiler/parse/node_volume.go b/yaml/volume.go
similarity index 57%
rename from engine/compiler/parse/node_volume.go
rename to yaml/volume.go
index 1aadfa1f7..20d297bf9 100644
--- a/engine/compiler/parse/node_volume.go
+++ b/yaml/volume.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"fmt"
@@ -6,38 +6,21 @@ import (
"gopkg.in/yaml.v2"
)
-// VolumeNode represents a Docker volume.
-type VolumeNode struct {
- NodeType
- root *RootNode
-
- Name string
- Driver string
- DriverOpts map[string]string
- External bool
-}
-
-// Root returns the root node.
-func (n *VolumeNode) Root() *RootNode { return n.root }
-
-//
-// intermediate types for yaml decoding.
-//
-
-// volume is an intermediate type used for decoding a volumes in a format
-// compatible with docker-compose.yml
-type volume struct {
+// Volume defines a Docker volume.
+type Volume struct {
Name string
Driver string
DriverOpts map[string]string `yaml:"driver_opts"`
+ External bool
}
// volumeList is an intermediate type used for decoding a slice of volumes
// in a format compatible with docker-compose.yml
type volumeList struct {
- volumes []*volume
+ volumes []*Volume
}
+// UnmarshalYAML implements custom Yaml unmarshaling.
func (v *volumeList) UnmarshalYAML(unmarshal func(interface{}) error) error {
slice := yaml.MapSlice{}
err := unmarshal(&slice)
@@ -46,11 +29,10 @@ func (v *volumeList) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
for _, s := range slice {
- vv := volume{}
-
- out, err := yaml.Marshal(s.Value)
- if err != nil {
- return err
+ vv := Volume{}
+ out, merr := yaml.Marshal(s.Value)
+ if merr != nil {
+ return merr
}
err = yaml.Unmarshal(out, &vv)
diff --git a/engine/compiler/parse/node_volume_test.go b/yaml/volume_test.go
similarity index 95%
rename from engine/compiler/parse/node_volume_test.go
rename to yaml/volume_test.go
index 795880918..ebeaa9ae1 100644
--- a/engine/compiler/parse/node_volume_test.go
+++ b/yaml/volume_test.go
@@ -1,4 +1,4 @@
-package parse
+package yaml
import (
"testing"
@@ -7,7 +7,7 @@ import (
"gopkg.in/yaml.v2"
)
-func TestVolumeNode(t *testing.T) {
+func TestVolumes(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Volumes", func() {