mirror of
https://github.com/harness/drone.git
synced 2025-05-31 11:43:15 +00:00
Merge branch 'main' into add-pipeline-editor
This commit is contained in:
commit
e785cd82b0
@ -93,7 +93,6 @@ $ ./gitness register
|
||||
|
||||
> NOTE: A user `admin` (pw: `changeit`) gets created by default.
|
||||
|
||||
|
||||
Login to the application:
|
||||
|
||||
```bash
|
||||
|
@ -73,6 +73,10 @@ func (c *command) run(*kingpin.ParseContext) error {
|
||||
// - ctx is canceled
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
|
||||
g.Go(func() error {
|
||||
return system.services.JobScheduler.Run(gCtx)
|
||||
})
|
||||
|
||||
// start server
|
||||
gHTTP, shutdownHTTP := system.server.ListenAndServe()
|
||||
g.Go(gHTTP.Wait)
|
||||
@ -116,6 +120,8 @@ func (c *command) run(*kingpin.ParseContext) error {
|
||||
}
|
||||
}
|
||||
|
||||
system.services.JobScheduler.WaitJobsDone(shutdownCtx)
|
||||
|
||||
log.Info().Msg("wait for subroutines to complete")
|
||||
err = g.Wait()
|
||||
|
||||
|
@ -44,6 +44,7 @@ import (
|
||||
"github.com/harness/gitness/internal/server"
|
||||
"github.com/harness/gitness/internal/services"
|
||||
"github.com/harness/gitness/internal/services/codecomments"
|
||||
"github.com/harness/gitness/internal/services/job"
|
||||
pullreqservice "github.com/harness/gitness/internal/services/pullreq"
|
||||
"github.com/harness/gitness/internal/services/webhook"
|
||||
"github.com/harness/gitness/internal/store"
|
||||
@ -102,6 +103,7 @@ func initSystem(ctx context.Context, config *types.Config) (*cliserver.System, e
|
||||
lock.WireSet,
|
||||
pubsub.WireSet,
|
||||
codecomments.WireSet,
|
||||
job.WireSet,
|
||||
gitrpccron.WireSet,
|
||||
checkcontroller.WireSet,
|
||||
execution.WireSet,
|
||||
|
@ -42,6 +42,7 @@ import (
|
||||
server2 "github.com/harness/gitness/internal/server"
|
||||
"github.com/harness/gitness/internal/services"
|
||||
"github.com/harness/gitness/internal/services/codecomments"
|
||||
"github.com/harness/gitness/internal/services/job"
|
||||
pullreq2 "github.com/harness/gitness/internal/services/pullreq"
|
||||
"github.com/harness/gitness/internal/services/webhook"
|
||||
"github.com/harness/gitness/internal/store"
|
||||
@ -87,6 +88,7 @@ func initSystem(ctx context.Context, config *types.Config) (*server.System, erro
|
||||
}
|
||||
pathUID := check.ProvidePathUIDCheck()
|
||||
repoStore := database.ProvideRepoStore(db, pathCache)
|
||||
pipelineStore := database.ProvidePipelineStore(db)
|
||||
gitrpcConfig, err := server.ProvideGitRPCClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -95,27 +97,26 @@ func initSystem(ctx context.Context, config *types.Config) (*server.System, erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
repoController := repo.ProvideController(config, db, provider, pathUID, authorizer, pathStore, repoStore, spaceStore, principalStore, gitrpcInterface)
|
||||
repoController := repo.ProvideController(config, db, provider, pathUID, authorizer, pathStore, repoStore, spaceStore, pipelineStore, principalStore, gitrpcInterface)
|
||||
executionStore := database.ProvideExecutionStore(db)
|
||||
stageStore := database.ProvideStageStore(db)
|
||||
pipelineStore := database.ProvidePipelineStore(db)
|
||||
executionController := execution.ProvideController(db, authorizer, executionStore, stageStore, pipelineStore, spaceStore)
|
||||
executionController := execution.ProvideController(db, authorizer, executionStore, repoStore, stageStore, pipelineStore)
|
||||
stepStore := database.ProvideStepStore(db)
|
||||
logStore := logs.ProvideLogStore(db, config)
|
||||
logStream := livelog.ProvideLogStream(config)
|
||||
logsController := logs2.ProvideController(db, authorizer, executionStore, pipelineStore, stageStore, stepStore, logStore, logStream, spaceStore)
|
||||
logsController := logs2.ProvideController(db, authorizer, executionStore, repoStore, pipelineStore, stageStore, stepStore, logStore, logStream)
|
||||
secretStore := database.ProvideSecretStore(db)
|
||||
connectorStore := database.ProvideConnectorStore(db)
|
||||
templateStore := database.ProvideTemplateStore(db)
|
||||
spaceController := space.ProvideController(db, provider, pathUID, authorizer, pathStore, pipelineStore, secretStore, connectorStore, templateStore, spaceStore, repoStore, principalStore, repoController, membershipStore)
|
||||
pipelineController := pipeline.ProvideController(db, pathUID, pathStore, repoStore, authorizer, pipelineStore, spaceStore)
|
||||
pipelineController := pipeline.ProvideController(db, pathUID, pathStore, repoStore, authorizer, pipelineStore)
|
||||
encrypter, err := encrypt.ProvideEncrypter(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
secretController := secret.ProvideController(db, pathUID, pathStore, encrypter, secretStore, authorizer, spaceStore)
|
||||
triggerStore := database.ProvideTriggerStore(db)
|
||||
triggerController := trigger.ProvideController(db, authorizer, triggerStore, pipelineStore, spaceStore)
|
||||
triggerController := trigger.ProvideController(db, authorizer, triggerStore, pipelineStore, repoStore)
|
||||
connectorController := connector.ProvideController(db, pathUID, connectorStore, authorizer, spaceStore)
|
||||
templateController := template.ProvideController(db, pathUID, templateStore, authorizer, spaceStore)
|
||||
pluginStore := database.ProvidePluginStore(db)
|
||||
@ -202,7 +203,13 @@ func initSystem(ctx context.Context, config *types.Config) (*server.System, erro
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
servicesServices := services.ProvideServices(webhookService, pullreqService)
|
||||
jobStore := database.ProvideJobStore(db)
|
||||
executor := job.ProvideExecutor(jobStore, pubSub)
|
||||
scheduler, err := job.ProvideScheduler(jobStore, executor, mutexManager, pubSub, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
servicesServices := services.ProvideServices(webhookService, pullreqService, executor, scheduler)
|
||||
serverSystem := server.NewSystem(bootstrapBootstrap, serverServer, grpcServer, manager, servicesServices)
|
||||
return serverSystem, nil
|
||||
}
|
||||
|
@ -53,9 +53,9 @@ func (g Adapter) DiffShortStat(
|
||||
headRef string,
|
||||
useMergeBase bool,
|
||||
) (types.DiffShortStat, error) {
|
||||
separator := "..."
|
||||
if !useMergeBase {
|
||||
separator = ".."
|
||||
separator := ".."
|
||||
if useMergeBase {
|
||||
separator = "..."
|
||||
}
|
||||
|
||||
shortstatArgs := []string{baseRef + separator + headRef}
|
||||
|
@ -83,12 +83,7 @@ func (s DiffService) DiffShortStat(ctx context.Context, r *rpc.DiffRequest) (*rp
|
||||
base := r.GetBase()
|
||||
repoPath := getFullPathForRepo(s.reposRoot, base.GetRepoUid())
|
||||
|
||||
// direct comparison
|
||||
// when direct is false then its like you use --merge-base
|
||||
// to find best common ancestor(s) between two refs
|
||||
direct := !r.GetMergeBase()
|
||||
|
||||
stat, err := s.adapter.DiffShortStat(ctx, repoPath, r.GetBaseRef(), r.GetHeadRef(), direct)
|
||||
stat, err := s.adapter.DiffShortStat(ctx, repoPath, r.GetBaseRef(), r.GetHeadRef(), r.GetMergeBase())
|
||||
if err != nil {
|
||||
return nil, processGitErrorf(err, "failed to fetch short statistics "+
|
||||
"between %s and %s", r.GetBaseRef(), r.GetHeadRef())
|
||||
|
@ -75,7 +75,7 @@ type GitAdapter interface {
|
||||
repoPath string,
|
||||
baseRef string,
|
||||
headRef string,
|
||||
direct bool) (types.DiffShortStat, error)
|
||||
useMergeBase bool) (types.DiffShortStat, error)
|
||||
|
||||
GetDiffHunkHeaders(ctx context.Context,
|
||||
repoPath string,
|
||||
|
3
go.mod
3
go.mod
@ -18,10 +18,11 @@ require (
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/wire v0.5.0
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75
|
||||
github.com/gotidy/ptr v1.3.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/guregu/null v4.0.0+incompatible
|
||||
github.com/harness/go-rbac v0.0.0-20230409233212-ca97fe90aac8
|
||||
github.com/harness/go-rbac v0.0.0-20230829014129-c9b217856ea2
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/jmoiron/sqlx v1.3.3
|
||||
github.com/joho/godotenv v1.3.0
|
||||
|
7
go.sum
7
go.sum
@ -254,6 +254,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=
|
||||
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
@ -267,8 +269,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/guregu/null v4.0.0+incompatible h1:4zw0ckM7ECd6FNNddc3Fu4aty9nTlpkkzH7dPn4/4Gw=
|
||||
github.com/guregu/null v4.0.0+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM=
|
||||
github.com/harness/go-rbac v0.0.0-20230409233212-ca97fe90aac8 h1:sQzaA/ithB9mCXTC5VeC4XTWmQ531Tefbgxr1X4y7WU=
|
||||
github.com/harness/go-rbac v0.0.0-20230409233212-ca97fe90aac8/go.mod h1:uGgBgSZPgyygG5rWzoYsKIQ8TM4zt5yQq9nreznWvOI=
|
||||
github.com/harness/go-rbac v0.0.0-20230829014129-c9b217856ea2 h1:M1Jd2uEKl4YW9g/6vzN1qo06d5dshYYdwxlhOTUSnh4=
|
||||
github.com/harness/go-rbac v0.0.0-20230829014129-c9b217856ea2/go.mod h1:uGgBgSZPgyygG5rWzoYsKIQ8TM4zt5yQq9nreznWvOI=
|
||||
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
|
||||
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
@ -367,6 +369,7 @@ github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk=
|
||||
github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
|
||||
|
@ -9,21 +9,26 @@ import (
|
||||
|
||||
"github.com/harness/gitness/internal/auth"
|
||||
"github.com/harness/gitness/internal/auth/authz"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
"github.com/harness/gitness/types"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// CheckPipeline checks if a repo specific permission is granted for the current auth session
|
||||
// in the scope of its parent.
|
||||
// CheckPipeline checks if a pipeline specific permission is granted for the current auth session
|
||||
// in the scope of the parent.
|
||||
// Returns nil if the permission is granted, otherwise returns an error.
|
||||
// NotAuthenticated, NotAuthorized, or any underlying error.
|
||||
func CheckPipeline(ctx context.Context, authorizer authz.Authorizer, session *auth.Session,
|
||||
parentPath, uid string, permission enum.Permission) error {
|
||||
scope := &types.Scope{SpacePath: parentPath}
|
||||
repoPath string, pipelineUID string, permission enum.Permission) error {
|
||||
spacePath, repoName, err := paths.DisectLeaf(repoPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to disect path '%s'", repoPath)
|
||||
}
|
||||
scope := &types.Scope{SpacePath: spacePath, Repo: repoName}
|
||||
resource := &types.Resource{
|
||||
Type: enum.ResourceTypePipeline,
|
||||
Name: uid,
|
||||
Name: pipelineUID,
|
||||
}
|
||||
|
||||
return Check(ctx, authorizer, session, scope, resource, permission)
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ func (in *ReportInput) Validate() error {
|
||||
in.Payload.Kind)
|
||||
}
|
||||
|
||||
payloadDataJSON, err := sanitizeJsonPayload(in.Payload.Data, &types.CheckPayloadText{})
|
||||
payloadDataJSON, err := sanitizeJSONPayload(in.Payload.Data, &types.CheckPayloadText{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -79,7 +79,7 @@ func (in *ReportInput) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func sanitizeJsonPayload(source json.RawMessage, data any) (json.RawMessage, error) {
|
||||
func sanitizeJSONPayload(source json.RawMessage, data any) (json.RawMessage, error) {
|
||||
if len(source) == 0 {
|
||||
return json.Marshal(data) // marshal the empty object
|
||||
}
|
||||
@ -118,7 +118,7 @@ func (c *Controller) Report(
|
||||
in *ReportInput,
|
||||
metadata map[string]string,
|
||||
) (*types.Check, error) {
|
||||
repo, err := c.getRepoCheckAccess(ctx, session, repoRef, enum.PermissionCommitCheckReport)
|
||||
repo, err := c.getRepoCheckAccess(ctx, session, repoRef, enum.PermissionRepoReportCommitCheck)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire access access to repo: %w", err)
|
||||
}
|
||||
|
@ -15,25 +15,25 @@ type Controller struct {
|
||||
db *sqlx.DB
|
||||
authorizer authz.Authorizer
|
||||
executionStore store.ExecutionStore
|
||||
repoStore store.RepoStore
|
||||
stageStore store.StageStore
|
||||
pipelineStore store.PipelineStore
|
||||
spaceStore store.SpaceStore
|
||||
}
|
||||
|
||||
func NewController(
|
||||
db *sqlx.DB,
|
||||
authorizer authz.Authorizer,
|
||||
executionStore store.ExecutionStore,
|
||||
repoStore store.RepoStore,
|
||||
stageStore store.StageStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return &Controller{
|
||||
db: db,
|
||||
authorizer: authorizer,
|
||||
executionStore: executionStore,
|
||||
repoStore: repoStore,
|
||||
stageStore: stageStore,
|
||||
pipelineStore: pipelineStore,
|
||||
spaceStore: spaceStore,
|
||||
}
|
||||
}
|
||||
|
@ -23,25 +23,25 @@ type CreateInput struct {
|
||||
func (c *Controller) Create(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
uid string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
in *CreateInput,
|
||||
) (*types.Execution, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, uid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineExecute)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path,
|
||||
pipelineUID, enum.PermissionPipelineExecute)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err = c.pipelineStore.IncrementSeqNum(ctx, pipeline)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to increment sequence number: %w", err)
|
||||
|
@ -16,23 +16,23 @@ import (
|
||||
func (c *Controller) Delete(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
executionNum int64,
|
||||
) error {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find parent space: %w", err)
|
||||
return fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
err = c.executionStore.Delete(ctx, pipeline.ID, executionNum)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not delete execution: %w", err)
|
||||
|
@ -17,25 +17,23 @@ import (
|
||||
func (c *Controller) Find(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
executionNum int64,
|
||||
) (*types.Execution, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
|
||||
execution, err := c.executionStore.Find(ctx, pipeline.ID, executionNum)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find execution %d: %w", executionNum, err)
|
||||
|
@ -17,24 +17,25 @@ import (
|
||||
func (c *Controller) List(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
pagination types.Pagination,
|
||||
) ([]*types.Execution, int64, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find parent space: %w", err)
|
||||
}
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
var count int64
|
||||
var executions []*types.Execution
|
||||
|
||||
|
@ -21,21 +21,21 @@ type UpdateInput struct {
|
||||
func (c *Controller) Update(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
executionNum int64,
|
||||
in *UpdateInput) (*types.Execution, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check auth: %w", err)
|
||||
return nil, fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
@ -20,10 +20,10 @@ var WireSet = wire.NewSet(
|
||||
func ProvideController(db *sqlx.DB,
|
||||
authorizer authz.Authorizer,
|
||||
executionStore store.ExecutionStore,
|
||||
repoStore store.RepoStore,
|
||||
stageStore store.StageStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return NewController(db, authorizer, executionStore, stageStore,
|
||||
pipelineStore, spaceStore)
|
||||
return NewController(db, authorizer, executionStore, repoStore, stageStore,
|
||||
pipelineStore)
|
||||
}
|
||||
|
@ -16,34 +16,34 @@ type Controller struct {
|
||||
db *sqlx.DB
|
||||
authorizer authz.Authorizer
|
||||
executionStore store.ExecutionStore
|
||||
repoStore store.RepoStore
|
||||
pipelineStore store.PipelineStore
|
||||
stageStore store.StageStore
|
||||
stepStore store.StepStore
|
||||
logStore store.LogStore
|
||||
logStream livelog.LogStream
|
||||
spaceStore store.SpaceStore
|
||||
}
|
||||
|
||||
func NewController(
|
||||
db *sqlx.DB,
|
||||
authorizer authz.Authorizer,
|
||||
executionStore store.ExecutionStore,
|
||||
repoStore store.RepoStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
stageStore store.StageStore,
|
||||
stepStore store.StepStore,
|
||||
logStore store.LogStore,
|
||||
logStream livelog.LogStream,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return &Controller{
|
||||
db: db,
|
||||
authorizer: authorizer,
|
||||
executionStore: executionStore,
|
||||
repoStore: repoStore,
|
||||
pipelineStore: pipelineStore,
|
||||
stageStore: stageStore,
|
||||
stepStore: stepStore,
|
||||
logStore: logStore,
|
||||
logStream: logStream,
|
||||
spaceStore: spaceStore,
|
||||
}
|
||||
}
|
||||
|
@ -17,27 +17,26 @@ import (
|
||||
func (c *Controller) Find(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
executionNum int64,
|
||||
stageNum int,
|
||||
stepNum int,
|
||||
) (io.ReadCloser, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
|
||||
execution, err := c.executionStore.Find(ctx, pipeline.ID, executionNum)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find execution: %w", err)
|
||||
|
@ -17,27 +17,25 @@ import (
|
||||
func (c *Controller) Tail(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
executionNum int64,
|
||||
stageNum int,
|
||||
stepNum int,
|
||||
) (<-chan *livelog.Line, <-chan error, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
|
||||
execution, err := c.executionStore.Find(ctx, pipeline.ID, executionNum)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to find execution: %w", err)
|
||||
|
@ -21,13 +21,13 @@ var WireSet = wire.NewSet(
|
||||
func ProvideController(db *sqlx.DB,
|
||||
authorizer authz.Authorizer,
|
||||
executionStore store.ExecutionStore,
|
||||
repoStore store.RepoStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
stageStore store.StageStore,
|
||||
stepStore store.StepStore,
|
||||
logStore store.LogStore,
|
||||
logStream livelog.LogStream,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return NewController(db, authorizer, executionStore,
|
||||
pipelineStore, stageStore, stepStore, logStore, logStream, spaceStore)
|
||||
return NewController(db, authorizer, executionStore, repoStore,
|
||||
pipelineStore, stageStore, stepStore, logStore, logStream)
|
||||
}
|
||||
|
@ -20,7 +20,6 @@ type Controller struct {
|
||||
repoStore store.RepoStore
|
||||
authorizer authz.Authorizer
|
||||
pipelineStore store.PipelineStore
|
||||
spaceStore store.SpaceStore
|
||||
}
|
||||
|
||||
func NewController(
|
||||
@ -30,7 +29,6 @@ func NewController(
|
||||
pathStore store.PathStore,
|
||||
repoStore store.RepoStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return &Controller{
|
||||
db: db,
|
||||
@ -39,6 +37,5 @@ func NewController(
|
||||
repoStore: repoStore,
|
||||
authorizer: authorizer,
|
||||
pipelineStore: pipelineStore,
|
||||
spaceStore: spaceStore,
|
||||
}
|
||||
}
|
||||
|
@ -7,7 +7,6 @@ package pipeline
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -26,34 +25,26 @@ var (
|
||||
)
|
||||
|
||||
type CreateInput struct {
|
||||
Description string `json:"description"`
|
||||
SpaceRef string `json:"space_ref"`
|
||||
UID string `json:"uid"`
|
||||
RepoRef string `json:"repo_ref"` // empty if repo_type != gitness
|
||||
RepoType enum.ScmType `json:"repo_type"`
|
||||
DefaultBranch string `json:"default_branch"`
|
||||
ConfigPath string `json:"config_path"`
|
||||
Description string `json:"description"`
|
||||
UID string `json:"uid"`
|
||||
DefaultBranch string `json:"default_branch"`
|
||||
ConfigPath string `json:"config_path"`
|
||||
}
|
||||
|
||||
func (c *Controller) Create(ctx context.Context, session *auth.Session, in *CreateInput) (*types.Pipeline, error) {
|
||||
parentSpace, err := c.spaceStore.FindByRef(ctx, in.SpaceRef)
|
||||
func (c *Controller) Create(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
repoRef string,
|
||||
in *CreateInput,
|
||||
) (*types.Pipeline, error) {
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent by ref: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, parentSpace.Path, in.UID, enum.PermissionPipelineEdit)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, "", enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var repoID int64
|
||||
|
||||
if in.RepoType == enum.ScmTypeGitness {
|
||||
repo, err := c.repoStore.FindByRef(ctx, in.RepoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
repoID = repo.ID
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
if err := c.sanitizeCreateInput(in); err != nil {
|
||||
@ -64,11 +55,9 @@ func (c *Controller) Create(ctx context.Context, session *auth.Session, in *Crea
|
||||
now := time.Now().UnixMilli()
|
||||
pipeline = &types.Pipeline{
|
||||
Description: in.Description,
|
||||
SpaceID: parentSpace.ID,
|
||||
RepoID: repo.ID,
|
||||
UID: in.UID,
|
||||
Seq: 0,
|
||||
RepoID: repoID,
|
||||
RepoType: in.RepoType,
|
||||
DefaultBranch: in.DefaultBranch,
|
||||
ConfigPath: in.ConfigPath,
|
||||
Created: now,
|
||||
@ -84,11 +73,6 @@ func (c *Controller) Create(ctx context.Context, session *auth.Session, in *Crea
|
||||
}
|
||||
|
||||
func (c *Controller) sanitizeCreateInput(in *CreateInput) error {
|
||||
parentRefAsID, err := strconv.ParseInt(in.SpaceRef, 10, 64)
|
||||
if (err == nil && parentRefAsID <= 0) || (len(strings.TrimSpace(in.SpaceRef)) == 0) {
|
||||
return errPipelineRequiresParent
|
||||
}
|
||||
|
||||
if err := c.uidCheck(in.UID, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -13,17 +13,18 @@ import (
|
||||
"github.com/harness/gitness/types/enum"
|
||||
)
|
||||
|
||||
func (c *Controller) Delete(ctx context.Context, session *auth.Session, spaceRef string, uid string) error {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
func (c *Controller) Delete(ctx context.Context, session *auth.Session, repoRef string, uid string) error {
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find parent space: %w", err)
|
||||
return fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, uid, enum.PermissionPipelineDelete)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, uid, enum.PermissionPipelineDelete)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not authorize: %w", err)
|
||||
return fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
err = c.pipelineStore.DeleteByUID(ctx, space.ID, uid)
|
||||
|
||||
err = c.pipelineStore.DeleteByUID(ctx, repo.ID, uid)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not delete pipeline: %w", err)
|
||||
}
|
||||
|
@ -17,16 +17,16 @@ import (
|
||||
func (c *Controller) Find(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
uid string,
|
||||
) (*types.Pipeline, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, uid, enum.PermissionPipelineView)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, uid, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not authorize: %w", err)
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
return c.pipelineStore.FindByUID(ctx, space.ID, uid)
|
||||
return c.pipelineStore.FindByUID(ctx, repo.ID, uid)
|
||||
}
|
||||
|
@ -23,21 +23,20 @@ type UpdateInput struct {
|
||||
func (c *Controller) Update(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
uid string,
|
||||
in *UpdateInput,
|
||||
) (*types.Pipeline, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, uid, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, uid, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, uid)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, uid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
@ -24,7 +24,6 @@ func ProvideController(db *sqlx.DB,
|
||||
repoStore store.RepoStore,
|
||||
authorizer authz.Authorizer,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
) *Controller {
|
||||
return NewController(db, uidCheck, authorizer, pathStore, repoStore, pipelineStore, spaceStore)
|
||||
return NewController(db, uidCheck, authorizer, pathStore, repoStore, pipelineStore)
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ type Controller struct {
|
||||
pathStore store.PathStore
|
||||
repoStore store.RepoStore
|
||||
spaceStore store.SpaceStore
|
||||
pipelineStore store.PipelineStore
|
||||
principalStore store.PrincipalStore
|
||||
gitRPCClient gitrpc.Interface
|
||||
}
|
||||
@ -42,6 +43,7 @@ func NewController(
|
||||
pathStore store.PathStore,
|
||||
repoStore store.RepoStore,
|
||||
spaceStore store.SpaceStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
principalStore store.PrincipalStore,
|
||||
gitRPCClient gitrpc.Interface,
|
||||
) *Controller {
|
||||
@ -54,6 +56,7 @@ func NewController(
|
||||
pathStore: pathStore,
|
||||
repoStore: repoStore,
|
||||
spaceStore: spaceStore,
|
||||
pipelineStore: pipelineStore,
|
||||
principalStore: principalStore,
|
||||
gitRPCClient: gitRPCClient,
|
||||
}
|
||||
|
@ -92,6 +92,7 @@ func (c *Controller) DiffStats(
|
||||
ReadParams: gitrpc.CreateRPCReadParams(repo),
|
||||
BaseRef: info.BaseRef,
|
||||
HeadRef: info.HeadRef,
|
||||
MergeBase: info.MergeBase,
|
||||
})
|
||||
if err != nil {
|
||||
return types.DiffStats{}, err
|
||||
|
@ -1,7 +1,7 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
package space
|
||||
package repo
|
||||
|
||||
import (
|
||||
"context"
|
||||
@ -14,36 +14,45 @@ import (
|
||||
"github.com/harness/gitness/types/enum"
|
||||
)
|
||||
|
||||
// ListPipelines lists the pipelines in a space.
|
||||
// ListPipelines lists the pipelines under a repository.
|
||||
func (c *Controller) ListPipelines(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
latest bool,
|
||||
filter types.ListQueryFilter,
|
||||
) ([]*types.Pipeline, int64, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to find repo: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckSpace(ctx, c.authorizer, session, space, enum.PermissionPipelineView, false)
|
||||
err = apiauth.CheckRepo(ctx, c.authorizer, session, repo, enum.PermissionPipelineView, false)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("could not authorize: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to authorize: %w", err)
|
||||
}
|
||||
|
||||
var count int64
|
||||
var pipelines []*types.Pipeline
|
||||
|
||||
err = dbtx.New(c.db).WithTx(ctx, func(ctx context.Context) (err error) {
|
||||
count, err = c.pipelineStore.Count(ctx, space.ID, filter)
|
||||
count, err = c.pipelineStore.Count(ctx, repo.ID, filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count child executions: %w", err)
|
||||
}
|
||||
|
||||
pipelines, err = c.pipelineStore.List(ctx, space.ID, filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count child executions: %w", err)
|
||||
if !latest {
|
||||
pipelines, err = c.pipelineStore.List(ctx, repo.ID, filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list pipelines: %w", err)
|
||||
}
|
||||
} else {
|
||||
pipelines, err = c.pipelineStore.ListLatest(ctx, repo.ID, filter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list latest pipelines: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}, dbtx.TxDefaultReadOnly)
|
||||
if err != nil {
|
@ -23,7 +23,8 @@ var WireSet = wire.NewSet(
|
||||
|
||||
func ProvideController(config *types.Config, db *sqlx.DB, urlProvider *url.Provider,
|
||||
uidCheck check.PathUID, authorizer authz.Authorizer, pathStore store.PathStore, repoStore store.RepoStore,
|
||||
spaceStore store.SpaceStore, principalStore store.PrincipalStore, rpcClient gitrpc.Interface) *Controller {
|
||||
spaceStore store.SpaceStore, pipelineStore store.PipelineStore,
|
||||
principalStore store.PrincipalStore, rpcClient gitrpc.Interface) *Controller {
|
||||
return NewController(config.Git.DefaultBranch, db, urlProvider, uidCheck,
|
||||
authorizer, pathStore, repoStore, spaceStore, principalStore, rpcClient)
|
||||
authorizer, pathStore, repoStore, spaceStore, pipelineStore, principalStore, rpcClient)
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ type Controller struct {
|
||||
authorizer authz.Authorizer
|
||||
triggerStore store.TriggerStore
|
||||
pipelineStore store.PipelineStore
|
||||
spaceStore store.SpaceStore
|
||||
repoStore store.RepoStore
|
||||
}
|
||||
|
||||
func NewController(
|
||||
@ -24,13 +24,13 @@ func NewController(
|
||||
authorizer authz.Authorizer,
|
||||
triggerStore store.TriggerStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
repoStore store.RepoStore,
|
||||
) *Controller {
|
||||
return &Controller{
|
||||
db: db,
|
||||
authorizer: authorizer,
|
||||
triggerStore: triggerStore,
|
||||
pipelineStore: pipelineStore,
|
||||
spaceStore: spaceStore,
|
||||
repoStore: repoStore,
|
||||
}
|
||||
}
|
||||
|
@ -24,25 +24,24 @@ type CreateInput struct {
|
||||
func (c *Controller) Create(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
in *CreateInput,
|
||||
) (*types.Trigger, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
// Trigger permissions are associated with pipeline permissions. If a user has permissions
|
||||
// to edit the pipeline, they will have permissions to create a trigger as well.
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineEdit)
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize: %w", err)
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now().UnixMilli()
|
||||
|
@ -16,26 +16,26 @@ import (
|
||||
func (c *Controller) Delete(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
triggerUID string,
|
||||
) error {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find parent space: %w", err)
|
||||
return fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
// Trigger permissions are associated with pipeline permissions. If a user has permissions
|
||||
// to edit the pipeline, they will have permissions to remove a trigger as well.
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
// Trigger permissions are associated with pipeline permissions. If a user has permissions
|
||||
// to delete the pipeline, they will have permissions to remove a trigger as well.
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
err = c.triggerStore.DeleteByUID(ctx, pipeline.ID, triggerUID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not delete trigger: %w", err)
|
||||
|
@ -17,25 +17,24 @@ import (
|
||||
func (c *Controller) Find(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
triggerUID string,
|
||||
) (*types.Trigger, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not authorize: %w", err)
|
||||
}
|
||||
|
||||
trigger, err := c.triggerStore.FindByUID(ctx, pipeline.ID, triggerUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find trigger %s: %w", triggerUID, err)
|
||||
|
@ -16,22 +16,24 @@ import (
|
||||
func (c *Controller) List(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
filter types.ListQueryFilter,
|
||||
) ([]*types.Trigger, int64, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find parent space: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
// Trigger permissions are associated with pipeline permissions. If a user has permissions
|
||||
// to view the pipeline, they will have permissions to list triggers as well.
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineView)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipeline.UID, enum.PermissionPipelineView)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to authorize: %w", err)
|
||||
return nil, 0, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
||||
count, err := c.triggerStore.Count(ctx, pipeline.ID, filter)
|
||||
|
@ -23,21 +23,22 @@ type UpdateInput struct {
|
||||
func (c *Controller) Update(
|
||||
ctx context.Context,
|
||||
session *auth.Session,
|
||||
spaceRef string,
|
||||
repoRef string,
|
||||
pipelineUID string,
|
||||
triggerUID string,
|
||||
in *UpdateInput) (*types.Trigger, error) {
|
||||
space, err := c.spaceStore.FindByRef(ctx, spaceRef)
|
||||
repo, err := c.repoStore.FindByRef(ctx, repoRef)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find space: %w", err)
|
||||
return nil, fmt.Errorf("failed to find repo by ref: %w", err)
|
||||
}
|
||||
// Trigger permissions are associated with pipeline permissions. If a user has permissions
|
||||
// to edit the pipeline, they will have permissions to edit the trigger as well.
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, repo.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize pipeline: %w", err)
|
||||
}
|
||||
|
||||
err = apiauth.CheckPipeline(ctx, c.authorizer, session, space.Path, pipelineUID, enum.PermissionPipelineEdit)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check auth: %w", err)
|
||||
}
|
||||
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, space.ID, pipelineUID)
|
||||
pipeline, err := c.pipelineStore.FindByUID(ctx, repo.ID, pipelineUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find pipeline: %w", err)
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ func ProvideController(db *sqlx.DB,
|
||||
authorizer authz.Authorizer,
|
||||
triggerStore store.TriggerStore,
|
||||
pipelineStore store.PipelineStore,
|
||||
spaceStore store.SpaceStore,
|
||||
repoStore store.RepoStore,
|
||||
) *Controller {
|
||||
return NewController(db, authorizer, triggerStore, pipelineStore, spaceStore)
|
||||
return NewController(db, authorizer, triggerStore, pipelineStore, repoStore)
|
||||
}
|
||||
|
@ -11,19 +11,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleCreate(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -36,7 +35,7 @@ func HandleCreate(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
execution, err := executionCtrl.Create(ctx, session, spaceRef, pipelineUID, in)
|
||||
execution, err := executionCtrl.Create(ctx, session, repoRef, pipelineUID, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,19 +10,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleDelete(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -33,7 +32,7 @@ func HandleDelete(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
err = executionCtrl.Delete(ctx, session, spaceRef, pipelineUID, n)
|
||||
err = executionCtrl.Delete(ctx, session, repoRef, pipelineUID, n)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,14 +10,13 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleFind(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -27,13 +26,13 @@ func HandleFind(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
execution, err := executionCtrl.Find(ctx, session, spaceRef, pipelineUID, n)
|
||||
execution, err := executionCtrl.Find(ctx, session, repoRef, pipelineUID, n)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,19 +10,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleList(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -30,7 +29,7 @@ func HandleList(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
|
||||
pagination := request.ParsePaginationFromRequest(r)
|
||||
|
||||
repos, totalCount, err := executionCtrl.List(ctx, session, spaceRef, pipelineUID, pagination)
|
||||
repos, totalCount, err := executionCtrl.List(ctx, session, repoRef, pipelineUID, pagination)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleUpdate(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
@ -26,12 +25,12 @@ func HandleUpdate(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -42,7 +41,7 @@ func HandleUpdate(executionCtrl *execution.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
pipeline, err := executionCtrl.Update(ctx, session, spaceRef, pipelineUID, n, in)
|
||||
pipeline, err := executionCtrl.Update(ctx, session, repoRef, pipelineUID, n, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -11,14 +11,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/logs"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleFind(logCtrl *logs.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -38,14 +42,8 @@ func HandleFind(logCtrl *logs.Controller) http.HandlerFunc {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
rc, err := logCtrl.Find(
|
||||
ctx, session, spaceRef, pipelineUID,
|
||||
ctx, session, repoRef, pipelineUID,
|
||||
executionNum, int(stageNum), int(stepNum))
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
|
@ -14,7 +14,6 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/logs"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
@ -28,7 +27,7 @@ func HandleTail(logCtrl *logs.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -48,7 +47,7 @@ func HandleTail(logCtrl *logs.Controller) http.HandlerFunc {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -65,7 +64,7 @@ func HandleTail(logCtrl *logs.Controller) http.HandlerFunc {
|
||||
f.Flush()
|
||||
|
||||
linec, errc, err := logCtrl.Tail(
|
||||
ctx, session, spaceRef, pipelineUID,
|
||||
ctx, session, repoRef, pipelineUID,
|
||||
executionNum, int(stageNum), int(stepNum))
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
|
@ -18,14 +18,20 @@ func HandleCreate(pipelineCtrl *pipeline.Controller) http.HandlerFunc {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
in := new(pipeline.CreateInput)
|
||||
err := json.NewDecoder(r.Body).Decode(in)
|
||||
err = json.NewDecoder(r.Body).Decode(in)
|
||||
if err != nil {
|
||||
render.BadRequestf(w, "Invalid Request Body: %s.", err)
|
||||
return
|
||||
}
|
||||
|
||||
pipeline, err := pipelineCtrl.Create(ctx, session, in)
|
||||
pipeline, err := pipelineCtrl.Create(ctx, session, repoRef, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,25 +10,24 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/pipeline"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleDelete(pipelineCtrl *pipeline.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = pipelineCtrl.Delete(ctx, session, spaceRef, pipelineUID)
|
||||
err = pipelineCtrl.Delete(ctx, session, repoRef, pipelineUID)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,25 +10,24 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/pipeline"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleFind(pipelineCtrl *pipeline.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
pipeline, err := pipelineCtrl.Find(ctx, session, spaceRef, pipelineUID)
|
||||
pipeline, err := pipelineCtrl.Find(ctx, session, repoRef, pipelineUID)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/pipeline"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleUpdate(pipelineCtrl *pipeline.Controller) http.HandlerFunc {
|
||||
@ -26,18 +25,18 @@ func HandleUpdate(pipelineCtrl *pipeline.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
pipeline, err := pipelineCtrl.Update(ctx, session, spaceRef, pipelineUID, in)
|
||||
pipeline, err := pipelineCtrl.Update(ctx, session, repoRef, pipelineUID, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -2,28 +2,29 @@
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package space
|
||||
package repo
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/harness/gitness/internal/api/controller/space"
|
||||
"github.com/harness/gitness/internal/api/controller/repo"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
)
|
||||
|
||||
func HandleListPipelines(spaceCtrl *space.Controller) http.HandlerFunc {
|
||||
func HandleListPipelines(repoCtrl *repo.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
spaceRef, err := request.GetSpaceRefFromPath(r)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
filter := request.ParseListQueryFilterFromRequest(r)
|
||||
repos, totalCount, err := spaceCtrl.ListPipelines(ctx, session, spaceRef, filter)
|
||||
latest := request.GetLatestFromPath(r)
|
||||
repos, totalCount, err := repoCtrl.ListPipelines(ctx, session, repoRef, latest, filter)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
@ -11,19 +11,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleCreate(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -36,7 +35,7 @@ func HandleCreate(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
trigger, err := triggerCtrl.Create(ctx, session, spaceRef, pipelineUID, in)
|
||||
trigger, err := triggerCtrl.Create(ctx, session, repoRef, pipelineUID, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,19 +10,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleDelete(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -33,7 +32,7 @@ func HandleDelete(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
err = triggerCtrl.Delete(ctx, session, spaceRef, pipelineUID, triggerUID)
|
||||
err = triggerCtrl.Delete(ctx, session, repoRef, pipelineUID, triggerUID)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,14 +10,13 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleFind(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -27,13 +26,13 @@ func HandleFind(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
trigger, err := triggerCtrl.Find(ctx, session, spaceRef, pipelineUID, triggerUID)
|
||||
trigger, err := triggerCtrl.Find(ctx, session, repoRef, pipelineUID, triggerUID)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -10,19 +10,18 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleList(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
session, _ := request.AuthSessionFrom(ctx)
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -30,7 +29,7 @@ func HandleList(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
|
||||
filter := request.ParseListQueryFilterFromRequest(r)
|
||||
|
||||
repos, totalCount, err := triggerCtrl.List(ctx, session, spaceRef, pipelineUID, filter)
|
||||
repos, totalCount, err := triggerCtrl.List(ctx, session, repoRef, pipelineUID, filter)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/render"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/paths"
|
||||
)
|
||||
|
||||
func HandleUpdate(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
@ -26,12 +25,12 @@ func HandleUpdate(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
pipelineRef, err := request.GetPipelineRefFromPath(r)
|
||||
pipelineUID, err := request.GetPipelineUIDFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
}
|
||||
spaceRef, pipelineUID, err := paths.DisectLeaf(pipelineRef)
|
||||
repoRef, err := request.GetRepoRefFromPath(r)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
@ -42,7 +41,7 @@ func HandleUpdate(triggerCtrl *trigger.Controller) http.HandlerFunc {
|
||||
return
|
||||
}
|
||||
|
||||
pipeline, err := triggerCtrl.Update(ctx, session, spaceRef, pipelineUID, triggerUID, in)
|
||||
pipeline, err := triggerCtrl.Update(ctx, session, repoRef, pipelineUID, triggerUID, in)
|
||||
if err != nil {
|
||||
render.TranslatedUserError(w, err)
|
||||
return
|
||||
|
@ -7,9 +7,11 @@ package openapi
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gotidy/ptr"
|
||||
"github.com/harness/gitness/internal/api/controller/execution"
|
||||
"github.com/harness/gitness/internal/api/controller/pipeline"
|
||||
"github.com/harness/gitness/internal/api/controller/trigger"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/api/usererror"
|
||||
"github.com/harness/gitness/types"
|
||||
|
||||
@ -17,7 +19,8 @@ import (
|
||||
)
|
||||
|
||||
type pipelineRequest struct {
|
||||
Ref string `path:"pipeline_ref"`
|
||||
repoRequest
|
||||
Ref string `path:"pipeline_uid"`
|
||||
}
|
||||
|
||||
type executionRequest struct {
|
||||
@ -47,6 +50,7 @@ type createTriggerRequest struct {
|
||||
}
|
||||
|
||||
type createPipelineRequest struct {
|
||||
repoRequest
|
||||
pipeline.CreateInput
|
||||
}
|
||||
|
||||
@ -77,6 +81,20 @@ type updatePipelineRequest struct {
|
||||
pipeline.UpdateInput
|
||||
}
|
||||
|
||||
var queryParameterLatest = openapi3.ParameterOrRef{
|
||||
Parameter: &openapi3.Parameter{
|
||||
Name: request.QueryParamLatest,
|
||||
In: openapi3.ParameterInQuery,
|
||||
Description: ptr.String("Whether to fetch latest build information for each pipeline."),
|
||||
Required: ptr.Bool(false),
|
||||
Schema: &openapi3.SchemaOrRef{
|
||||
Schema: &openapi3.Schema{
|
||||
Type: ptrSchemaType(openapi3.SchemaTypeBoolean),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
opCreate := openapi3.Operation{}
|
||||
opCreate.WithTags("pipeline")
|
||||
@ -87,7 +105,19 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&opCreate, new(usererror.Error), http.StatusInternalServerError)
|
||||
_ = reflector.SetJSONResponse(&opCreate, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&opCreate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPost, "/pipelines", opCreate)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPost, "/repos/{repo_ref}/pipelines", opCreate)
|
||||
|
||||
opPipelines := openapi3.Operation{}
|
||||
opPipelines.WithTags("pipeline")
|
||||
opPipelines.WithMapOfAnything(map[string]interface{}{"operationId": "listPipelines"})
|
||||
opPipelines.WithParameters(queryParameterQueryRepo, queryParameterPage, queryParameterLimit, queryParameterLatest)
|
||||
_ = reflector.SetRequest(&opPipelines, new(repoRequest), http.MethodGet)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, []types.Pipeline{}, http.StatusOK)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusInternalServerError)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet, "/repos/{repo_ref}/pipelines", opPipelines)
|
||||
|
||||
opFind := openapi3.Operation{}
|
||||
opFind.WithTags("pipeline")
|
||||
@ -98,7 +128,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&opFind, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&opFind, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&opFind, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet, "/pipelines/{pipeline_ref}", opFind)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet, "/repos/{repo_ref}/pipelines/{pipeline_uid}", opFind)
|
||||
|
||||
opDelete := openapi3.Operation{}
|
||||
opDelete.WithTags("pipeline")
|
||||
@ -109,7 +139,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&opDelete, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&opDelete, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&opDelete, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodDelete, "/pipelines/{pipeline_ref}", opDelete)
|
||||
_ = reflector.Spec.AddOperation(http.MethodDelete, "/repos/{repo_ref}/pipelines/{pipeline_uid}", opDelete)
|
||||
|
||||
opUpdate := openapi3.Operation{}
|
||||
opUpdate.WithTags("pipeline")
|
||||
@ -122,7 +152,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&opUpdate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&opUpdate, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPatch,
|
||||
"/pipelines/{pipeline_ref}", opUpdate)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}", opUpdate)
|
||||
|
||||
executionCreate := openapi3.Operation{}
|
||||
executionCreate.WithTags("pipeline")
|
||||
@ -134,7 +164,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&executionCreate, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&executionCreate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPost,
|
||||
"/pipelines/{pipeline_ref}/executions", executionCreate)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions", executionCreate)
|
||||
|
||||
executionFind := openapi3.Operation{}
|
||||
executionFind.WithTags("pipeline")
|
||||
@ -146,7 +176,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&executionFind, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&executionFind, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet,
|
||||
"/pipelines/{pipeline_ref}/executions/{execution_number}", executionFind)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions/{execution_number}", executionFind)
|
||||
|
||||
executionDelete := openapi3.Operation{}
|
||||
executionDelete.WithTags("pipeline")
|
||||
@ -158,7 +188,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&executionDelete, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&executionDelete, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodDelete,
|
||||
"/pipelines/{pipeline_ref}/executions/{execution_number}", executionDelete)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions/{execution_number}", executionDelete)
|
||||
|
||||
executionUpdate := openapi3.Operation{}
|
||||
executionUpdate.WithTags("pipeline")
|
||||
@ -171,7 +201,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&executionUpdate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&executionUpdate, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPatch,
|
||||
"/pipelines/{pipeline_ref}/executions/{execution_number}", executionUpdate)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions/{execution_number}", executionUpdate)
|
||||
|
||||
executionList := openapi3.Operation{}
|
||||
executionList.WithTags("pipeline")
|
||||
@ -184,7 +214,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&executionList, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&executionList, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet,
|
||||
"/pipelines/{pipeline_ref}/executions", executionList)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions", executionList)
|
||||
|
||||
triggerCreate := openapi3.Operation{}
|
||||
triggerCreate.WithTags("pipeline")
|
||||
@ -196,7 +226,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&triggerCreate, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&triggerCreate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPost,
|
||||
"/pipelines/{pipeline_ref}/triggers", triggerCreate)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/triggers", triggerCreate)
|
||||
|
||||
triggerFind := openapi3.Operation{}
|
||||
triggerFind.WithTags("pipeline")
|
||||
@ -208,7 +238,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&triggerFind, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&triggerFind, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet,
|
||||
"/pipelines/{pipeline_ref}/triggers/{trigger_uid}", triggerFind)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/triggers/{trigger_uid}", triggerFind)
|
||||
|
||||
triggerDelete := openapi3.Operation{}
|
||||
triggerDelete.WithTags("pipeline")
|
||||
@ -220,7 +250,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&triggerDelete, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&triggerDelete, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodDelete,
|
||||
"/pipelines/{pipeline_ref}/triggers/{trigger_uid}", triggerDelete)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/triggers/{trigger_uid}", triggerDelete)
|
||||
|
||||
triggerUpdate := openapi3.Operation{}
|
||||
triggerUpdate.WithTags("pipeline")
|
||||
@ -233,7 +263,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&triggerUpdate, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&triggerUpdate, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodPatch,
|
||||
"/pipelines/{pipeline_ref}/triggers/{trigger_uid}", triggerUpdate)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/triggers/{trigger_uid}", triggerUpdate)
|
||||
|
||||
triggerList := openapi3.Operation{}
|
||||
triggerList.WithTags("pipeline")
|
||||
@ -246,7 +276,7 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&triggerList, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&triggerList, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet,
|
||||
"/pipelines/{pipeline_ref}/triggers", triggerList)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/triggers", triggerList)
|
||||
|
||||
logView := openapi3.Operation{}
|
||||
logView.WithTags("pipeline")
|
||||
@ -258,5 +288,5 @@ func pipelineOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&logView, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&logView, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet,
|
||||
"/pipelines/{pipeline_ref}/executions/{execution_number}/logs/{stage_number}/{step_number}", logView)
|
||||
"/repos/{repo_ref}/pipelines/{pipeline_uid}/executions/{execution_number}/logs/{stage_number}/{step_number}", logView)
|
||||
}
|
||||
|
@ -7,11 +7,11 @@ package openapi
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gotidy/ptr"
|
||||
"github.com/harness/gitness/internal/api/request"
|
||||
"github.com/harness/gitness/internal/api/usererror"
|
||||
"github.com/harness/gitness/types"
|
||||
|
||||
"github.com/gotidy/ptr"
|
||||
"github.com/swaggest/openapi-go/openapi3"
|
||||
)
|
||||
|
||||
|
@ -230,18 +230,6 @@ func spaceOperations(reflector *openapi3.Reflector) {
|
||||
_ = reflector.SetJSONResponse(&opRepos, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet, "/spaces/{space_ref}/repos", opRepos)
|
||||
|
||||
opPipelines := openapi3.Operation{}
|
||||
opPipelines.WithTags("space")
|
||||
opPipelines.WithMapOfAnything(map[string]interface{}{"operationId": "listPipelines"})
|
||||
opPipelines.WithParameters(queryParameterQueryRepo, queryParameterPage, queryParameterLimit)
|
||||
_ = reflector.SetRequest(&opPipelines, new(spaceRequest), http.MethodGet)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, []types.Pipeline{}, http.StatusOK)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusInternalServerError)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusUnauthorized)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusForbidden)
|
||||
_ = reflector.SetJSONResponse(&opPipelines, new(usererror.Error), http.StatusNotFound)
|
||||
_ = reflector.Spec.AddOperation(http.MethodGet, "/spaces/{space_ref}/pipelines", opPipelines)
|
||||
|
||||
opTemplates := openapi3.Operation{}
|
||||
opTemplates.WithTags("space")
|
||||
opTemplates.WithMapOfAnything(map[string]interface{}{"operationId": "listTemplates"})
|
||||
|
@ -10,14 +10,15 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
PathParamPipelineRef = "pipeline_ref"
|
||||
PathParamPipelineRef = "pipeline_uid"
|
||||
PathParamExecutionNumber = "execution_number"
|
||||
PathParamStageNumber = "stage_number"
|
||||
PathParamStepNumber = "step_number"
|
||||
PathParamTriggerUID = "trigger_uid"
|
||||
QueryParamLatest = "latest"
|
||||
)
|
||||
|
||||
func GetPipelineRefFromPath(r *http.Request) (string, error) {
|
||||
func GetPipelineUIDFromPath(r *http.Request) (string, error) {
|
||||
rawRef, err := PathParamOrError(r, PathParamPipelineRef)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -39,6 +40,14 @@ func GetStepNumberFromPath(r *http.Request) (int64, error) {
|
||||
return PathParamAsPositiveInt64(r, PathParamStepNumber)
|
||||
}
|
||||
|
||||
func GetLatestFromPath(r *http.Request) bool {
|
||||
v, _ := QueryParam(r, QueryParamLatest)
|
||||
if v == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetTriggerUIDFromPath(r *http.Request) (string, error) {
|
||||
rawRef, err := PathParamOrError(r, PathParamTriggerUID)
|
||||
if err != nil {
|
||||
|
@ -71,7 +71,7 @@ type APIHandler interface {
|
||||
|
||||
var (
|
||||
// terminatedPathPrefixesAPI is the list of prefixes that will require resolving terminated paths.
|
||||
terminatedPathPrefixesAPI = []string{"/v1/spaces/", "/v1/repos/", "/v1/pipelines/",
|
||||
terminatedPathPrefixesAPI = []string{"/v1/spaces/", "/v1/repos/",
|
||||
"/v1/secrets/", "/v1/connectors", "/v1/templates"}
|
||||
)
|
||||
|
||||
@ -162,8 +162,7 @@ func setupRoutesV1(r chi.Router,
|
||||
sysCtrl *system.Controller,
|
||||
) {
|
||||
setupSpaces(r, spaceCtrl)
|
||||
setupRepos(r, repoCtrl, pullreqCtrl, webhookCtrl, checkCtrl)
|
||||
setupPipelines(r, pipelineCtrl, executionCtrl, triggerCtrl, logCtrl)
|
||||
setupRepos(r, repoCtrl, pipelineCtrl, executionCtrl, triggerCtrl, logCtrl, pullreqCtrl, webhookCtrl, checkCtrl)
|
||||
setupConnectors(r, connectorCtrl)
|
||||
setupTemplates(r, templateCtrl)
|
||||
setupSecrets(r, secretCtrl)
|
||||
@ -193,7 +192,6 @@ func setupSpaces(r chi.Router, spaceCtrl *space.Controller) {
|
||||
r.Get("/spaces", handlerspace.HandleListSpaces(spaceCtrl))
|
||||
r.Get("/repos", handlerspace.HandleListRepos(spaceCtrl))
|
||||
r.Get("/service-accounts", handlerspace.HandleListServiceAccounts(spaceCtrl))
|
||||
r.Get("/pipelines", handlerspace.HandleListPipelines(spaceCtrl))
|
||||
r.Get("/secrets", handlerspace.HandleListSecrets(spaceCtrl))
|
||||
r.Get("/connectors", handlerspace.HandleListConnectors(spaceCtrl))
|
||||
r.Get("/templates", handlerspace.HandleListTemplates(spaceCtrl))
|
||||
@ -223,6 +221,10 @@ func setupSpaces(r chi.Router, spaceCtrl *space.Controller) {
|
||||
|
||||
func setupRepos(r chi.Router,
|
||||
repoCtrl *repo.Controller,
|
||||
pipelineCtrl *pipeline.Controller,
|
||||
executionCtrl *execution.Controller,
|
||||
triggerCtrl *trigger.Controller,
|
||||
logCtrl *logs.Controller,
|
||||
pullreqCtrl *pullreq.Controller,
|
||||
webhookCtrl *webhook.Controller,
|
||||
checkCtrl *check.Controller,
|
||||
@ -312,6 +314,8 @@ func setupRepos(r chi.Router,
|
||||
|
||||
setupWebhook(r, webhookCtrl)
|
||||
|
||||
setupPipelines(r, repoCtrl, pipelineCtrl, executionCtrl, triggerCtrl, logCtrl)
|
||||
|
||||
SetupChecks(r, checkCtrl)
|
||||
})
|
||||
})
|
||||
@ -319,11 +323,13 @@ func setupRepos(r chi.Router,
|
||||
|
||||
func setupPipelines(
|
||||
r chi.Router,
|
||||
repoCtrl *repo.Controller,
|
||||
pipelineCtrl *pipeline.Controller,
|
||||
executionCtrl *execution.Controller,
|
||||
triggerCtrl *trigger.Controller,
|
||||
logCtrl *logs.Controller) {
|
||||
r.Route("/pipelines", func(r chi.Router) {
|
||||
r.Get("/", handlerrepo.HandleListPipelines(repoCtrl))
|
||||
// Create takes path and parentId via body, not uri
|
||||
r.Post("/", handlerpipeline.HandleCreate(pipelineCtrl))
|
||||
r.Route(fmt.Sprintf("/{%s}", request.PathParamPipelineRef), func(r chi.Router) {
|
||||
|
142
internal/services/job/executor.go
Normal file
142
internal/services/job/executor.go
Normal file
@ -0,0 +1,142 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"github.com/harness/gitness/internal/store"
|
||||
"github.com/harness/gitness/pubsub"
|
||||
"github.com/harness/gitness/types"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// Executor holds map of Handler objects per each job type registered.
|
||||
// The Scheduler uses the Executor to start execution of jobs.
|
||||
type Executor struct {
|
||||
handlerMap map[string]Handler
|
||||
handlerComplete bool
|
||||
store store.JobStore
|
||||
publisher pubsub.Publisher
|
||||
}
|
||||
|
||||
const (
|
||||
ProgressMin = 0
|
||||
ProgressMax = 100
|
||||
)
|
||||
|
||||
// ProgressReporter can be used by a job Handler to report back the execution progress.
|
||||
type ProgressReporter func(progress int, result string) error
|
||||
|
||||
// Handler is a job executor for a specific job type.
|
||||
// An implementation should try to honor the context and
|
||||
// try to abort the execution as soon as the context is done.
|
||||
type Handler interface {
|
||||
Handle(ctx context.Context, input string, fn ProgressReporter) (result string, err error)
|
||||
}
|
||||
|
||||
var noHandlerDefinedError = errors.New("no handler registered for the job type")
|
||||
|
||||
// NewExecutor creates new Executor.
|
||||
func NewExecutor(jobStore store.JobStore, publisher pubsub.Publisher) *Executor {
|
||||
return &Executor{
|
||||
handlerMap: make(map[string]Handler),
|
||||
handlerComplete: false,
|
||||
store: jobStore,
|
||||
publisher: publisher,
|
||||
}
|
||||
}
|
||||
|
||||
// Register registers a job Handler for the provided job type.
|
||||
// This function is not thread safe. All calls are expected to be made
|
||||
// in a single thread during the application boot time.
|
||||
func (e *Executor) Register(jobType string, exec Handler) error {
|
||||
if jobType == "" {
|
||||
return errors.New("jobType must not be empty")
|
||||
}
|
||||
|
||||
if e.handlerComplete {
|
||||
return errors.New("job handler registration is complete")
|
||||
}
|
||||
|
||||
if exec == nil {
|
||||
return errors.New("provided Handler is nil")
|
||||
}
|
||||
|
||||
if _, ok := e.handlerMap[jobType]; ok {
|
||||
return fmt.Errorf("a Handler is already defined to run the '%s' job types", jobType)
|
||||
}
|
||||
|
||||
e.handlerMap[jobType] = exec
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// finishRegistration forbids further registration of job types.
|
||||
// It is called by the Scheduler when it starts.
|
||||
func (e *Executor) finishRegistration() {
|
||||
e.handlerComplete = true
|
||||
}
|
||||
|
||||
// exec runs a single job. This function is synchronous,
|
||||
// so the caller is responsible to run it in a separate go-routine.
|
||||
func (e *Executor) exec(
|
||||
ctx context.Context,
|
||||
jobUID, jobType string,
|
||||
input string,
|
||||
) (result string, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf(
|
||||
"panic while processing job=%s type=%s: %v\n%s",
|
||||
jobUID, jobType, r, debug.Stack())
|
||||
}
|
||||
}()
|
||||
|
||||
exec, ok := e.handlerMap[jobType]
|
||||
if !ok {
|
||||
return "", noHandlerDefinedError
|
||||
}
|
||||
|
||||
// progressReporter is the function with which the job can update its progress.
|
||||
// This function will be executed in the job executor's Go-routine.
|
||||
// It uses the job's context.
|
||||
progressReporter := func(progress int, result string) error {
|
||||
if progress < ProgressMin || progress > ProgressMax {
|
||||
return errors.New("progress must be between 0 and 100")
|
||||
}
|
||||
|
||||
jobDummy := &types.Job{
|
||||
UID: jobUID,
|
||||
Updated: time.Now().UnixMilli(),
|
||||
Result: result,
|
||||
State: enum.JobStateRunning,
|
||||
RunProgress: progress,
|
||||
}
|
||||
|
||||
// This doesn't need to be behind the global lock because it only updates the single row.
|
||||
// While a job is running no other process should touch it.
|
||||
// Even this call will fail if the context deadline has been exceeded.
|
||||
// The job parameter is a dummy types.Job object that just holds fields that should be updated.
|
||||
if err := e.store.UpdateProgress(ctx, jobDummy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// tell everybody that a job progress has been updated
|
||||
if err := publishStateChange(ctx, e.publisher, jobDummy); err != nil {
|
||||
log.Err(err).Msg("failed to publish job state change")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return exec.Handle(ctx, input, progressReporter) // runs the job
|
||||
}
|
89
internal/services/job/job_overdue.go
Normal file
89
internal/services/job/job_overdue.go
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/harness/gitness/internal/store"
|
||||
"github.com/harness/gitness/lock"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
const (
|
||||
jobUIDOverdue = "gitness:jobs:overdue"
|
||||
jobTypeOverdue = "gitness:jobs:overdue"
|
||||
jobCronOverdue = "*/20 * * * *" // every 20 min
|
||||
)
|
||||
|
||||
type jobOverdue struct {
|
||||
store store.JobStore
|
||||
mxManager lock.MutexManager
|
||||
scheduler *Scheduler
|
||||
}
|
||||
|
||||
func newJobOverdue(jobStore store.JobStore, mxManager lock.MutexManager, scheduler *Scheduler) *jobOverdue {
|
||||
return &jobOverdue{
|
||||
store: jobStore,
|
||||
mxManager: mxManager,
|
||||
scheduler: scheduler,
|
||||
}
|
||||
}
|
||||
|
||||
// Handle reclaims overdue jobs. Normally this shouldn't happen.
|
||||
// But, it can occur if DB update after a job execution fails,
|
||||
// or the server suddenly terminates while the job is still running.
|
||||
func (j *jobOverdue) Handle(ctx context.Context, _ string, _ ProgressReporter) (string, error) {
|
||||
mx, err := globalLock(ctx, j.mxManager)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to obtain the lock to reclaim overdue jobs")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := mx.Unlock(ctx); err != nil {
|
||||
log.Err(err).Msg("failed to release global lock after reclaiming overdue jobs")
|
||||
}
|
||||
}()
|
||||
|
||||
overdueJobs, err := j.store.ListDeadlineExceeded(ctx, time.Now())
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to list overdue jobs")
|
||||
}
|
||||
|
||||
if len(overdueJobs) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var minScheduled time.Time
|
||||
|
||||
for _, job := range overdueJobs {
|
||||
const errorMessage = "deadline exceeded"
|
||||
postExec(job, "", errorMessage)
|
||||
|
||||
err = j.store.UpdateExecution(ctx, job)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed update overdue job")
|
||||
}
|
||||
|
||||
if job.State == enum.JobStateScheduled {
|
||||
scheduled := time.UnixMilli(job.Scheduled)
|
||||
if minScheduled.IsZero() || minScheduled.After(scheduled) {
|
||||
minScheduled = scheduled
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !minScheduled.IsZero() {
|
||||
j.scheduler.scheduleProcessing(minScheduled)
|
||||
}
|
||||
|
||||
result := fmt.Sprintf("found %d overdue jobs", len(overdueJobs))
|
||||
|
||||
return result, nil
|
||||
}
|
67
internal/services/job/job_purge.go
Normal file
67
internal/services/job/job_purge.go
Normal file
@ -0,0 +1,67 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/harness/gitness/internal/store"
|
||||
"github.com/harness/gitness/lock"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
const (
|
||||
jobUIDPurge = "gitness:jobs:purge"
|
||||
jobTypePurge = "gitness:jobs:purge"
|
||||
jobCronPurge = "15 */4 * * *" // every 4 hours at 15 minutes
|
||||
)
|
||||
|
||||
type jobPurge struct {
|
||||
store store.JobStore
|
||||
mxManager lock.MutexManager
|
||||
minOldAge time.Duration
|
||||
}
|
||||
|
||||
func newJobPurge(jobStore store.JobStore, mxManager lock.MutexManager, minOldAge time.Duration) *jobPurge {
|
||||
if minOldAge < 0 {
|
||||
minOldAge = 0
|
||||
}
|
||||
|
||||
return &jobPurge{
|
||||
store: jobStore,
|
||||
mxManager: mxManager,
|
||||
minOldAge: minOldAge,
|
||||
}
|
||||
}
|
||||
|
||||
func (j *jobPurge) Handle(ctx context.Context, _ string, _ ProgressReporter) (string, error) {
|
||||
mx, err := globalLock(ctx, j.mxManager)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to obtain the lock to clean up old jobs")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := mx.Unlock(ctx); err != nil {
|
||||
log.Err(err).Msg("failed to release global lock after cleaning up old jobs")
|
||||
}
|
||||
}()
|
||||
|
||||
olderThan := time.Now().Add(-j.minOldAge)
|
||||
|
||||
n, err := j.store.DeleteOld(ctx, olderThan)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to purge old jobs")
|
||||
}
|
||||
|
||||
result := "no old jobs found"
|
||||
if n > 0 {
|
||||
result = fmt.Sprintf("deleted %d old jobs", n)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
23
internal/services/job/lock.go
Normal file
23
internal/services/job/lock.go
Normal file
@ -0,0 +1,23 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/harness/gitness/lock"
|
||||
)
|
||||
|
||||
func globalLock(ctx context.Context, manager lock.MutexManager) (lock.Mutex, error) {
|
||||
const lockKey = "jobs"
|
||||
mx, err := manager.NewMutex(lockKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = mx.Lock(ctx)
|
||||
|
||||
return mx, err
|
||||
}
|
60
internal/services/job/pubsub.go
Normal file
60
internal/services/job/pubsub.go
Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
|
||||
"github.com/harness/gitness/pubsub"
|
||||
"github.com/harness/gitness/types"
|
||||
)
|
||||
|
||||
const (
	// PubSubTopicCancelJob carries the UID of a job that should be canceled.
	PubSubTopicCancelJob = "gitness:job:cancel_job"
	// PubSubTopicStateChange carries gob-encoded types.JobStateChange events.
	PubSubTopicStateChange = "gitness:job:state_change"
)
|
||||
|
||||
func encodeStateChange(job *types.Job) ([]byte, error) {
|
||||
stateChange := &types.JobStateChange{
|
||||
UID: job.UID,
|
||||
State: job.State,
|
||||
Progress: job.RunProgress,
|
||||
Result: job.Result,
|
||||
Failure: job.LastFailureError,
|
||||
}
|
||||
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
if err := gob.NewEncoder(buffer).Encode(stateChange); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
func DecodeStateChange(payload []byte) (*types.JobStateChange, error) {
|
||||
stateChange := &types.JobStateChange{}
|
||||
if err := gob.NewDecoder(bytes.NewReader(payload)).Decode(stateChange); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stateChange, nil
|
||||
}
|
||||
|
||||
func publishStateChange(ctx context.Context, publisher pubsub.Publisher, job *types.Job) error {
|
||||
payload, err := encodeStateChange(job)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to gob encode JobStateChange: %w", err)
|
||||
}
|
||||
|
||||
err = publisher.Publish(ctx, PubSubTopicStateChange, payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to publish JobStateChange: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
688
internal/services/job/scheduler.go
Normal file
688
internal/services/job/scheduler.go
Normal file
@ -0,0 +1,688 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/harness/gitness/internal/store"
|
||||
"github.com/harness/gitness/lock"
|
||||
"github.com/harness/gitness/pubsub"
|
||||
"github.com/harness/gitness/types"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
|
||||
"github.com/gorhill/cronexpr"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// Scheduler controls execution of background jobs.
type Scheduler struct {
	// dependencies
	store         store.JobStore
	executor      *Executor
	mxManager     lock.MutexManager
	pubsubService pubsub.PubSub

	// configuration fields
	instanceID     string        // recorded in a job's RunBy field while running
	maxRunning     int           // upper bound of concurrently running jobs
	purgeMinOldAge time.Duration // minimum age of finished jobs before they are purged

	// synchronization stuff
	signal       chan time.Time                // wakes the Run loop to reschedule job processing
	done         chan struct{}                 // closed when the Run loop exits
	wgRunning    sync.WaitGroup                // tracks currently executing job goroutines
	cancelJobMx  sync.Mutex                    // guards cancelJobMap
	cancelJobMap map[string]context.CancelFunc // cancel funcs of locally running jobs, keyed by job UID
}
|
||||
|
||||
func NewScheduler(
|
||||
jobStore store.JobStore,
|
||||
executor *Executor,
|
||||
mxManager lock.MutexManager,
|
||||
pubsubService pubsub.PubSub,
|
||||
instanceID string,
|
||||
maxRunning int,
|
||||
purgeMinOldAge time.Duration,
|
||||
) (*Scheduler, error) {
|
||||
if maxRunning < 1 {
|
||||
maxRunning = 1
|
||||
}
|
||||
return &Scheduler{
|
||||
store: jobStore,
|
||||
executor: executor,
|
||||
mxManager: mxManager,
|
||||
pubsubService: pubsubService,
|
||||
|
||||
instanceID: instanceID,
|
||||
maxRunning: maxRunning,
|
||||
purgeMinOldAge: purgeMinOldAge,
|
||||
|
||||
cancelJobMap: map[string]context.CancelFunc{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Run runs the background job scheduler.
// It's a blocking call. It blocks until the provided context is done.
func (s *Scheduler) Run(ctx context.Context) error {
	if s.done != nil {
		return errors.New("already started")
	}

	// Listen for job cancellation requests coming from other instances.
	consumer := s.pubsubService.Subscribe(ctx, PubSubTopicCancelJob, s.handleCancelJob)
	defer func() {
		err := consumer.Close()
		if err != nil {
			log.Ctx(ctx).Err(err).
				Msg("job scheduler: failed to close pubsub cancel job consumer")
		}
	}()

	// Make sure the scheduler's internal recurring jobs exist in the DB.
	if err := s.createNecessaryJobs(ctx); err != nil {
		return fmt.Errorf("failed to create necessary jobs: %w", err)
	}

	if err := s.registerNecessaryJobs(); err != nil {
		return fmt.Errorf("failed to register scheduler's internal jobs: %w", err)
	}

	s.executor.finishRegistration()

	log.Ctx(ctx).Debug().Msg("job scheduler: starting")

	s.done = make(chan struct{})
	defer close(s.done)

	s.signal = make(chan time.Time, 1)

	timer := newSchedulerTimer()
	defer timer.Stop()

	for {
		err := func() error {
			// Recover panics so a single bad iteration doesn't kill the loop.
			defer func() {
				if r := recover(); r != nil {
					stack := string(debug.Stack())
					log.Ctx(ctx).Error().
						Str("panic", fmt.Sprintf("[%T] job scheduler panic: %v", r, r)).
						Msg(stack)
				}
			}()

			select {
			case <-ctx.Done():
				return ctx.Err()

			case newTime := <-s.signal:
				// A job was added or finished: possibly move the timer earlier.
				dur := timer.RescheduleEarlier(newTime)
				if dur > 0 {
					log.Ctx(ctx).Trace().
						Msgf("job scheduler: update of scheduled job processing time... runs in %s", dur)
				}
				return nil

			case now := <-timer.Ch():
				count, nextExec, gotAllJobs, err := s.processReadyJobs(ctx, now)

				// If the next processing time isn't known use the default.
				if nextExec.IsZero() {
					const period = time.Minute
					nextExec = now.Add(period)
				}

				// Reset the timer. Make the timer edgy if there are more jobs available.
				dur := timer.ResetAt(nextExec, !gotAllJobs)

				if err != nil {
					log.Ctx(ctx).Err(err).
						Msgf("job scheduler: failed to process jobs; next iteration in %s", dur)
				} else {
					log.Ctx(ctx).Trace().
						Msgf("job scheduler: started %d jobs; next iteration in %s", count, dur)
				}

				return nil
			}
		}()
		if err != nil {
			return err
		}
	}
}
|
||||
|
||||
// WaitJobsDone waits until execution of all jobs has finished.
|
||||
// It is intended to be used for graceful shutdown, after the Run method has finished.
|
||||
func (s *Scheduler) WaitJobsDone(ctx context.Context) {
|
||||
log.Ctx(ctx).Debug().Msg("job scheduler: stopping... waiting for the currently running jobs to finish")
|
||||
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
s.wgRunning.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Ctx(ctx).Warn().Msg("job scheduler: stop interrupted")
|
||||
case <-ch:
|
||||
log.Ctx(ctx).Info().Msg("job scheduler: gracefully stopped")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) CancelJob(ctx context.Context, jobUID string) error {
|
||||
s.cancelJobMx.Lock()
|
||||
cancelFn, ok := s.cancelJobMap[jobUID]
|
||||
s.cancelJobMx.Unlock()
|
||||
|
||||
if ok {
|
||||
cancelFn()
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.pubsubService.Publish(ctx, PubSubTopicCancelJob, []byte(jobUID))
|
||||
}
|
||||
|
||||
func (s *Scheduler) handleCancelJob(payload []byte) error {
|
||||
jobUID := string(payload)
|
||||
if jobUID == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.cancelJobMx.Lock()
|
||||
cancelFn, ok := s.cancelJobMap[jobUID]
|
||||
s.cancelJobMx.Unlock()
|
||||
|
||||
if ok {
|
||||
cancelFn()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// scheduleProcessing triggers processing of ready jobs.
|
||||
// This should be run after adding new jobs to the database.
|
||||
func (s *Scheduler) scheduleProcessing(scheduled time.Time) {
|
||||
go func() {
|
||||
select {
|
||||
case <-s.done:
|
||||
case s.signal <- scheduled:
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// scheduleIfHaveMoreJobs triggers processing of ready jobs if the timer is edgy.
// The timer would be edgy if the previous iteration found more jobs that it could start (full capacity).
// This should be run after a non-recurring job has finished.
func (s *Scheduler) scheduleIfHaveMoreJobs() {
	s.scheduleProcessing(time.Time{}) // zero time will trigger the timer if it's edgy
}
|
||||
|
||||
// RunJob runs a single job of the provided type.
|
||||
// All parameters a job receives must be inside the data string (as JSON or whatever the job handler can interpret).
|
||||
// The caller gets the job UID which can then by used to track execution (the job state and progress percentage).
|
||||
func (s *Scheduler) RunJob(ctx context.Context,
|
||||
jobType string,
|
||||
maxRetries int,
|
||||
timeout time.Duration,
|
||||
data string,
|
||||
) (string, error) {
|
||||
if jobType == "" {
|
||||
return "", errors.New("jobType must not be empty")
|
||||
}
|
||||
|
||||
if maxRetries < 0 {
|
||||
return "", errors.New("maxRetries must be positive")
|
||||
}
|
||||
|
||||
if timeout < time.Second {
|
||||
return "", errors.New("timeout too short")
|
||||
}
|
||||
|
||||
uid, err := UID()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate job UID: %w", err)
|
||||
}
|
||||
|
||||
nowMilli := time.Now().UnixMilli()
|
||||
|
||||
job := &types.Job{
|
||||
UID: uid,
|
||||
Created: nowMilli,
|
||||
Updated: nowMilli,
|
||||
Type: jobType,
|
||||
Priority: enum.JobPriorityNormal,
|
||||
Data: data,
|
||||
Result: "",
|
||||
MaxDurationSeconds: int(timeout / time.Second),
|
||||
MaxRetries: maxRetries,
|
||||
State: enum.JobStateScheduled,
|
||||
Scheduled: nowMilli,
|
||||
TotalExecutions: 0,
|
||||
RunBy: "",
|
||||
RunDeadline: nowMilli,
|
||||
RunProgress: ProgressMin,
|
||||
LastExecuted: 0, // never executed
|
||||
IsRecurring: false,
|
||||
RecurringCron: "",
|
||||
ConsecutiveFailures: 0,
|
||||
LastFailureError: "",
|
||||
}
|
||||
|
||||
ctx = log.Ctx(ctx).With().
|
||||
Str("job.UID", job.UID).
|
||||
Str("job.Type", job.Type).
|
||||
Logger().WithContext(ctx)
|
||||
|
||||
mx, err := globalLock(ctx, s.mxManager)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to obtain global lock to add new job: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := mx.Unlock(ctx); err != nil {
|
||||
log.Ctx(ctx).Err(err).Msg("failed to release global lock after adding a new job")
|
||||
}
|
||||
}()
|
||||
|
||||
err = s.store.Create(ctx, job)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to add new job to the database: %w", err)
|
||||
}
|
||||
|
||||
available, err := s.availableSlots(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to count available slots for job execution: %w", err)
|
||||
}
|
||||
|
||||
if available == 0 {
|
||||
// already running at the full capacity: the job is scheduled, we're done.
|
||||
return uid, nil
|
||||
}
|
||||
|
||||
err = s.runJob(ctx, job)
|
||||
if err != nil {
|
||||
log.Ctx(ctx).Err(err).Msg("failed to run job")
|
||||
}
|
||||
|
||||
return uid, nil
|
||||
}
|
||||
|
||||
// processReadyJobs executes jobs that are ready to run. This function is periodically run by the Scheduler.
// The function returns the number of jobs it has is started, the next scheduled execution time (of this function)
// and a bool value if all currently available ready jobs were started.
// Internally the Scheduler uses an "edgy" timer to reschedule calls of this function.
// The edgy option of the timer will be on if this function hasn't been able to start all job that are ready to run.
// If the timer has the edgy option turned on it will trigger the timer (and thus this function will be called)
// when any currently running job finishes successfully or fails.
func (s *Scheduler) processReadyJobs(ctx context.Context, now time.Time) (int, time.Time, bool, error) {
	mx, err := globalLock(ctx, s.mxManager)
	if err != nil {
		return 0, time.Time{}, false,
			fmt.Errorf("failed to obtain global lock to periodically process ready jobs: %w", err)
	}

	defer func() {
		if err := mx.Unlock(ctx); err != nil {
			log.Ctx(ctx).Err(err).
				Msg("failed to release global lock after periodic processing of ready jobs")
		}
	}()

	availableCount, err := s.availableSlots(ctx)
	if err != nil {
		return 0, time.Time{}, false,
			fmt.Errorf("failed to count available slots for job execution: %w", err)
	}

	// get one over the limit to check if all ready jobs are fetched
	jobs, err := s.store.ListReady(ctx, now, availableCount+1)
	if err != nil {
		return 0, time.Time{}, false,
			fmt.Errorf("failed to load scheduled jobs: %w", err)
	}

	var (
		countExecuted     int
		knownNextExecTime time.Time
		gotAllJobs        bool
	)

	if len(jobs) > availableCount {
		// More jobs are ready than we are able to run.
		jobs = jobs[:availableCount]
	} else {
		gotAllJobs = true
		// All ready jobs fit into the available slots: ask the DB when the
		// next scheduled job is due so the timer can sleep until then.
		knownNextExecTime, err = s.store.NextScheduledTime(ctx, now)
		if err != nil {
			return 0, time.Time{}, false,
				fmt.Errorf("failed to read next scheduled time: %w", err)
		}
	}

	for _, job := range jobs {
		// Attach job identifiers to the logger for all log output of this job.
		jobCtx := log.Ctx(ctx).With().
			Str("job.UID", job.UID).
			Str("job.Type", job.Type).
			Logger().WithContext(ctx)

		err = s.runJob(jobCtx, job)
		if err != nil {
			// A start failure invalidates the known next execution time and
			// forces an early retry of this processing loop.
			knownNextExecTime = time.Time{}
			gotAllJobs = false
			log.Ctx(jobCtx).Err(err).Msg("failed to run job")
			continue
		}

		countExecuted++
	}

	return countExecuted, knownNextExecTime, gotAllJobs, nil
}
|
||||
|
||||
func (s *Scheduler) availableSlots(ctx context.Context) (int, error) {
|
||||
countRunning, err := s.store.CountRunning(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
availableCount := s.maxRunning - countRunning
|
||||
if availableCount < 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return availableCount, nil
|
||||
}
|
||||
|
||||
// runJob updates the job in the database and starts it in a separate goroutine.
// The function will also log the execution.
func (s *Scheduler) runJob(ctx context.Context, job *types.Job) error {
	// Update the job fields for the new execution
	s.preExec(job)

	if err := s.store.UpdateExecution(ctx, job); err != nil {
		return err
	}

	// tell everybody that a job has started
	if err := publishStateChange(ctx, s.pubsubService, job); err != nil {
		log.Err(err).Msg("failed to publish job state change")
	}

	// Track the goroutine so WaitJobsDone can wait for it on shutdown.
	s.wgRunning.Add(1)
	go func(jobCtx context.Context, job *types.Job) {
		defer s.wgRunning.Done()

		log.Ctx(jobCtx).Debug().Msg("started job")

		timeStart := time.Now()

		// Run the job
		execResult, execFailure := s.doExec(jobCtx, job)

		// Update the job fields, reschedule if necessary.
		postExec(job, execResult, execFailure)

		// Use the context.Background() because we want to update the job even if the job's context is done.
		// The context can be done because the job exceeded its deadline or the server is shutting down.
		backgroundCtx := context.Background()

		// tell everybody that a job has finished execution
		if err := publishStateChange(backgroundCtx, s.pubsubService, job); err != nil {
			log.Ctx(jobCtx).Err(err).Msg("failed to publish job state change")
		}

		if mx, err := globalLock(backgroundCtx, s.mxManager); err != nil {
			// If locking failed, just log the error and proceed to update the DB anyway.
			log.Ctx(jobCtx).Err(err).Msg("failed to obtain global lock to update job after execution")
		} else {
			defer func() {
				if err := mx.Unlock(backgroundCtx); err != nil {
					log.Ctx(jobCtx).Err(err).Msg("failed to release global lock to update job after execution")
				}
			}()
		}

		if err := s.store.UpdateExecution(backgroundCtx, job); err != nil {
			log.Ctx(jobCtx).Err(err).Msg("failed to update after execution")
			return
		}

		logInfo := log.Ctx(jobCtx).Info().Str("duration", time.Since(timeStart).String())

		if job.IsRecurring {
			logInfo = logInfo.Bool("job.IsRecurring", true)
		}
		if job.Result != "" {
			logInfo = logInfo.Str("job.Result", job.Result)
		}
		if job.LastFailureError != "" {
			logInfo = logInfo.Str("job.Failure", job.LastFailureError)
		}

		// Depending on the post-execution state, log the outcome and kick
		// the scheduler loop so waiting jobs can start.
		switch job.State {
		case enum.JobStateFinished:
			logInfo.Msg("job successfully finished")
			s.scheduleIfHaveMoreJobs()

		case enum.JobStateFailed:
			logInfo.Msg("job failed")
			s.scheduleIfHaveMoreJobs()

		case enum.JobStateScheduled:
			scheduledTime := time.UnixMilli(job.Scheduled)
			logInfo.
				Str("job.Scheduled", scheduledTime.Format(time.RFC3339Nano)).
				Msg("job finished and rescheduled")

			s.scheduleProcessing(scheduledTime)

		case enum.JobStateRunning:
			log.Ctx(jobCtx).Error().Msg("should not happen; job still has state=running after finishing")
		}
	}(ctx, job)

	return nil
}
|
||||
|
||||
// preExec updates the provided types.Job before execution.
|
||||
func (s *Scheduler) preExec(job *types.Job) {
|
||||
if job.MaxDurationSeconds < 1 {
|
||||
job.MaxDurationSeconds = 1
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
nowMilli := now.UnixMilli()
|
||||
|
||||
execDuration := time.Duration(job.MaxDurationSeconds) * time.Second
|
||||
execDeadline := now.Add(execDuration)
|
||||
|
||||
job.Updated = nowMilli
|
||||
job.LastExecuted = nowMilli
|
||||
job.State = enum.JobStateRunning
|
||||
job.RunDeadline = execDeadline.UnixMilli()
|
||||
job.RunBy = s.instanceID
|
||||
job.RunProgress = ProgressMin
|
||||
job.TotalExecutions++
|
||||
job.Result = ""
|
||||
job.LastFailureError = ""
|
||||
}
|
||||
|
||||
// doExec executes the provided types.Job.
// It returns the handler's result string and, on failure, the error message.
func (s *Scheduler) doExec(ctx context.Context, job *types.Job) (execResult, execError string) {
	execDeadline := time.UnixMilli(job.RunDeadline)

	jobCtx, done := context.WithDeadline(ctx, execDeadline)
	defer done()

	// Register the job's cancel function so CancelJob/handleCancelJob can abort it.
	s.cancelJobMx.Lock()
	if _, ok := s.cancelJobMap[job.UID]; ok {
		// should not happen: jobs have unique UIDs!
		s.cancelJobMx.Unlock()
		return "", "failed to start: already running"
	}
	s.cancelJobMap[job.UID] = done
	s.cancelJobMx.Unlock()

	// Deregister the cancel function once the job is done.
	defer func() {
		s.cancelJobMx.Lock()
		delete(s.cancelJobMap, job.UID)
		s.cancelJobMx.Unlock()
	}()

	execResult, err := s.executor.exec(jobCtx, job.UID, job.Type, job.Data)
	if err != nil {
		execError = err.Error()
	}

	return
}
|
||||
|
||||
// postExec updates the provided types.Job after execution and reschedules it if necessary.
func postExec(job *types.Job, resultData, resultErr string) {
	now := time.Now()
	nowMilli := now.UnixMilli()

	job.Updated = nowMilli
	job.Result = resultData
	job.RunBy = "" // no instance owns the job anymore

	if resultErr != "" {
		job.ConsecutiveFailures++
		job.State = enum.JobStateFailed
		job.LastFailureError = resultErr
	} else {
		job.State = enum.JobStateFinished
		job.RunProgress = ProgressMax
	}

	// Reschedule recurring jobs
	if job.IsRecurring {
		if resultErr == "" {
			job.ConsecutiveFailures = 0
		}

		exp, err := cronexpr.Parse(job.RecurringCron)
		if err != nil {
			// A broken cron expression means the job can never run again:
			// mark it failed and keep any execution failure message too.
			job.State = enum.JobStateFailed

			messages := fmt.Sprintf("failed to parse cron string: %s", err.Error())
			if job.LastFailureError != "" {
				messages = messages + "; " + job.LastFailureError
			}

			job.LastFailureError = messages
		} else {
			job.State = enum.JobStateScheduled
			job.Scheduled = exp.Next(now).UnixMilli()
		}

		return
	}

	// Reschedule the failed job if retrying is allowed
	if job.State == enum.JobStateFailed && job.ConsecutiveFailures <= job.MaxRetries {
		const retryDelay = 15 * time.Second
		job.State = enum.JobStateScheduled
		job.Scheduled = now.Add(retryDelay).UnixMilli()
		job.RunProgress = ProgressMin
	}
}
|
||||
|
||||
func (s *Scheduler) AddRecurring(
|
||||
ctx context.Context,
|
||||
jobUID,
|
||||
jobType,
|
||||
cronDef string,
|
||||
maxDur time.Duration,
|
||||
) error {
|
||||
cronExp, err := cronexpr.Parse(cronDef)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid cron definition string for job type=%s: %w", jobType, err)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
nowMilli := now.UnixMilli()
|
||||
|
||||
nextExec := cronExp.Next(now)
|
||||
|
||||
job := &types.Job{
|
||||
UID: jobUID,
|
||||
Created: nowMilli,
|
||||
Updated: nowMilli,
|
||||
Type: jobType,
|
||||
Priority: enum.JobPriorityElevated,
|
||||
Data: "",
|
||||
Result: "",
|
||||
MaxDurationSeconds: int(maxDur / time.Second),
|
||||
MaxRetries: 0,
|
||||
State: enum.JobStateScheduled,
|
||||
Scheduled: nextExec.UnixMilli(),
|
||||
TotalExecutions: 0,
|
||||
RunBy: "",
|
||||
RunDeadline: 0,
|
||||
RunProgress: 0,
|
||||
LastExecuted: 0,
|
||||
IsRecurring: true,
|
||||
RecurringCron: cronDef,
|
||||
ConsecutiveFailures: 0,
|
||||
LastFailureError: "",
|
||||
}
|
||||
|
||||
err = s.store.Upsert(ctx, job)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upsert job id=%s type=%s: %w", jobUID, jobType, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) createNecessaryJobs(ctx context.Context) error {
|
||||
mx, err := globalLock(ctx, s.mxManager)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain global lock to create necessary jobs: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := mx.Unlock(ctx); err != nil {
|
||||
log.Ctx(ctx).Err(err).
|
||||
Msg("failed to release global lock after creating necessary jobs")
|
||||
}
|
||||
}()
|
||||
|
||||
err = s.AddRecurring(ctx, jobUIDPurge, jobTypePurge, jobCronPurge, 5*time.Second)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.AddRecurring(ctx, jobUIDOverdue, jobTypeOverdue, jobCronOverdue, 5*time.Second)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerNecessaryJobs registers two jobs: overdue job recovery and purge old finished jobs.
|
||||
// These two jobs types are integral part of the job scheduler.
|
||||
func (s *Scheduler) registerNecessaryJobs() error {
|
||||
handlerOverdue := newJobOverdue(s.store, s.mxManager, s)
|
||||
err := s.executor.Register(jobTypeOverdue, handlerOverdue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handlerPurge := newJobPurge(s.store, s.mxManager, s.purgeMinOldAge)
|
||||
err = s.executor.Register(jobTypePurge, handlerPurge)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
111
internal/services/job/timer.go
Normal file
111
internal/services/job/timer.go
Normal file
@ -0,0 +1,111 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// timerMaxDur caps how far into the future the timer may be scheduled.
const timerMaxDur = 30 * time.Minute

// timerMinDur is the smallest (effectively immediate) trigger delay.
const timerMinDur = time.Nanosecond

// schedulerTimer wraps a time.Timer with clamped rescheduling for the Scheduler.
type schedulerTimer struct {
	timerAt time.Time   // when the timer is currently set to fire
	timer   *time.Timer // the underlying timer
	edgy    bool        // if true, the next RescheduleEarlier call will trigger the timer immediately.
}
|
||||
|
||||
// newSchedulerTimer created new timer for the Scheduler. It is created to fire immediately.
|
||||
func newSchedulerTimer() *schedulerTimer {
|
||||
return &schedulerTimer{
|
||||
timerAt: time.Now().Add(timerMinDur),
|
||||
timer: time.NewTimer(timerMinDur),
|
||||
}
|
||||
}
|
||||
|
||||
// ResetAt resets the internal timer to trigger at the provided time.
// The delay is clamped into [timerMinDur, timerMaxDur]: a zero or past time
// makes the timer fire (almost) immediately, and a time further away than
// timerMaxDur is clamped to timerMaxDur.
// It returns the duration until the timer fires.
func (t *schedulerTimer) ResetAt(next time.Time, edgy bool) time.Duration {
	return t.resetAt(time.Now(), next, edgy)
}
|
||||
|
||||
func (t *schedulerTimer) resetAt(now, next time.Time, edgy bool) time.Duration {
|
||||
var dur time.Duration
|
||||
|
||||
dur = next.Sub(now)
|
||||
if dur < timerMinDur {
|
||||
dur = timerMinDur
|
||||
next = now.Add(dur)
|
||||
} else if dur > timerMaxDur {
|
||||
dur = timerMaxDur
|
||||
next = now.Add(dur)
|
||||
}
|
||||
|
||||
t.Stop()
|
||||
t.edgy = edgy
|
||||
t.timerAt = next
|
||||
t.timer.Reset(dur)
|
||||
|
||||
return dur
|
||||
}
|
||||
|
||||
// RescheduleEarlier will reset the timer if the new time is earlier than the previous time.
// Otherwise, the function does nothing and returns 0.
// Providing zero time triggers the timer if it's edgy, otherwise does nothing.
// The returned duration is the delay until the timer fires (0 if unchanged).
func (t *schedulerTimer) RescheduleEarlier(next time.Time) time.Duration {
	return t.rescheduleEarlier(time.Now(), next)
}
|
||||
|
||||
// rescheduleEarlier is the testable core of RescheduleEarlier;
// it takes the current time explicitly.
func (t *schedulerTimer) rescheduleEarlier(now, next time.Time) time.Duration {
	var dur time.Duration

	switch {
	case t.edgy:
		// if the timer is edgy trigger it immediately
		dur = timerMinDur

	case next.IsZero():
		// if the provided time is zero: trigger the timer if it's edgy otherwise do nothing
		if !t.edgy {
			return 0
		}
		dur = timerMinDur

	case !next.Before(t.timerAt):
		// do nothing if the timer is already scheduled to run sooner than the provided time
		return 0

	default:
		// the provided time is earlier: fire then, but never sooner than timerMinDur
		dur = next.Sub(now)
		if dur < timerMinDur {
			dur = timerMinDur
		}
	}

	next = now.Add(dur)

	t.Stop()
	t.timerAt = next
	t.timer.Reset(dur)

	return dur
}
|
||||
|
||||
// Ch returns the channel on which the timer delivers its ticks.
func (t *schedulerTimer) Ch() <-chan time.Time {
	return t.timer.C
}
|
||||
|
||||
// Stop stops the timer and drains its channel so a subsequent Reset
// doesn't deliver a stale tick.
func (t *schedulerTimer) Stop() {
	// stop the timer
	t.timer.Stop()

	// consume the timer's tick if any
	select {
	case <-t.timer.C:
	default:
	}

	t.timerAt = time.Time{}
}
|
105
internal/services/job/timer_test.go
Normal file
105
internal/services/job/timer_test.go
Normal file
@ -0,0 +1,105 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestSchedulerTimer_ResetAt(t *testing.T) {
|
||||
now := time.Now()
|
||||
tests := []struct {
|
||||
name string
|
||||
at time.Time
|
||||
exp time.Duration
|
||||
}{
|
||||
{
|
||||
name: "zero",
|
||||
at: time.Time{},
|
||||
exp: timerMinDur,
|
||||
},
|
||||
{
|
||||
name: "immediate",
|
||||
at: now,
|
||||
exp: timerMinDur,
|
||||
},
|
||||
{
|
||||
name: "30s",
|
||||
at: now.Add(30 * time.Second),
|
||||
exp: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
timer := newSchedulerTimer()
|
||||
dur := timer.resetAt(now, test.at, false)
|
||||
if want, got := test.exp, dur; want != dur {
|
||||
t.Errorf("want: %s, got: %s", want.String(), got.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerTimer_TryResetAt(t *testing.T) {
|
||||
now := time.Now()
|
||||
tests := []struct {
|
||||
name string
|
||||
at time.Time
|
||||
edgy bool
|
||||
exp time.Duration
|
||||
}{
|
||||
{
|
||||
name: "past",
|
||||
at: now.Add(-time.Second),
|
||||
exp: timerMinDur,
|
||||
},
|
||||
{
|
||||
name: "30s",
|
||||
at: now.Add(30 * time.Second),
|
||||
exp: 30 * time.Second,
|
||||
},
|
||||
{
|
||||
name: "90s",
|
||||
at: now.Add(90 * time.Second),
|
||||
exp: 0,
|
||||
},
|
||||
{
|
||||
name: "30s-edgy",
|
||||
at: now.Add(30 * time.Second),
|
||||
edgy: true,
|
||||
exp: timerMinDur,
|
||||
},
|
||||
{
|
||||
name: "90s-edgy",
|
||||
at: now.Add(90 * time.Second),
|
||||
edgy: true,
|
||||
exp: timerMinDur,
|
||||
},
|
||||
{
|
||||
name: "zero",
|
||||
at: time.Time{},
|
||||
exp: 0,
|
||||
},
|
||||
{
|
||||
name: "zero-edgy",
|
||||
at: time.Time{},
|
||||
edgy: true,
|
||||
exp: timerMinDur,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
timer := newSchedulerTimer()
|
||||
timer.resetAt(now, now.Add(time.Minute), test.edgy)
|
||||
dur := timer.rescheduleEarlier(now, test.at)
|
||||
if want, got := test.exp, dur; want != dur {
|
||||
t.Errorf("want: %s, got: %s", want.String(), got.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
25
internal/services/job/uid.go
Normal file
25
internal/services/job/uid.go
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base32"
|
||||
)
|
||||
|
||||
// UID returns unique random string with length equal to 16.
// It base32-encodes 10 cryptographically random bytes; because 10 is
// divisible by 5 the encoding is exactly 10/5*8 = 16 characters, unpadded.
func UID() (string, error) {
	const uidSizeBytes = 10

	random := make([]byte, uidSizeBytes)
	if _, err := rand.Read(random); err != nil {
		return "", err
	}

	return base32.StdEncoding.EncodeToString(random), nil
}
|
47
internal/services/job/wire.go
Normal file
47
internal/services/job/wire.go
Normal file
@ -0,0 +1,47 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"github.com/harness/gitness/internal/store"
|
||||
"github.com/harness/gitness/lock"
|
||||
"github.com/harness/gitness/pubsub"
|
||||
"github.com/harness/gitness/types"
|
||||
|
||||
"github.com/google/wire"
|
||||
)
|
||||
|
||||
var WireSet = wire.NewSet(
|
||||
ProvideExecutor,
|
||||
ProvideScheduler,
|
||||
)
|
||||
|
||||
func ProvideExecutor(
|
||||
jobStore store.JobStore,
|
||||
pubsubService pubsub.PubSub,
|
||||
) *Executor {
|
||||
return NewExecutor(
|
||||
jobStore,
|
||||
pubsubService,
|
||||
)
|
||||
}
|
||||
|
||||
func ProvideScheduler(
|
||||
jobStore store.JobStore,
|
||||
executor *Executor,
|
||||
mutexManager lock.MutexManager,
|
||||
pubsubService pubsub.PubSub,
|
||||
config *types.Config,
|
||||
) (*Scheduler, error) {
|
||||
return NewScheduler(
|
||||
jobStore,
|
||||
executor,
|
||||
mutexManager,
|
||||
pubsubService,
|
||||
config.InstanceID,
|
||||
config.BackgroundJobs.MaxRunning,
|
||||
config.BackgroundJobs.PurgeFinishedOlderThan,
|
||||
)
|
||||
}
|
@ -5,6 +5,7 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"github.com/harness/gitness/internal/services/job"
|
||||
"github.com/harness/gitness/internal/services/pullreq"
|
||||
"github.com/harness/gitness/internal/services/webhook"
|
||||
|
||||
@ -16,16 +17,22 @@ var WireSet = wire.NewSet(
|
||||
)
|
||||
|
||||
type Services struct {
|
||||
ws *webhook.Service
|
||||
bms *pullreq.Service
|
||||
Webhook *webhook.Service
|
||||
PullReq *pullreq.Service
|
||||
JobExecutor *job.Executor
|
||||
JobScheduler *job.Scheduler
|
||||
}
|
||||
|
||||
func ProvideServices(
|
||||
ws *webhook.Service,
|
||||
bms *pullreq.Service,
|
||||
webhooksSrv *webhook.Service,
|
||||
pullReqSrv *pullreq.Service,
|
||||
jobExecutor *job.Executor,
|
||||
jobScheduler *job.Scheduler,
|
||||
) Services {
|
||||
return Services{
|
||||
ws: ws,
|
||||
bms: bms,
|
||||
Webhook: webhooksSrv,
|
||||
PullReq: pullReqSrv,
|
||||
JobExecutor: jobExecutor,
|
||||
JobScheduler: jobScheduler,
|
||||
}
|
||||
}
|
||||
|
@ -441,6 +441,42 @@ type (
|
||||
Delete(ctx context.Context, repoID, reqCheckID int64) error
|
||||
}
|
||||
|
||||
JobStore interface {
|
||||
// Find fetches a job by its unique identifier.
|
||||
Find(ctx context.Context, uid string) (*types.Job, error)
|
||||
|
||||
// Create is used to create a new job.
|
||||
Create(ctx context.Context, job *types.Job) error
|
||||
|
||||
// Upsert will insert the job in the database if the job didn't already exist,
|
||||
// or it will update the existing one but only if its definition has changed.
|
||||
Upsert(ctx context.Context, job *types.Job) error
|
||||
|
||||
// UpdateDefinition is used to update a job definition.
|
||||
UpdateDefinition(ctx context.Context, job *types.Job) error
|
||||
|
||||
// UpdateExecution is used to update a job before and after execution.
|
||||
UpdateExecution(ctx context.Context, job *types.Job) error
|
||||
|
||||
// UpdateProgress is used to update a job progress data.
|
||||
UpdateProgress(ctx context.Context, job *types.Job) error
|
||||
|
||||
// CountRunning returns number of jobs that are currently being run.
|
||||
CountRunning(ctx context.Context) (int, error)
|
||||
|
||||
// ListReady returns a list of jobs that are ready for execution.
|
||||
ListReady(ctx context.Context, now time.Time, limit int) ([]*types.Job, error)
|
||||
|
||||
// ListDeadlineExceeded returns a list of jobs that have exceeded their execution deadline.
|
||||
ListDeadlineExceeded(ctx context.Context, now time.Time) ([]*types.Job, error)
|
||||
|
||||
// NextScheduledTime returns a scheduled time of the next ready job.
|
||||
NextScheduledTime(ctx context.Context, now time.Time) (time.Time, error)
|
||||
|
||||
// DeleteOld removes non-recurring jobs that have finished execution or have failed.
|
||||
DeleteOld(ctx context.Context, olderThan time.Time) (int64, error)
|
||||
}
|
||||
|
||||
PipelineStore interface {
|
||||
// Find returns a pipeline given a pipeline ID from the datastore.
|
||||
Find(ctx context.Context, id int64) (*types.Pipeline, error)
|
||||
@ -454,8 +490,12 @@ type (
|
||||
// Update tries to update a pipeline in the datastore
|
||||
Update(ctx context.Context, pipeline *types.Pipeline) error
|
||||
|
||||
// List lists the pipelines present in a parent space ID in the datastore.
|
||||
List(ctx context.Context, spaceID int64, pagination types.ListQueryFilter) ([]*types.Pipeline, error)
|
||||
// List lists the pipelines present in a repository in the datastore.
|
||||
List(ctx context.Context, repoID int64, pagination types.ListQueryFilter) ([]*types.Pipeline, error)
|
||||
|
||||
// ListLatest lists the pipelines present in a repository in the datastore.
|
||||
// It also returns latest build information for all the returned entries.
|
||||
ListLatest(ctx context.Context, repoID int64, pagination types.ListQueryFilter) ([]*types.Pipeline, error)
|
||||
|
||||
// UpdateOptLock updates the pipeline using the optimistic locking mechanism.
|
||||
UpdateOptLock(ctx context.Context, pipeline *types.Pipeline,
|
||||
@ -464,11 +504,11 @@ type (
|
||||
// Delete deletes a pipeline ID from the datastore.
|
||||
Delete(ctx context.Context, id int64) error
|
||||
|
||||
// Count the number of pipelines in a space matching the given filter.
|
||||
Count(ctx context.Context, spaceID int64, filter types.ListQueryFilter) (int64, error)
|
||||
// Count the number of pipelines in a repository matching the given filter.
|
||||
Count(ctx context.Context, repoID int64, filter types.ListQueryFilter) (int64, error)
|
||||
|
||||
// DeleteByUID deletes a pipeline with a given UID in a space
|
||||
DeleteByUID(ctx context.Context, spaceID int64, uid string) error
|
||||
// DeleteByUID deletes a pipeline with a given UID under a repo.
|
||||
DeleteByUID(ctx context.Context, repoID int64, uid string) error
|
||||
|
||||
// IncrementSeqNum increments the sequence number of the pipeline
|
||||
IncrementSeqNum(ctx context.Context, pipeline *types.Pipeline) (*types.Pipeline, error)
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
"github.com/harness/gitness/store/database"
|
||||
"github.com/harness/gitness/store/database/dbtx"
|
||||
"github.com/harness/gitness/types"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
sqlxtypes "github.com/jmoiron/sqlx/types"
|
||||
@ -294,6 +295,7 @@ func (s *executionStore) UpdateOptLock(ctx context.Context,
|
||||
}
|
||||
|
||||
// List lists the executions for a given pipeline ID.
|
||||
// It orders them in descending order of execution number.
|
||||
func (s *executionStore) List(
|
||||
ctx context.Context,
|
||||
pipelineID int64,
|
||||
@ -302,7 +304,8 @@ func (s *executionStore) List(
|
||||
stmt := database.Builder.
|
||||
Select(executionColumns).
|
||||
From("executions").
|
||||
Where("execution_pipeline_id = ?", fmt.Sprint(pipelineID))
|
||||
Where("execution_pipeline_id = ?", fmt.Sprint(pipelineID)).
|
||||
OrderBy("execution_number " + enum.OrderDesc.String())
|
||||
|
||||
stmt = stmt.Limit(database.Limit(pagination.Size))
|
||||
stmt = stmt.Offset(database.Offset(pagination.Page, pagination.Size))
|
||||
|
@ -1,14 +1,12 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/harness/gitness/types"
|
||||
)
|
||||
|
||||
func mapInternalToExecution(in *execution) (*types.Execution, error) {
|
||||
var params map[string]string
|
||||
err := json.Unmarshal(in.Params, ¶ms)
|
||||
err := in.Params.Unmarshal(¶ms)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
430
internal/store/database/job.go
Normal file
430
internal/store/database/job.go
Normal file
@ -0,0 +1,430 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/harness/gitness/internal/store"
|
||||
gitness_store "github.com/harness/gitness/store"
|
||||
"github.com/harness/gitness/store/database"
|
||||
"github.com/harness/gitness/store/database/dbtx"
|
||||
"github.com/harness/gitness/types"
|
||||
"github.com/harness/gitness/types/enum"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
var _ store.JobStore = (*JobStore)(nil)
|
||||
|
||||
func NewJobStore(db *sqlx.DB) *JobStore {
|
||||
return &JobStore{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
type JobStore struct {
|
||||
db *sqlx.DB
|
||||
}
|
||||
|
||||
const (
|
||||
jobColumns = `
|
||||
job_uid
|
||||
,job_created
|
||||
,job_updated
|
||||
,job_type
|
||||
,job_priority
|
||||
,job_data
|
||||
,job_result
|
||||
,job_max_duration_seconds
|
||||
,job_max_retries
|
||||
,job_state
|
||||
,job_scheduled
|
||||
,job_total_executions
|
||||
,job_run_by
|
||||
,job_run_deadline
|
||||
,job_run_progress
|
||||
,job_last_executed
|
||||
,job_is_recurring
|
||||
,job_recurring_cron
|
||||
,job_consecutive_failures
|
||||
,job_last_failure_error`
|
||||
|
||||
jobSelectBase = `
|
||||
SELECT` + jobColumns + `
|
||||
FROM jobs`
|
||||
)
|
||||
|
||||
// Find fetches a job by its unique identifier.
|
||||
func (s *JobStore) Find(ctx context.Context, uid string) (*types.Job, error) {
|
||||
const sqlQuery = jobSelectBase + `
|
||||
WHERE job_uid = $1`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
result := &types.Job{}
|
||||
if err := db.GetContext(ctx, result, sqlQuery, uid); err != nil {
|
||||
return nil, database.ProcessSQLErrorf(err, "Failed to find job by uid")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Create creates a new job.
|
||||
func (s *JobStore) Create(ctx context.Context, job *types.Job) error {
|
||||
const sqlQuery = `
|
||||
INSERT INTO jobs (` + jobColumns + `
|
||||
) VALUES (
|
||||
:job_uid
|
||||
,:job_created
|
||||
,:job_updated
|
||||
,:job_type
|
||||
,:job_priority
|
||||
,:job_data
|
||||
,:job_result
|
||||
,:job_max_duration_seconds
|
||||
,:job_max_retries
|
||||
,:job_state
|
||||
,:job_scheduled
|
||||
,:job_total_executions
|
||||
,:job_run_by
|
||||
,:job_run_deadline
|
||||
,:job_run_progress
|
||||
,:job_last_executed
|
||||
,:job_is_recurring
|
||||
,:job_recurring_cron
|
||||
,:job_consecutive_failures
|
||||
,:job_last_failure_error
|
||||
)`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
query, arg, err := db.BindNamed(sqlQuery, job)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to bind job object")
|
||||
}
|
||||
|
||||
if _, err := db.ExecContext(ctx, query, arg...); err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Insert query failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upsert creates or updates a job. If the job didn't exist it will insert it in the database,
|
||||
// otherwise it will update it but only if its definition has changed.
|
||||
func (s *JobStore) Upsert(ctx context.Context, job *types.Job) error {
|
||||
const sqlQuery = `
|
||||
INSERT INTO jobs (` + jobColumns + `
|
||||
) VALUES (
|
||||
:job_uid
|
||||
,:job_created
|
||||
,:job_updated
|
||||
,:job_type
|
||||
,:job_priority
|
||||
,:job_data
|
||||
,:job_result
|
||||
,:job_max_duration_seconds
|
||||
,:job_max_retries
|
||||
,:job_state
|
||||
,:job_scheduled
|
||||
,:job_total_executions
|
||||
,:job_run_by
|
||||
,:job_run_deadline
|
||||
,:job_run_progress
|
||||
,:job_last_executed
|
||||
,:job_is_recurring
|
||||
,:job_recurring_cron
|
||||
,:job_consecutive_failures
|
||||
,:job_last_failure_error
|
||||
)
|
||||
ON CONFLICT (job_uid) DO
|
||||
UPDATE SET
|
||||
job_updated = :job_updated
|
||||
,job_type = :job_type
|
||||
,job_priority = :job_priority
|
||||
,job_data = :job_data
|
||||
,job_result = :job_result
|
||||
,job_max_duration_seconds = :job_max_duration_seconds
|
||||
,job_max_retries = :job_max_retries
|
||||
,job_state = :job_state
|
||||
,job_scheduled = :job_scheduled
|
||||
,job_is_recurring = :job_is_recurring
|
||||
,job_recurring_cron = :job_recurring_cron
|
||||
WHERE
|
||||
jobs.job_type <> :job_type OR
|
||||
jobs.job_priority <> :job_priority OR
|
||||
jobs.job_data <> :job_data OR
|
||||
jobs.job_max_duration_seconds <> :job_max_duration_seconds OR
|
||||
jobs.job_max_retries <> :job_max_retries OR
|
||||
jobs.job_is_recurring <> :job_is_recurring OR
|
||||
jobs.job_recurring_cron <> :job_recurring_cron`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
query, arg, err := db.BindNamed(sqlQuery, job)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to bind job object")
|
||||
}
|
||||
|
||||
if _, err := db.ExecContext(ctx, query, arg...); err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Upsert query failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateDefinition is used to update a job definition.
|
||||
func (s *JobStore) UpdateDefinition(ctx context.Context, job *types.Job) error {
|
||||
const sqlQuery = `
|
||||
UPDATE jobs
|
||||
SET
|
||||
job_updated = :job_updated
|
||||
,job_type = :job_type
|
||||
,job_priority = :job_priority
|
||||
,job_data = :job_data
|
||||
,job_result = :job_result
|
||||
,job_max_duration_seconds = :job_max_duration_seconds
|
||||
,job_max_retries = :job_max_retries
|
||||
,job_state = :job_state
|
||||
,job_scheduled = :job_scheduled
|
||||
,job_is_recurring = :job_is_recurring
|
||||
,job_recurring_cron = :job_recurring_cron
|
||||
WHERE job_uid = :job_uid`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
query, arg, err := db.BindNamed(sqlQuery, job)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to bind job object for update")
|
||||
}
|
||||
|
||||
result, err := db.ExecContext(ctx, query, arg...)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to update job definition")
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to get number of updated rows")
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return gitness_store.ErrResourceNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateExecution is used to update a job before and after execution.
|
||||
func (s *JobStore) UpdateExecution(ctx context.Context, job *types.Job) error {
|
||||
const sqlQuery = `
|
||||
UPDATE jobs
|
||||
SET
|
||||
job_updated = :job_updated
|
||||
,job_result = :job_result
|
||||
,job_state = :job_state
|
||||
,job_scheduled = :job_scheduled
|
||||
,job_total_executions = :job_total_executions
|
||||
,job_run_by = :job_run_by
|
||||
,job_run_deadline = :job_run_deadline
|
||||
,job_last_executed = :job_last_executed
|
||||
,job_consecutive_failures = :job_consecutive_failures
|
||||
,job_last_failure_error = :job_last_failure_error
|
||||
WHERE job_uid = :job_uid`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
query, arg, err := db.BindNamed(sqlQuery, job)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to bind job object for update")
|
||||
}
|
||||
|
||||
result, err := db.ExecContext(ctx, query, arg...)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to update job execution")
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to get number of updated rows")
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return gitness_store.ErrResourceNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *JobStore) UpdateProgress(ctx context.Context, job *types.Job) error {
|
||||
const sqlQuery = `
|
||||
UPDATE jobs
|
||||
SET
|
||||
job_updated = :job_updated
|
||||
,job_result = :job_result
|
||||
,job_run_progress = :job_run_progress
|
||||
WHERE job_uid = :job_uid AND job_state = 'running'`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
query, arg, err := db.BindNamed(sqlQuery, job)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to bind job object for update")
|
||||
}
|
||||
|
||||
result, err := db.ExecContext(ctx, query, arg...)
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to update job progress")
|
||||
}
|
||||
|
||||
count, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Failed to get number of updated rows")
|
||||
}
|
||||
|
||||
if count == 0 {
|
||||
return gitness_store.ErrResourceNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CountRunning returns number of jobs that are currently being run.
|
||||
func (s *JobStore) CountRunning(ctx context.Context) (int, error) {
|
||||
stmt := database.Builder.
|
||||
Select("count(*)").
|
||||
From("jobs").
|
||||
Where("job_state = ?", enum.JobStateRunning)
|
||||
|
||||
sql, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to convert count running jobs query to sql: %w", err)
|
||||
}
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
var count int64
|
||||
err = db.QueryRowContext(ctx, sql, args...).Scan(&count)
|
||||
if err != nil {
|
||||
return 0, database.ProcessSQLErrorf(err, "failed executing count running jobs query")
|
||||
}
|
||||
|
||||
return int(count), nil
|
||||
}
|
||||
|
||||
// ListReady returns a list of jobs that are ready for execution:
|
||||
// The jobs with state="scheduled" and scheduled time in the past.
|
||||
func (s *JobStore) ListReady(ctx context.Context, now time.Time, limit int) ([]*types.Job, error) {
|
||||
stmt := database.Builder.
|
||||
Select(jobColumns).
|
||||
From("jobs").
|
||||
Where("job_state = ?", enum.JobStateScheduled).
|
||||
Where("job_scheduled <= ?", now.UnixMilli()).
|
||||
OrderBy("job_priority desc, job_scheduled asc, job_uid asc").
|
||||
Limit(uint64(limit))
|
||||
|
||||
sql, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert list scheduled jobs query to sql: %w", err)
|
||||
}
|
||||
|
||||
result := make([]*types.Job, 0)
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
if err = db.SelectContext(ctx, &result, sql, args...); err != nil {
|
||||
return nil, database.ProcessSQLErrorf(err, "failed to execute list scheduled jobs query")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListDeadlineExceeded returns a list of jobs that have exceeded their execution deadline.
|
||||
func (s *JobStore) ListDeadlineExceeded(ctx context.Context, now time.Time) ([]*types.Job, error) {
|
||||
stmt := database.Builder.
|
||||
Select(jobColumns).
|
||||
From("jobs").
|
||||
Where("job_state = ?", enum.JobStateRunning).
|
||||
Where("job_run_deadline < ?", now.UnixMilli()).
|
||||
OrderBy("job_run_deadline asc")
|
||||
|
||||
sql, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert list overdue jobs query to sql: %w", err)
|
||||
}
|
||||
|
||||
result := make([]*types.Job, 0)
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
if err = db.SelectContext(ctx, &result, sql, args...); err != nil {
|
||||
return nil, database.ProcessSQLErrorf(err, "failed to execute list overdue jobs query")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// NextScheduledTime returns a scheduled time of the next ready job or zero time if no such job exists.
|
||||
func (s *JobStore) NextScheduledTime(ctx context.Context, now time.Time) (time.Time, error) {
|
||||
stmt := database.Builder.
|
||||
Select("job_scheduled").
|
||||
From("jobs").
|
||||
Where("job_state = ?", enum.JobStateScheduled).
|
||||
Where("job_scheduled > ?", now.UnixMilli()).
|
||||
OrderBy("job_scheduled asc").
|
||||
Limit(1)
|
||||
|
||||
query, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return time.Time{}, fmt.Errorf("failed to convert next scheduled time query to sql: %w", err)
|
||||
}
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
var result int64
|
||||
|
||||
err = db.QueryRowContext(ctx, query, args...).Scan(&result)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return time.Time{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return time.Time{}, database.ProcessSQLErrorf(err, "failed to execute next scheduled time query")
|
||||
}
|
||||
|
||||
return time.UnixMilli(result), nil
|
||||
}
|
||||
|
||||
// DeleteOld removes non-recurring jobs that have finished execution or have failed.
|
||||
func (s *JobStore) DeleteOld(ctx context.Context, olderThan time.Time) (int64, error) {
|
||||
stmt := database.Builder.
|
||||
Delete("jobs").
|
||||
Where("(job_state = ? OR job_state = ?)", enum.JobStateFinished, enum.JobStateFailed).
|
||||
Where("job_is_recurring = false").
|
||||
Where("job_last_executed < ?", olderThan.UnixMilli())
|
||||
|
||||
sql, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to convert delete done jobs query to sql: %w", err)
|
||||
}
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
result, err := db.ExecContext(ctx, sql, args...)
|
||||
if err != nil {
|
||||
return 0, database.ProcessSQLErrorf(err, "failed to execute delete done jobs query")
|
||||
}
|
||||
|
||||
n, err := result.RowsAffected()
|
||||
if err != nil {
|
||||
return 0, database.ProcessSQLErrorf(err, "failed to get number of deleted jobs")
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
DROP TABLE IF exists pipelines;
|
||||
DROP TABLE IF exists executions;
|
||||
DROP TABLE IF exists stages;
|
||||
DROP TABLE IF exists secrets;
|
||||
DROP TABLE IF exists steps;
|
||||
DROP TABLE IF exists logs;
|
||||
DROP TABLE IF exists plugins;
|
||||
@ -10,26 +11,17 @@ DROP TABLE IF exists triggers;
|
||||
CREATE TABLE pipelines (
|
||||
pipeline_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,pipeline_description TEXT NOT NULL
|
||||
,pipeline_space_id INTEGER NOT NULL
|
||||
,pipeline_uid TEXT NOT NULL
|
||||
,pipeline_seq INTEGER NOT NULL DEFAULT 0
|
||||
,pipeline_repo_id INTEGER NOT NULL
|
||||
,pipeline_repo_type TEXT NOT NULL
|
||||
,pipeline_repo_name TEXT NOT NULL
|
||||
,pipeline_default_branch TEXT NOT NULL
|
||||
,pipeline_config_path TEXT NOT NULL
|
||||
,pipeline_created INTEGER NOT NULL
|
||||
,pipeline_updated INTEGER NOT NULL
|
||||
,pipeline_version INTEGER NOT NULL
|
||||
|
||||
-- Ensure unique combination of UID and ParentID
|
||||
,UNIQUE (pipeline_space_id, pipeline_uid)
|
||||
|
||||
-- Foreign key to spaces table
|
||||
,CONSTRAINT fk_pipeline_space_id FOREIGN KEY (pipeline_space_id)
|
||||
REFERENCES spaces (space_id) MATCH SIMPLE
|
||||
ON UPDATE NO ACTION
|
||||
ON DELETE CASCADE
|
||||
-- Ensure unique combination of UID and repo ID
|
||||
,UNIQUE (pipeline_repo_id, pipeline_uid)
|
||||
|
||||
-- Foreign key to repositories table
|
||||
,CONSTRAINT fk_pipelines_repo_id FOREIGN KEY (pipeline_repo_id)
|
||||
@ -198,23 +190,32 @@ CREATE TABLE logs (
|
||||
|
||||
-- Insert some pipelines
|
||||
INSERT INTO pipelines (
|
||||
pipeline_id, pipeline_description, pipeline_space_id, pipeline_uid, pipeline_seq,
|
||||
pipeline_repo_id, pipeline_repo_type, pipeline_repo_name, pipeline_default_branch,
|
||||
pipeline_id, pipeline_description, pipeline_uid, pipeline_seq,
|
||||
pipeline_repo_id, pipeline_default_branch,
|
||||
pipeline_config_path, pipeline_created, pipeline_updated, pipeline_version
|
||||
) VALUES (
|
||||
1, 'Sample Pipeline 1', 1, 'pipeline_uid_1', 2, 1, 'git', 'sample_repo_1',
|
||||
1, 'Sample Pipeline 1', 'pipeline_uid_1', 2, 1,
|
||||
'main', 'config_path_1', 1678932000, 1678932100, 1
|
||||
);
|
||||
|
||||
INSERT INTO pipelines (
|
||||
pipeline_id, pipeline_description, pipeline_space_id, pipeline_uid, pipeline_seq,
|
||||
pipeline_repo_id, pipeline_repo_type, pipeline_repo_name, pipeline_default_branch,
|
||||
pipeline_id, pipeline_description, pipeline_uid, pipeline_seq,
|
||||
pipeline_repo_id, pipeline_default_branch,
|
||||
pipeline_config_path, pipeline_created, pipeline_updated, pipeline_version
|
||||
) VALUES (
|
||||
2, 'Sample Pipeline 2', 1, 'pipeline_uid_2', 0, 1, 'git', 'sample_repo_2',
|
||||
2, 'Sample Pipeline 2', 'pipeline_uid_2', 0, 1,
|
||||
'develop', 'config_path_2', 1678932200, 1678932300, 1
|
||||
);
|
||||
|
||||
INSERT INTO pipelines (
|
||||
pipeline_id, pipeline_description, pipeline_uid, pipeline_seq,
|
||||
pipeline_repo_id, pipeline_default_branch,
|
||||
pipeline_config_path, pipeline_created, pipeline_updated, pipeline_version
|
||||
) VALUES (
|
||||
3, 'Sample Pipeline 3', 'pipeline_uid_3', 0, 1,
|
||||
'develop', 'config_path_2', 1678932200000, 1678932300000, 1
|
||||
);
|
||||
|
||||
-- Insert some executions
|
||||
INSERT INTO executions (
|
||||
execution_id, execution_pipeline_id, execution_repo_id, execution_trigger,
|
||||
@ -296,6 +297,26 @@ INSERT INTO executions (
|
||||
'production', 5, 0, 1678932500, 1678932600, 1678932700, 1678932800, 1
|
||||
);
|
||||
|
||||
INSERT INTO executions (
|
||||
execution_id, execution_pipeline_id, execution_repo_id, execution_trigger,
|
||||
execution_number, execution_parent, execution_status, execution_error,
|
||||
execution_event, execution_action, execution_link, execution_timestamp,
|
||||
execution_title, execution_message, execution_before, execution_after,
|
||||
execution_ref, execution_source_repo, execution_source, execution_target,
|
||||
execution_author, execution_author_name, execution_author_email,
|
||||
execution_author_avatar, execution_sender, execution_params, execution_cron,
|
||||
execution_deploy, execution_deploy_id, execution_debug, execution_started,
|
||||
execution_finished, execution_created, execution_updated, execution_version
|
||||
) VALUES (
|
||||
5, 2, 1, 'manual', 3, 0, 'running', '', 'push', 'created',
|
||||
'https://example.com/pipelines/1', 1678932400, 'Pipeline Execution 1',
|
||||
'Pipeline execution message...', 'commit_hash_before', 'commit_hash_after',
|
||||
'refs/heads/main', 'source_repo_name', 'source_branch', 'target_branch',
|
||||
'author_login', 'Author Name', 'author@example.com', 'https://example.com/avatar.jpg',
|
||||
'sender_username', '{"param1": "value1", "param2": "value2"}', '0 0 * * *',
|
||||
'production', 5, 0, 1678932500, 1678932600, 1678932700, 1678932800, 1
|
||||
);
|
||||
|
||||
-- Insert some stages
|
||||
INSERT INTO stages (stage_id, stage_execution_id, stage_number, stage_parent_group_id, stage_kind, stage_type, stage_name, stage_status, stage_error, stage_errignore, stage_exit_code, stage_limit, stage_os, stage_arch, stage_variant, stage_kernel, stage_machine, stage_started, stage_stopped, stage_created, stage_updated, stage_version, stage_on_success, stage_on_failure, stage_depends_on, stage_labels, stage_limit_repo)
|
||||
VALUES (
|
||||
|
@ -0,0 +1,4 @@
|
||||
DROP INDEX jobs_last_executed;
|
||||
DROP INDEX jobs_run_deadline;
|
||||
DROP INDEX jobs_scheduled;
|
||||
DROP TABLE jobs;
|
@ -0,0 +1,35 @@
|
||||
CREATE TABLE jobs (
|
||||
job_uid TEXT NOT NULL
|
||||
,job_created BIGINT NOT NULL
|
||||
,job_updated BIGINT NOT NULL
|
||||
,job_type TEXT NOT NULL
|
||||
,job_priority INTEGER NOT NULL
|
||||
,job_data TEXT NOT NULL
|
||||
,job_result TEXT NOT NULL
|
||||
,job_max_duration_seconds INTEGER NOT NULL
|
||||
,job_max_retries INTEGER NOT NULL
|
||||
,job_state TEXT NOT NULL
|
||||
,job_scheduled BIGINT NOT NULL
|
||||
,job_total_executions INTEGER
|
||||
,job_run_by TEXT NOT NULL
|
||||
,job_run_deadline BIGINT
|
||||
,job_run_progress INTEGER NOT NULL
|
||||
,job_last_executed BIGINT
|
||||
,job_is_recurring BOOLEAN NOT NULL
|
||||
,job_recurring_cron TEXT NOT NULL
|
||||
,job_consecutive_failures INTEGER NOT NULL
|
||||
,job_last_failure_error TEXT NOT NULL
|
||||
,CONSTRAINT pk_jobs_uid PRIMARY KEY (job_uid)
|
||||
);
|
||||
|
||||
CREATE INDEX jobs_scheduled
|
||||
ON jobs(job_scheduled)
|
||||
WHERE job_state = 'scheduled';
|
||||
|
||||
CREATE INDEX jobs_run_deadline
|
||||
ON jobs(job_run_deadline)
|
||||
WHERE job_state = 'running';
|
||||
|
||||
CREATE INDEX jobs_last_executed
|
||||
ON jobs(job_last_executed)
|
||||
WHERE job_state = 'finished' OR job_state = 'failed';
|
@ -0,0 +1,4 @@
|
||||
DROP INDEX jobs_last_executed;
|
||||
DROP INDEX jobs_run_deadline;
|
||||
DROP INDEX jobs_scheduled;
|
||||
DROP TABLE jobs;
|
@ -0,0 +1,35 @@
|
||||
CREATE TABLE jobs (
|
||||
job_uid TEXT NOT NULL
|
||||
,job_created BIGINT NOT NULL
|
||||
,job_updated BIGINT NOT NULL
|
||||
,job_type TEXT NOT NULL
|
||||
,job_priority INTEGER NOT NULL
|
||||
,job_data TEXT NOT NULL
|
||||
,job_result TEXT NOT NULL
|
||||
,job_max_duration_seconds INTEGER NOT NULL
|
||||
,job_max_retries INTEGER NOT NULL
|
||||
,job_state TEXT NOT NULL
|
||||
,job_scheduled BIGINT NOT NULL
|
||||
,job_total_executions INTEGER
|
||||
,job_run_by TEXT NOT NULL
|
||||
,job_run_deadline BIGINT
|
||||
,job_run_progress INTEGER NOT NULL
|
||||
,job_last_executed BIGINT
|
||||
,job_is_recurring BOOLEAN NOT NULL
|
||||
,job_recurring_cron TEXT NOT NULL
|
||||
,job_consecutive_failures INTEGER NOT NULL
|
||||
,job_last_failure_error TEXT NOT NULL
|
||||
,CONSTRAINT pk_jobs_uid PRIMARY KEY (job_uid)
|
||||
);
|
||||
|
||||
CREATE INDEX jobs_scheduled
|
||||
ON jobs(job_scheduled)
|
||||
WHERE job_state = 'scheduled';
|
||||
|
||||
CREATE INDEX jobs_run_deadline
|
||||
ON jobs(job_run_deadline)
|
||||
WHERE job_state = 'running';
|
||||
|
||||
CREATE INDEX jobs_last_executed
|
||||
ON jobs(job_last_executed)
|
||||
WHERE job_state = 'finished' OR job_state = 'failed';
|
@ -31,12 +31,9 @@ const (
|
||||
pipelineColumns = `
|
||||
pipeline_id
|
||||
,pipeline_description
|
||||
,pipeline_space_id
|
||||
,pipeline_uid
|
||||
,pipeline_seq
|
||||
,pipeline_repo_id
|
||||
,pipeline_repo_type
|
||||
,pipeline_repo_name
|
||||
,pipeline_default_branch
|
||||
,pipeline_config_path
|
||||
,pipeline_created
|
||||
@ -69,14 +66,14 @@ func (s *pipelineStore) Find(ctx context.Context, id int64) (*types.Pipeline, er
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
// FindByUID returns a pipeline in a given space with a given UID.
|
||||
func (s *pipelineStore) FindByUID(ctx context.Context, spaceID int64, uid string) (*types.Pipeline, error) {
|
||||
// FindByUID returns a pipeline for a given repo with a given UID.
|
||||
func (s *pipelineStore) FindByUID(ctx context.Context, repoID int64, uid string) (*types.Pipeline, error) {
|
||||
const findQueryStmt = pipelineQueryBase + `
|
||||
WHERE pipeline_space_id = $1 AND pipeline_uid = $2`
|
||||
WHERE pipeline_repo_id = $1 AND pipeline_uid = $2`
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
dst := new(types.Pipeline)
|
||||
if err := db.GetContext(ctx, dst, findQueryStmt, spaceID, uid); err != nil {
|
||||
if err := db.GetContext(ctx, dst, findQueryStmt, repoID, uid); err != nil {
|
||||
return nil, database.ProcessSQLErrorf(err, "Failed to find pipeline")
|
||||
}
|
||||
return dst, nil
|
||||
@ -87,12 +84,9 @@ func (s *pipelineStore) Create(ctx context.Context, pipeline *types.Pipeline) er
|
||||
const pipelineInsertStmt = `
|
||||
INSERT INTO pipelines (
|
||||
pipeline_description
|
||||
,pipeline_space_id
|
||||
,pipeline_uid
|
||||
,pipeline_seq
|
||||
,pipeline_repo_id
|
||||
,pipeline_repo_type
|
||||
,pipeline_repo_name
|
||||
,pipeline_default_branch
|
||||
,pipeline_config_path
|
||||
,pipeline_created
|
||||
@ -100,12 +94,9 @@ func (s *pipelineStore) Create(ctx context.Context, pipeline *types.Pipeline) er
|
||||
,pipeline_version
|
||||
) VALUES (
|
||||
:pipeline_description,
|
||||
:pipeline_space_id,
|
||||
:pipeline_uid,
|
||||
:pipeline_seq,
|
||||
:pipeline_repo_id,
|
||||
:pipeline_repo_type,
|
||||
:pipeline_repo_name,
|
||||
:pipeline_default_branch,
|
||||
:pipeline_config_path,
|
||||
:pipeline_created,
|
||||
@ -171,16 +162,16 @@ func (s *pipelineStore) Update(ctx context.Context, p *types.Pipeline) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// List lists all the pipelines present in a space.
|
||||
// List lists all the pipelines for a repository.
|
||||
func (s *pipelineStore) List(
|
||||
ctx context.Context,
|
||||
parentID int64,
|
||||
repoID int64,
|
||||
filter types.ListQueryFilter,
|
||||
) ([]*types.Pipeline, error) {
|
||||
stmt := database.Builder.
|
||||
Select(pipelineColumns).
|
||||
From("pipelines").
|
||||
Where("pipeline_space_id = ?", fmt.Sprint(parentID))
|
||||
Where("pipeline_repo_id = ?", fmt.Sprint(repoID))
|
||||
|
||||
if filter.Query != "" {
|
||||
stmt = stmt.Where("LOWER(pipeline_uid) LIKE ?", fmt.Sprintf("%%%s%%", strings.ToLower(filter.Query)))
|
||||
@ -204,6 +195,78 @@ func (s *pipelineStore) List(
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
// ListLatest lists all the pipelines under a repository with information
|
||||
// about the latest build if available.
|
||||
func (s *pipelineStore) ListLatest(
|
||||
ctx context.Context,
|
||||
repoID int64,
|
||||
filter types.ListQueryFilter,
|
||||
) ([]*types.Pipeline, error) {
|
||||
const pipelineExecutionColumns = pipelineColumns + `
|
||||
,executions.execution_id
|
||||
,executions.execution_pipeline_id
|
||||
,execution_repo_id
|
||||
,execution_trigger
|
||||
,execution_number
|
||||
,execution_status
|
||||
,execution_error
|
||||
,execution_link
|
||||
,execution_timestamp
|
||||
,execution_title
|
||||
,execution_author
|
||||
,execution_author_name
|
||||
,execution_author_email
|
||||
,execution_author_avatar
|
||||
,execution_source
|
||||
,execution_target
|
||||
,execution_source_repo
|
||||
,execution_started
|
||||
,execution_finished
|
||||
,execution_created
|
||||
,execution_updated
|
||||
`
|
||||
// Create a subquery to get max execution IDs for each unique execution pipeline ID.
|
||||
subquery := database.Builder.
|
||||
Select("execution_pipeline_id, execution_id, MAX(execution_number)").
|
||||
From("executions").
|
||||
Where("execution_repo_id = ?").
|
||||
GroupBy("execution_pipeline_id")
|
||||
|
||||
// Convert the subquery to SQL.
|
||||
subquerySQL, _, err := subquery.ToSql()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Left join the previous table with executions and pipelines table.
|
||||
stmt := database.Builder.
|
||||
Select(pipelineExecutionColumns).
|
||||
From("pipelines").
|
||||
LeftJoin("("+subquerySQL+") AS max_executions ON pipelines.pipeline_id = max_executions.execution_pipeline_id").
|
||||
LeftJoin("executions ON executions.execution_id = max_executions.execution_id").
|
||||
Where("pipeline_repo_id = ?", fmt.Sprint(repoID))
|
||||
|
||||
if filter.Query != "" {
|
||||
stmt = stmt.Where("LOWER(pipeline_uid) LIKE ?", fmt.Sprintf("%%%s%%", strings.ToLower(filter.Query)))
|
||||
}
|
||||
stmt = stmt.Limit(database.Limit(filter.Size))
|
||||
stmt = stmt.Offset(database.Offset(filter.Page, filter.Size))
|
||||
|
||||
sql, args, err := stmt.ToSql()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed to convert query to sql")
|
||||
}
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
dst := []*pipelineExecutionJoin{}
|
||||
if err = db.SelectContext(ctx, &dst, sql, args...); err != nil {
|
||||
return nil, database.ProcessSQLErrorf(err, "Failed executing custom list query")
|
||||
}
|
||||
|
||||
return convert(dst), nil
|
||||
}
|
||||
|
||||
// UpdateOptLock updates the pipeline using the optimistic locking mechanism.
|
||||
func (s *pipelineStore) UpdateOptLock(ctx context.Context,
|
||||
pipeline *types.Pipeline,
|
||||
@ -231,12 +294,12 @@ func (s *pipelineStore) UpdateOptLock(ctx context.Context,
|
||||
}
|
||||
}
|
||||
|
||||
// Count of pipelines in a space.
|
||||
func (s *pipelineStore) Count(ctx context.Context, parentID int64, filter types.ListQueryFilter) (int64, error) {
|
||||
// Count of pipelines under a repo.
|
||||
func (s *pipelineStore) Count(ctx context.Context, repoID int64, filter types.ListQueryFilter) (int64, error) {
|
||||
stmt := database.Builder.
|
||||
Select("count(*)").
|
||||
From("pipelines").
|
||||
Where("pipeline_space_id = ?", parentID)
|
||||
Where("pipeline_repo_id = ?", repoID)
|
||||
|
||||
if filter.Query != "" {
|
||||
stmt = stmt.Where("LOWER(pipeline_uid) LIKE ?", fmt.Sprintf("%%%s%%", strings.ToLower(filter.Query)))
|
||||
@ -272,15 +335,15 @@ func (s *pipelineStore) Delete(ctx context.Context, id int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteByUID deletes a pipeline with a given UID in a space.
|
||||
func (s *pipelineStore) DeleteByUID(ctx context.Context, spaceID int64, uid string) error {
|
||||
// DeleteByUID deletes a pipeline with a given UID under a given repo.
|
||||
func (s *pipelineStore) DeleteByUID(ctx context.Context, repoID int64, uid string) error {
|
||||
const pipelineDeleteStmt = `
|
||||
DELETE FROM pipelines
|
||||
WHERE pipeline_space_id = $1 AND pipeline_uid = $2`
|
||||
WHERE pipeline_repo_id = $1 AND pipeline_uid = $2`
|
||||
|
||||
db := dbtx.GetAccessor(ctx, s.db)
|
||||
|
||||
if _, err := db.ExecContext(ctx, pipelineDeleteStmt, spaceID, uid); err != nil {
|
||||
if _, err := db.ExecContext(ctx, pipelineDeleteStmt, repoID, uid); err != nil {
|
||||
return database.ProcessSQLErrorf(err, "Could not delete pipeline")
|
||||
}
|
||||
|
||||
|
77
internal/store/database/pipeline_join.go
Normal file
77
internal/store/database/pipeline_join.go
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/harness/gitness/types"
|
||||
)
|
||||
|
||||
// pipelineExecutionjoin struct represents a joined row between pipelines and executions
|
||||
type pipelineExecutionJoin struct {
|
||||
*types.Pipeline
|
||||
ID sql.NullInt64 `db:"execution_id"`
|
||||
PipelineID sql.NullInt64 `db:"execution_pipeline_id"`
|
||||
RepoID sql.NullInt64 `db:"execution_repo_id"`
|
||||
Trigger sql.NullString `db:"execution_trigger"`
|
||||
Number sql.NullInt64 `db:"execution_number"`
|
||||
Status sql.NullString `db:"execution_status"`
|
||||
Error sql.NullString `db:"execution_error"`
|
||||
Link sql.NullString `db:"execution_link"`
|
||||
Timestamp sql.NullInt64 `db:"execution_timestamp"`
|
||||
Title sql.NullString `db:"execution_title"`
|
||||
Fork sql.NullString `db:"execution_source_repo"`
|
||||
Source sql.NullString `db:"execution_source"`
|
||||
Target sql.NullString `db:"execution_target"`
|
||||
Author sql.NullString `db:"execution_author"`
|
||||
AuthorName sql.NullString `db:"execution_author_name"`
|
||||
AuthorEmail sql.NullString `db:"execution_author_email"`
|
||||
AuthorAvatar sql.NullString `db:"execution_author_avatar"`
|
||||
Started sql.NullInt64 `db:"execution_started"`
|
||||
Finished sql.NullInt64 `db:"execution_finished"`
|
||||
Created sql.NullInt64 `db:"execution_created"`
|
||||
Updated sql.NullInt64 `db:"execution_updated"`
|
||||
}
|
||||
|
||||
func convert(rows []*pipelineExecutionJoin) []*types.Pipeline {
|
||||
pipelines := []*types.Pipeline{}
|
||||
for _, k := range rows {
|
||||
pipeline := convertPipelineJoin(k)
|
||||
pipelines = append(pipelines, pipeline)
|
||||
}
|
||||
return pipelines
|
||||
}
|
||||
|
||||
func convertPipelineJoin(join *pipelineExecutionJoin) *types.Pipeline {
|
||||
ret := join.Pipeline
|
||||
if !join.ID.Valid {
|
||||
return ret
|
||||
}
|
||||
ret.Execution = &types.Execution{
|
||||
ID: join.ID.Int64,
|
||||
PipelineID: join.PipelineID.Int64,
|
||||
RepoID: join.RepoID.Int64,
|
||||
Trigger: join.Trigger.String,
|
||||
Number: join.Number.Int64,
|
||||
Status: join.Status.String,
|
||||
Error: join.Error.String,
|
||||
Link: join.Link.String,
|
||||
Timestamp: join.Timestamp.Int64,
|
||||
Title: join.Title.String,
|
||||
Fork: join.Fork.String,
|
||||
Source: join.Source.String,
|
||||
Target: join.Target.String,
|
||||
Author: join.Author.String,
|
||||
AuthorName: join.AuthorName.String,
|
||||
AuthorEmail: join.AuthorEmail.String,
|
||||
AuthorAvatar: join.AuthorAvatar.String,
|
||||
Started: join.Started.Int64,
|
||||
Finished: join.Finished.Int64,
|
||||
Created: join.Created.Int64,
|
||||
Updated: join.Updated.Int64,
|
||||
}
|
||||
return ret
|
||||
}
|
@ -357,6 +357,7 @@ func mapToWebhook(hook *webhook) (*types.Webhook, error) {
|
||||
Insecure: hook.Insecure,
|
||||
Triggers: triggersFromString(hook.Triggers),
|
||||
LatestExecutionResult: (*enum.WebhookExecutionResult)(hook.LatestExecutionResult.Ptr()),
|
||||
Internal: hook.Internal,
|
||||
}
|
||||
|
||||
switch {
|
||||
@ -390,6 +391,7 @@ func mapToInternalWebhook(hook *types.Webhook) (*webhook, error) {
|
||||
Insecure: hook.Insecure,
|
||||
Triggers: triggersToString(hook.Triggers),
|
||||
LatestExecutionResult: null.StringFromPtr((*string)(hook.LatestExecutionResult)),
|
||||
Internal: hook.Internal,
|
||||
}
|
||||
|
||||
switch hook.ParentType {
|
||||
|
@ -23,6 +23,7 @@ var WireSet = wire.NewSet(
|
||||
ProvidePathStore,
|
||||
ProvideSpaceStore,
|
||||
ProvideRepoStore,
|
||||
ProvideJobStore,
|
||||
ProvideExecutionStore,
|
||||
ProvidePipelineStore,
|
||||
ProvideStageStore,
|
||||
@ -87,6 +88,11 @@ func ProvideRepoStore(db *sqlx.DB, pathCache store.PathCache) store.RepoStore {
|
||||
return NewRepoStore(db, pathCache)
|
||||
}
|
||||
|
||||
// ProvideJobStore provides a job store.
|
||||
func ProvideJobStore(db *sqlx.DB) store.JobStore {
|
||||
return NewJobStore(db)
|
||||
}
|
||||
|
||||
// ProvidePipelineStore provides a pipeline store.
|
||||
func ProvidePipelineStore(db *sqlx.DB) store.PipelineStore {
|
||||
return NewPipelineStore(db)
|
||||
|
@ -8,10 +8,9 @@ import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
user "github.com/harness/gitness/internal/api/controller/user"
|
||||
types "github.com/harness/gitness/types"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
)
|
||||
|
||||
// MockClient is a mock of Client interface.
|
||||
|
@ -8,10 +8,9 @@ import (
|
||||
context "context"
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
types "github.com/harness/gitness/types"
|
||||
enum "github.com/harness/gitness/types/enum"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
)
|
||||
|
||||
// MockPrincipalStore is a mock of PrincipalStore interface.
|
||||
|
@ -180,4 +180,13 @@ type Config struct {
|
||||
SendTimeout time.Duration `envconfig:"GITNESS_PUBSUB_SEND_TIMEOUT" default:"60s"`
|
||||
ChannelSize int `envconfig:"GITNESS_PUBSUB_CHANNEL_SIZE" default:"100"`
|
||||
}
|
||||
|
||||
BackgroundJobs struct {
|
||||
// MaxRunning is maximum number of jobs that can be running at once.
|
||||
MaxRunning int `envconfig:"GITNESS_JOBS_MAX_RUNNING" default:"10"`
|
||||
|
||||
// PurgeFinishedOlderThan is duration after non-recurring,
|
||||
// finished and failed jobs will be purged from the DB.
|
||||
PurgeFinishedOlderThan time.Duration `envconfig:"GITNESS_JOBS_PURGE_FINISHED_OLDER_THAN" default:"120h"`
|
||||
}
|
||||
}
|
||||
|
25
types/enum/job.go
Normal file
25
types/enum/job.go
Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package enum
|
||||
|
||||
// JobState represents state of a background job.
|
||||
type JobState string
|
||||
|
||||
// JobState enumeration.
|
||||
const (
|
||||
JobStateScheduled JobState = "scheduled"
|
||||
JobStateRunning JobState = "running"
|
||||
JobStateFinished JobState = "finished"
|
||||
JobStateFailed JobState = "failed"
|
||||
)
|
||||
|
||||
// JobPriority represents priority of a background job.
|
||||
type JobPriority int
|
||||
|
||||
// JobPriority enumeration.
|
||||
const (
|
||||
JobPriorityNormal JobPriority = 0
|
||||
JobPriorityElevated JobPriority = 1
|
||||
)
|
@ -31,7 +31,7 @@ var membershipRoleReaderPermissions = slices.Clip(slices.Insert([]Permission{},
|
||||
))
|
||||
|
||||
var membershipRoleExecutorPermissions = slices.Clip(slices.Insert(membershipRoleReaderPermissions, 0,
|
||||
PermissionCommitCheckReport,
|
||||
PermissionRepoReportCommitCheck,
|
||||
PermissionPipelineExecute,
|
||||
PermissionSecretAccess,
|
||||
PermissionConnectorAccess,
|
||||
@ -46,7 +46,7 @@ var membershipRoleSpaceOwnerPermissions = slices.Clip(slices.Insert(membershipRo
|
||||
PermissionRepoEdit,
|
||||
PermissionRepoDelete,
|
||||
PermissionRepoPush,
|
||||
PermissionCommitCheckReport,
|
||||
PermissionRepoReportCommitCheck,
|
||||
|
||||
PermissionSpaceEdit,
|
||||
PermissionSpaceCreate,
|
||||
|
@ -17,7 +17,6 @@ const (
|
||||
ResourceTypeSecret ResourceType = "SECRET"
|
||||
ResourceTypeConnector ResourceType = "CONNECTOR"
|
||||
ResourceTypeTemplate ResourceType = "TEMPLATE"
|
||||
// ResourceType_Branch ResourceType = "BRANCH"
|
||||
)
|
||||
|
||||
// Permission represents the different types of permissions a principal can have.
|
||||
@ -37,10 +36,11 @@ const (
|
||||
/*
|
||||
----- REPOSITORY -----
|
||||
*/
|
||||
PermissionRepoView Permission = "repo_view"
|
||||
PermissionRepoEdit Permission = "repo_edit"
|
||||
PermissionRepoDelete Permission = "repo_delete"
|
||||
PermissionRepoPush Permission = "repo_push"
|
||||
PermissionRepoView Permission = "repo_view"
|
||||
PermissionRepoEdit Permission = "repo_edit"
|
||||
PermissionRepoDelete Permission = "repo_delete"
|
||||
PermissionRepoPush Permission = "repo_push"
|
||||
PermissionRepoReportCommitCheck Permission = "repo_reportCommitCheck"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -114,10 +114,3 @@ const (
|
||||
PermissionTemplateDelete Permission = "template_delete"
|
||||
PermissionTemplateAccess Permission = "template_access"
|
||||
)
|
||||
|
||||
const (
|
||||
/*
|
||||
----- COMMIT CHECK -----
|
||||
*/
|
||||
PermissionCommitCheckReport Permission = "commitCheck_report"
|
||||
)
|
||||
|
38
types/job.go
Normal file
38
types/job.go
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2022 Harness Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Polyform Free Trial License
|
||||
// that can be found in the LICENSE.md file for this repository.
|
||||
|
||||
package types
|
||||
|
||||
import "github.com/harness/gitness/types/enum"
|
||||
|
||||
type Job struct {
|
||||
UID string `db:"job_uid"`
|
||||
Created int64 `db:"job_created"`
|
||||
Updated int64 `db:"job_updated"`
|
||||
Type string `db:"job_type"`
|
||||
Priority enum.JobPriority `db:"job_priority"`
|
||||
Data string `db:"job_data"`
|
||||
Result string `db:"job_result"`
|
||||
MaxDurationSeconds int `db:"job_max_duration_seconds"`
|
||||
MaxRetries int `db:"job_max_retries"`
|
||||
State enum.JobState `db:"job_state"`
|
||||
Scheduled int64 `db:"job_scheduled"`
|
||||
TotalExecutions int `db:"job_total_executions"`
|
||||
RunBy string `db:"job_run_by"`
|
||||
RunDeadline int64 `db:"job_run_deadline"`
|
||||
RunProgress int `db:"job_run_progress"`
|
||||
LastExecuted int64 `db:"job_last_executed"`
|
||||
IsRecurring bool `db:"job_is_recurring"`
|
||||
RecurringCron string `db:"job_recurring_cron"`
|
||||
ConsecutiveFailures int `db:"job_consecutive_failures"`
|
||||
LastFailureError string `db:"job_last_failure_error"`
|
||||
}
|
||||
|
||||
type JobStateChange struct {
|
||||
UID string `json:"uid"`
|
||||
State enum.JobState `json:"state"`
|
||||
Progress int `json:"progress"`
|
||||
Result string `json:"result"`
|
||||
Failure string `json:"failure"`
|
||||
}
|
@ -4,20 +4,16 @@
|
||||
|
||||
package types
|
||||
|
||||
import "github.com/harness/gitness/types/enum"
|
||||
|
||||
type Pipeline struct {
|
||||
ID int64 `db:"pipeline_id" json:"id"`
|
||||
Description string `db:"pipeline_description" json:"description"`
|
||||
SpaceID int64 `db:"pipeline_space_id" json:"space_id"`
|
||||
UID string `db:"pipeline_uid" json:"uid"`
|
||||
Seq int64 `db:"pipeline_seq" json:"seq"` // last execution number for this pipeline
|
||||
RepoID int64 `db:"pipeline_repo_id" json:"repo_id"` // null if repo_type != gitness
|
||||
RepoType enum.ScmType `db:"pipeline_repo_type" json:"repo_type"`
|
||||
RepoName string `db:"pipeline_repo_name" json:"repo_name"`
|
||||
DefaultBranch string `db:"pipeline_default_branch" json:"default_branch"`
|
||||
ConfigPath string `db:"pipeline_config_path" json:"config_path"`
|
||||
Created int64 `db:"pipeline_created" json:"created"`
|
||||
Updated int64 `db:"pipeline_updated" json:"updated"`
|
||||
Version int64 `db:"pipeline_version" json:"version"`
|
||||
ID int64 `db:"pipeline_id" json:"id"`
|
||||
Description string `db:"pipeline_description" json:"description"`
|
||||
UID string `db:"pipeline_uid" json:"uid"`
|
||||
Seq int64 `db:"pipeline_seq" json:"seq"` // last execution number for this pipeline
|
||||
RepoID int64 `db:"pipeline_repo_id" json:"repo_id"`
|
||||
DefaultBranch string `db:"pipeline_default_branch" json:"default_branch"`
|
||||
ConfigPath string `db:"pipeline_config_path" json:"config_path"`
|
||||
Created int64 `db:"pipeline_created" json:"created"`
|
||||
Execution *Execution `db:"-" json:"execution,omitempty"` // information about the latest execution if available
|
||||
Updated int64 `db:"pipeline_updated" json:"updated"`
|
||||
Version int64 `db:"pipeline_version" json:"-"`
|
||||
}
|
||||
|
@ -45,8 +45,8 @@ export interface CODERoutes {
|
||||
|
||||
toCODESpaceAccessControl: (args: Required<Pick<CODEProps, 'space'>>) => string
|
||||
toCODESpaceSettings: (args: Required<Pick<CODEProps, 'space'>>) => string
|
||||
toCODEPipelines: (args: Required<Pick<CODEProps, 'space'>>) => string
|
||||
toCODEPipelineEdit: (args: Required<Pick<CODEProps, 'space' | 'pipeline'>>) => string
|
||||
toCODEPipelines: (args: Required<Pick<CODEProps, 'repoPath'>>) => string
|
||||
toCODEPipelineEdit: (args: Required<Pick<CODEProps, 'repoPath' | 'pipeline'>>) => string
|
||||
toCODESecrets: (args: Required<Pick<CODEProps, 'space'>>) => string
|
||||
|
||||
toCODEGlobalSettings: () => string
|
||||
@ -74,8 +74,8 @@ export interface CODERoutes {
|
||||
toCODEWebhookDetails: (args: Required<Pick<CODEProps, 'repoPath' | 'webhookId'>>) => string
|
||||
toCODESettings: (args: Required<Pick<CODEProps, 'repoPath'>>) => string
|
||||
|
||||
toCODEExecutions: (args: Required<Pick<CODEProps, 'space' | 'pipeline'>>) => string
|
||||
toCODEExecution: (args: Required<Pick<CODEProps, 'space' | 'pipeline' | 'execution'>>) => string
|
||||
toCODEExecutions: (args: Required<Pick<CODEProps, 'repoPath' | 'pipeline'>>) => string
|
||||
toCODEExecution: (args: Required<Pick<CODEProps, 'repoPath' | 'pipeline' | 'execution'>>) => string
|
||||
toCODESecret: (args: Required<Pick<CODEProps, 'space' | 'secret'>>) => string
|
||||
}
|
||||
|
||||
@ -96,8 +96,8 @@ export const routes: CODERoutes = {
|
||||
|
||||
toCODESpaceAccessControl: ({ space }) => `/access-control/${space}`,
|
||||
toCODESpaceSettings: ({ space }) => `/settings/${space}`,
|
||||
toCODEPipelines: ({ space }) => `/pipelines/${space}`,
|
||||
toCODEPipelineEdit: ({ space, pipeline }) => `/pipelines/${space}/pipeline/${pipeline}/edit`,
|
||||
toCODEPipelines: ({ repoPath }) => `/${repoPath}/pipelines`,
|
||||
toCODEPipelineEdit: ({ repoPath, pipeline }) => `/${repoPath}/pipeline/${pipeline}/edit`,
|
||||
toCODESecrets: ({ space }) => `/secrets/${space}`,
|
||||
|
||||
toCODEGlobalSettings: () => '/settings',
|
||||
@ -130,8 +130,7 @@ export const routes: CODERoutes = {
|
||||
toCODEWebhookNew: ({ repoPath }) => `/${repoPath}/webhooks/new`,
|
||||
toCODEWebhookDetails: ({ repoPath, webhookId }) => `/${repoPath}/webhook/${webhookId}`,
|
||||
|
||||
toCODEExecutions: ({ space, pipeline }) => `/pipelines/${space}/pipeline/${pipeline}`,
|
||||
toCODEExecution: ({ space, pipeline, execution }) =>
|
||||
`/pipelines/${space}/pipeline/${pipeline}/execution/${execution}`,
|
||||
toCODEExecutions: ({ repoPath, pipeline }) => `/${repoPath}/pipelines/${pipeline}`,
|
||||
toCODEExecution: ({ repoPath, pipeline, execution }) => `/${repoPath}/pipelines/${pipeline}/execution/${execution}`,
|
||||
toCODESecret: ({ space, secret }) => `/secrets/${space}/secret/${secret}`
|
||||
}
|
||||
|
@ -167,7 +167,7 @@ export const RouteDestinations: React.FC = React.memo(function RouteDestinations
|
||||
{OPEN_SOURCE_PIPELINES && (
|
||||
<Route
|
||||
path={routes.toCODEExecution({
|
||||
space: pathProps.space,
|
||||
repoPath,
|
||||
pipeline: pathProps.pipeline,
|
||||
execution: pathProps.execution
|
||||
})}
|
||||
@ -179,7 +179,7 @@ export const RouteDestinations: React.FC = React.memo(function RouteDestinations
|
||||
)}
|
||||
|
||||
{OPEN_SOURCE_PIPELINES && (
|
||||
<Route path={routes.toCODEExecutions({ space: pathProps.space, pipeline: pathProps.pipeline })} exact>
|
||||
<Route path={routes.toCODEExecutions({ repoPath, pipeline: pathProps.pipeline })} exact>
|
||||
<LayoutWithSideNav title={getString('pageTitle.executions')}>
|
||||
<ExecutionList />
|
||||
</LayoutWithSideNav>
|
||||
@ -195,7 +195,7 @@ export const RouteDestinations: React.FC = React.memo(function RouteDestinations
|
||||
)}
|
||||
|
||||
{OPEN_SOURCE_PIPELINES && (
|
||||
<Route path={routes.toCODEPipelines({ space: pathProps.space })} exact>
|
||||
<Route path={routes.toCODEPipelines({ repoPath })} exact>
|
||||
<LayoutWithSideNav title={getString('pageTitle.pipelines')}>
|
||||
<PipelineList />
|
||||
</LayoutWithSideNav>
|
||||
|
@ -243,8 +243,8 @@ export const Changes: React.FC<ChangesProps> = ({
|
||||
str={getString('pr.diffStatsLabel')}
|
||||
vars={{
|
||||
changedFilesLink: <ChangesDropdown diffs={diffs} />,
|
||||
addedLines: formatNumber(diffStats.addedLines),
|
||||
deletedLines: formatNumber(diffStats.deletedLines),
|
||||
addedLines: diffStats.addedLines ? formatNumber(diffStats.addedLines) : '0',
|
||||
deletedLines: diffStats.deletedLines ? formatNumber(diffStats.deletedLines) : '0',
|
||||
configuration: (
|
||||
<DiffViewConfiguration
|
||||
viewStyle={viewStyle}
|
||||
|
@ -50,7 +50,6 @@ const CommitRangeDropdown: React.FC<CommitRangeDropdownProps> = ({
|
||||
setSelectedCommits
|
||||
}) => {
|
||||
const { getString } = useStrings()
|
||||
|
||||
const allCommitsSHA = allCommits.map(commit => commit.sha as string)
|
||||
|
||||
useEffect(() => {
|
||||
@ -63,7 +62,7 @@ const CommitRangeDropdown: React.FC<CommitRangeDropdownProps> = ({
|
||||
event: React.MouseEvent<HTMLInputElement | HTMLDivElement, MouseEvent>,
|
||||
selectedCommitSHA: string
|
||||
) => {
|
||||
if (event.shiftKey || selectedCommits.length > 1) {
|
||||
if (event.shiftKey) {
|
||||
// Select Commit
|
||||
setSelectedCommits(current => {
|
||||
if (current.includes(selectedCommitSHA)) {
|
||||
|
32
web/src/components/Console/Console.module.scss
Normal file
32
web/src/components/Console/Console.module.scss
Normal file
@ -0,0 +1,32 @@
|
||||
.container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
background-color: black;
|
||||
height: 100%;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
.log {
|
||||
color: white;
|
||||
font-family: Inconsolata, monospace;
|
||||
font-size: 2rem;
|
||||
}
|
||||
|
||||
.header {
|
||||
position: sticky;
|
||||
top: 0;
|
||||
background-color: var(--black);
|
||||
height: var(--log-content-header-height);
|
||||
|
||||
.headerLayout {
|
||||
display: flex;
|
||||
align-items: baseline;
|
||||
border-bottom: 1px solid var(--grey-800);
|
||||
padding: var(--spacing-medium) 0;
|
||||
font-weight: 600;
|
||||
}
|
||||
}
|
||||
|
||||
.steps {
|
||||
padding: var(--spacing-medium) !important;
|
||||
}
|
10
web/src/components/Console/Console.module.scss.d.ts
vendored
Normal file
10
web/src/components/Console/Console.module.scss.d.ts
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
/* eslint-disable */
|
||||
// this is an auto-generated file
|
||||
declare const styles: {
|
||||
readonly container: string
|
||||
readonly log: string
|
||||
readonly header: string
|
||||
readonly headerLayout: string
|
||||
readonly steps: string
|
||||
}
|
||||
export default styles
|
51
web/src/components/Console/Console.tsx
Normal file
51
web/src/components/Console/Console.tsx
Normal file
@ -0,0 +1,51 @@
|
||||
import React, { FC } from 'react'
|
||||
import { useParams } from 'react-router-dom'
|
||||
import { Container, Layout, Text } from '@harnessio/uicore'
|
||||
import { Color, FontVariation } from '@harnessio/design-system'
|
||||
import type { CODEProps } from 'RouteDefinitions'
|
||||
import type { TypesStage } from 'services/code'
|
||||
import ConsoleStep from 'components/ConsoleStep/ConsoleStep'
|
||||
import { timeDistance } from 'utils/Utils'
|
||||
// import { useGetRepositoryMetadata } from 'hooks/useGetRepositoryMetadata'
|
||||
import css from './Console.module.scss'
|
||||
|
||||
interface ConsoleProps {
|
||||
stage: TypesStage | undefined
|
||||
repoPath: string
|
||||
}
|
||||
|
||||
const Console: FC<ConsoleProps> = ({ stage, repoPath }) => {
|
||||
const { pipeline, execution: executionNum } = useParams<CODEProps>()
|
||||
|
||||
return (
|
||||
<div className={css.container}>
|
||||
<Container className={css.header}>
|
||||
<Layout.Horizontal className={css.headerLayout} spacing="small">
|
||||
<Text font={{ variation: FontVariation.H4 }} color={Color.WHITE} padding={{ left: 'large', right: 'large' }}>
|
||||
{stage?.name}
|
||||
</Text>
|
||||
{stage?.started && stage?.stopped && (
|
||||
<Text font={{ variation: FontVariation.BODY }} color={Color.GREY_500}>
|
||||
{/* this needs fixed */}
|
||||
{timeDistance(stage?.started, stage?.stopped)}
|
||||
</Text>
|
||||
)}
|
||||
</Layout.Horizontal>
|
||||
</Container>
|
||||
<Layout.Vertical className={css.steps} spacing="small">
|
||||
{stage?.steps?.map((step, index) => (
|
||||
<ConsoleStep
|
||||
key={index}
|
||||
step={step}
|
||||
executionNumber={Number(executionNum)}
|
||||
pipelineName={pipeline}
|
||||
repoPath={repoPath}
|
||||
stageNumber={stage.number}
|
||||
/>
|
||||
))}
|
||||
</Layout.Vertical>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
export default Console
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user