diff --git a/.github/workflows/next_deploy.yml b/.github/workflows/next_deploy.yml index 98fb89a5d..2a991cde7 100644 --- a/.github/workflows/next_deploy.yml +++ b/.github/workflows/next_deploy.yml @@ -3,7 +3,7 @@ on: push: branches: - develop # change to main if needed - - feat/next2 + - feat/next3 jobs: deploy: diff --git a/next/ci_backends/ci_backends.go b/next/ci_backends/ci_backends.go new file mode 100644 index 000000000..766c73b35 --- /dev/null +++ b/next/ci_backends/ci_backends.go @@ -0,0 +1,31 @@ +package ci_backends + +import ( + "github.com/diggerhq/digger/backend/utils" + "github.com/diggerhq/digger/libs/spec" +) + +type CiBackend interface { + TriggerWorkflow(spec spec.Spec, runName string, vcsToken string) error +} + +type JenkinsCi struct{} + +type CiBackendOptions struct { + GithubClientProvider utils.GithubClientProvider + GithubInstallationId int64 + GitlabProjectId int + GitlabmergeRequestEventName string + GitlabCIPipelineID string + GitlabCIPipelineIID int + GitlabCIMergeRequestID int + GitlabCIMergeRequestIID int + GitlabCIProjectName string + GitlabciprojectNamespace string + GitlabciprojectId int + GitlabciprojectNamespaceId int + GitlabDiscussionId string + RepoFullName string + RepoOwner string + RepoName string +} diff --git a/next/ci_backends/github_actions.go b/next/ci_backends/github_actions.go new file mode 100644 index 000000000..5a6d51f76 --- /dev/null +++ b/next/ci_backends/github_actions.go @@ -0,0 +1,32 @@ +package ci_backends + +import ( + "context" + "encoding/json" + orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/libs/spec" + "github.com/google/go-github/v61/github" + "log" +) + +type GithubActionCi struct { + Client *github.Client +} + +func (g GithubActionCi) TriggerWorkflow(spec spec.Spec, runName string, vcsToken string) error { + log.Printf("TriggerGithubWorkflow: repoOwner: %v, repoName: %v, commentId: %v", spec.VCS.RepoOwner, spec.VCS.RepoName, spec.CommentId) + client := 
g.Client + specBytes, err := json.Marshal(spec) + + inputs := orchestrator_scheduler.WorkflowInput{ + Spec: string(specBytes), + RunName: runName, + } + + _, err = client.Actions.CreateWorkflowDispatchEventByFileName(context.Background(), spec.VCS.RepoOwner, spec.VCS.RepoName, spec.VCS.WorkflowFile, github.CreateWorkflowDispatchEventRequest{ + Ref: spec.Job.Branch, + Inputs: inputs.ToMap(), + }) + + return err +} diff --git a/next/ci_backends/jenkins.go b/next/ci_backends/jenkins.go new file mode 100644 index 000000000..c027cfc4c --- /dev/null +++ b/next/ci_backends/jenkins.go @@ -0,0 +1 @@ +package ci_backends diff --git a/next/ci_backends/provider.go b/next/ci_backends/provider.go new file mode 100644 index 000000000..9fd5b2ae2 --- /dev/null +++ b/next/ci_backends/provider.go @@ -0,0 +1,25 @@ +package ci_backends + +import ( + "fmt" + "github.com/diggerhq/digger/next/utils" + "log" +) + +type CiBackendProvider interface { + GetCiBackend(options CiBackendOptions) (CiBackend, error) +} + +type DefaultBackendProvider struct{} + +func (d DefaultBackendProvider) GetCiBackend(options CiBackendOptions) (CiBackend, error) { + client, _, err := utils.GetGithubClient(options.GithubClientProvider, options.GithubInstallationId, options.RepoFullName) + if err != nil { + log.Printf("GetCiBackend: could not get github client: %v", err) + return nil, fmt.Errorf("could not get github client: %v", err) + } + backend := &GithubActionCi{ + Client: client, + } + return backend, nil +} diff --git a/next/controllers/github.go b/next/controllers/github.go index 1fc9b6b1d..b911e46c5 100644 --- a/next/controllers/github.go +++ b/next/controllers/github.go @@ -5,9 +5,13 @@ import ( "encoding/base64" "encoding/json" "fmt" - "github.com/diggerhq/digger/backend/ci_backends" + "github.com/diggerhq/digger/backend/segment" + "github.com/diggerhq/digger/ee/cli/pkg/utils" + "github.com/diggerhq/digger/libs/ci" orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler" + 
"github.com/diggerhq/digger/next/ci_backends" "github.com/diggerhq/digger/next/model" + "github.com/diggerhq/digger/next/services" "log" "math/rand" "net/http" @@ -19,10 +23,11 @@ import ( "strings" "github.com/diggerhq/digger/backend/middleware" - "github.com/diggerhq/digger/backend/utils" + backend_utils "github.com/diggerhq/digger/backend/utils" dg_github "github.com/diggerhq/digger/libs/ci/github" dg_configuration "github.com/diggerhq/digger/libs/digger_config" - "github.com/diggerhq/digger/next/models" + "github.com/diggerhq/digger/next/dbmodels" + next_utils "github.com/diggerhq/digger/next/utils" "github.com/dominikbraun/graph" "github.com/gin-gonic/gin" "github.com/google/go-github/v61/github" @@ -32,7 +37,7 @@ import ( type DiggerController struct { CiBackendProvider ci_backends.CiBackendProvider - GithubClientProvider utils.GithubClientProvider + GithubClientProvider next_utils.GithubClientProvider } func (d DiggerController) GithubAppWebHook(c *gin.Context) { @@ -93,6 +98,12 @@ func (d DiggerController) GithubAppWebHook(c *gin.Context) { log.Printf("IssueCommentEvent, action: %v\n", *event.Action) case *github.PullRequestEvent: log.Printf("Got pull request event for %d", *event.PullRequest.ID) + err := handlePullRequestEvent(gh, event, d.CiBackendProvider) + if err != nil { + log.Printf("handlePullRequestEvent error: %v", err) + c.String(http.StatusInternalServerError, err.Error()) + return + } case *github.PushEvent: log.Printf("Got push event for %d", event.Repo.URL) default: @@ -223,7 +234,7 @@ func (d DiggerController) GithubSetupExchangeCode(c *gin.Context) { } log.Printf("Found credentials for GitHub app %v with id %d", *cfg.Name, cfg.GetID()) - _, err = models.DB.CreateGithubApp(cfg.GetName(), cfg.GetID(), cfg.GetHTMLURL()) + _, err = dbmodels.DB.CreateGithubApp(cfg.GetName(), cfg.GetID(), cfg.GetHTMLURL()) if err != nil { c.Error(fmt.Errorf("Failed to create github app record on callback")) } @@ -245,13 +256,13 @@ func (d DiggerController) 
GithubSetupExchangeCode(c *gin.Context) { } func createOrGetDiggerRepoForGithubRepo(ghRepoFullName string, ghRepoOrganisation string, ghRepoName string, ghRepoUrl string, installationId int64) (*model.Repo, *model.Organization, error) { - link, err := models.DB.GetGithubInstallationLinkForInstallationId(installationId) + link, err := dbmodels.DB.GetGithubInstallationLinkForInstallationId(installationId) if err != nil { log.Printf("Error fetching installation link: %v", err) return nil, nil, err } orgId := link.OrganizationID - org, err := models.DB.GetOrganisationById(orgId) + org, err := dbmodels.DB.GetOrganisationById(orgId) if err != nil { log.Printf("Error fetching organisation by id: %v, error: %v\n", orgId, err) return nil, nil, err @@ -259,7 +270,7 @@ func createOrGetDiggerRepoForGithubRepo(ghRepoFullName string, ghRepoOrganisatio diggerRepoName := strings.ReplaceAll(ghRepoFullName, "/", "-") - repo, err := models.DB.GetRepo(orgId, diggerRepoName) + repo, err := dbmodels.DB.GetRepo(orgId, diggerRepoName) if err != nil { log.Printf("Error fetching repo: %v", err) @@ -271,7 +282,7 @@ func createOrGetDiggerRepoForGithubRepo(ghRepoFullName string, ghRepoOrganisatio return repo, org, nil } - repo, err = models.DB.CreateRepo(diggerRepoName, ghRepoFullName, ghRepoOrganisation, ghRepoName, ghRepoUrl, org, ` + repo, err = dbmodels.DB.CreateRepo(diggerRepoName, ghRepoFullName, ghRepoOrganisation, ghRepoName, ghRepoUrl, org, ` generate_projects: include: "." 
`) @@ -283,7 +294,7 @@ generate_projects: return repo, org, nil } -func handleInstallationRepositoriesAddedEvent(ghClientProvider utils.GithubClientProvider, payload *github.InstallationRepositoriesEvent) error { +func handleInstallationRepositoriesAddedEvent(ghClientProvider next_utils.GithubClientProvider, payload *github.InstallationRepositoriesEvent) error { installationId := *payload.Installation.ID login := *payload.Installation.Account.Login accountId := *payload.Installation.Account.ID @@ -294,7 +305,7 @@ func handleInstallationRepositoriesAddedEvent(ghClientProvider utils.GithubClien repoOwner := strings.Split(*repo.FullName, "/")[0] repoName := *repo.Name repoUrl := fmt.Sprintf("https://github.com/%v", repoFullName) - _, err := models.DB.GithubRepoAdded(installationId, appId, login, accountId, repoFullName) + _, err := dbmodels.DB.GithubRepoAdded(installationId, appId, login, accountId, repoFullName) if err != nil { log.Printf("GithubRepoAdded failed, error: %v\n", err) return err @@ -314,7 +325,7 @@ func handleInstallationRepositoriesDeletedEvent(payload *github.InstallationRepo appId := *payload.Installation.AppID for _, repo := range payload.RepositoriesRemoved { repoFullName := *repo.FullName - _, err := models.DB.GithubRepoRemoved(installationId, appId, repoFullName) + _, err := dbmodels.DB.GithubRepoRemoved(installationId, appId, repoFullName) if err != nil { return err } @@ -337,7 +348,7 @@ func handleInstallationCreatedEvent(installation *github.InstallationEvent) erro repoUrl := fmt.Sprintf("https://github.com/%v", repoFullName) log.Printf("Adding a new installation %d for repo: %s", installationId, repoFullName) - _, err := models.DB.GithubRepoAdded(installationId, appId, login, accountId, repoFullName) + _, err := dbmodels.DB.GithubRepoAdded(installationId, appId, login, accountId, repoFullName) if err != nil { return err } @@ -353,11 +364,11 @@ func handleInstallationDeletedEvent(installation *github.InstallationEvent) erro installationId := 
*installation.Installation.ID appId := *installation.Installation.AppID - link, err := models.DB.GetGithubInstallationLinkForInstallationId(installationId) + link, err := dbmodels.DB.GetGithubInstallationLinkForInstallationId(installationId) if err != nil { return err } - _, err = models.DB.MakeGithubAppInstallationLinkInactive(link) + _, err = dbmodels.DB.MakeGithubAppInstallationLinkInactive(link) if err != nil { return err } @@ -365,7 +376,7 @@ func handleInstallationDeletedEvent(installation *github.InstallationEvent) erro for _, repo := range installation.Repositories { repoFullName := *repo.FullName log.Printf("Removing an installation %d for repo: %s", installationId, repoFullName) - _, err := models.DB.GithubRepoRemoved(installationId, appId, repoFullName) + _, err := dbmodels.DB.GithubRepoRemoved(installationId, appId, repoFullName) if err != nil { return err } @@ -373,7 +384,7 @@ func handleInstallationDeletedEvent(installation *github.InstallationEvent) erro return nil } -func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) error { +func handlePushEvent(gh next_utils.GithubClientProvider, payload *github.PushEvent) error { installationId := *payload.Installation.ID repoName := *payload.Repo.Name repoFullName := *payload.Repo.FullName @@ -382,7 +393,7 @@ func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) e ref := *payload.Ref defaultBranch := *payload.Repo.DefaultBranch - link, err := models.DB.GetGithubAppInstallationLink(installationId) + link, err := dbmodels.DB.GetGithubAppInstallationLink(installationId) if err != nil { log.Printf("Error getting GetGithubAppInstallationLink: %v", err) return fmt.Errorf("error getting github app link") @@ -390,7 +401,7 @@ func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) e orgId := link.OrganizationID diggerRepoName := strings.ReplaceAll(repoFullName, "/", "-") - repo, err := models.DB.GetRepo(orgId, diggerRepoName) + repo, err := 
dbmodels.DB.GetRepo(orgId, diggerRepoName) if err != nil { log.Printf("Error getting Repo: %v", err) return fmt.Errorf("error getting github app link") @@ -400,7 +411,7 @@ func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) e return fmt.Errorf("Repo not found: Org: %v | repo: %v", orgId, diggerRepoName) } - _, token, err := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) + _, token, err := next_utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) if err != nil { log.Printf("Error getting github service: %v", err) return fmt.Errorf("error getting github service") @@ -419,7 +430,7 @@ func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) e log.Printf("ERROR load digger.yml: %v", err) return fmt.Errorf("error loading digger.yml %v", err) } - models.DB.UpdateRepoDiggerConfig(link.OrganizationID, *config, repo, isMainBranch) + dbmodels.DB.UpdateRepoDiggerConfig(link.OrganizationID, *config, repo, isMainBranch) return nil }) if err != nil { @@ -429,202 +440,263 @@ func handlePushEvent(gh utils.GithubClientProvider, payload *github.PushEvent) e return nil } -//func handlePullRequestEvent(gh utils.GithubClientProvider, payload *github.PullRequestEvent, ciBackendProvider ci_backends.CiBackendProvider) error { -// installationId := *payload.Installation.ID -// repoName := *payload.Repo.Name -// repoOwner := *payload.Repo.Owner.Login -// repoFullName := *payload.Repo.FullName -// cloneURL := *payload.Repo.CloneURL -// prNumber := *payload.PullRequest.Number -// isDraft := payload.PullRequest.GetDraft() -// commitSha := payload.PullRequest.Head.GetSHA() -// branch := payload.PullRequest.Head.GetRef() -// -// link, err := models.DB.GetGithubAppInstallationLink(installationId) -// if err != nil { -// log.Printf("Error getting GetGithubAppInstallationLink: %v", err) -// return fmt.Errorf("error getting github app link") -// } -// organisationId := link.OrganizationID -// -// 
diggerYmlStr, ghService, config, projectsGraph, _, _, err := getDiggerConfigForPR(gh, installationId, repoFullName, repoOwner, repoName, cloneURL, prNumber) -// if err != nil { -// ghService, _, err := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) -// if err != nil { -// log.Printf("GetGithubService error: %v", err) -// return fmt.Errorf("error getting ghService to post error comment") -// } -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Could not load digger config, error: %v", err)) -// log.Printf("getDiggerConfigForPR error: %v", err) -// return fmt.Errorf("error getting digger config") -// } -// -// impactedProjects, impactedProjectsSourceMapping, _, err := dg_github.ProcessGitHubPullRequestEvent(payload, config, projectsGraph, ghService) -// if err != nil { -// log.Printf("Error processing event: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Error processing event: %v", err)) -// return fmt.Errorf("error processing event") -// } -// -// jobsForImpactedProjects, _, err := dg_github.ConvertGithubPullRequestEventToJobs(payload, impactedProjects, nil, *config) -// if err != nil { -// log.Printf("Error converting event to jobsForImpactedProjects: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Error converting event to jobsForImpactedProjects: %v", err)) -// return fmt.Errorf("error converting event to jobsForImpactedProjects") -// } -// -// if len(jobsForImpactedProjects) == 0 { -// // do not report if no projects are impacted to minimise noise in the PR thread -// // TODO use status checks instead: https://github.com/diggerhq/digger/issues/1135 -// log.Printf("No projects impacted; not starting any jobs") -// // This one is for aggregate reporting -// err = utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) -// return nil -// } -// -// diggerCommand, err := orchestrator_scheduler.GetCommandFromJob(jobsForImpactedProjects[0]) -// 
if err != nil { -// log.Printf("could not determine digger command from job: %v", jobsForImpactedProjects[0].Commands) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: could not determine digger command from job: %v", err)) -// return fmt.Errorf("unkown digger command in comment %v", err) -// } -// -// if *diggerCommand == orchestrator_scheduler.DiggerCommandNoop { -// log.Printf("job is of type noop, no actions top perform") -// return nil -// } -// -// // perform locking/unlocking in backend -// //if config.PrLocks { -// // for _, project := range impactedProjects { -// // prLock := dg_locking.PullRequestLock{ -// // InternalLock: locking.BackendDBLock{ -// // OrgId: organisationId, -// // }, -// // CIService: ghService, -// // Reporter: comment_updater.NoopReporter{}, -// // ProjectName: project.Name, -// // ProjectNamespace: repoFullName, -// // PrNumber: prNumber, -// // } -// // err = dg_locking.PerformLockingActionFromCommand(prLock, *diggerCommand) -// // if err != nil { -// // utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Failed perform lock action on project: %v %v", project.Name, err)) -// // return fmt.Errorf("failed to perform lock action on project: %v, %v", project.Name, err) -// // } -// // } -// //} -// -// // if commands are locking or unlocking we don't need to trigger any jobs -// if *diggerCommand == orchestrator_scheduler.DiggerCommandUnlock || -// *diggerCommand == orchestrator_scheduler.DiggerCommandLock { -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":white_check_mark: Command %v completed successfully", *diggerCommand)) -// return nil -// } -// -// if !config.AllowDraftPRs && isDraft { -// log.Printf("Draft PRs are disabled, skipping PR: %v", prNumber) -// return nil -// } -// -// commentReporter, err := utils.InitCommentReporter(ghService, prNumber, ":construction_worker: Digger starting...") -// if err != nil { -// log.Printf("Error initializing comment reporter: %v", err) -// return 
fmt.Errorf("error initializing comment reporter") -// } -// -// err = utils.ReportInitialJobsStatus(commentReporter, jobsForImpactedProjects) -// if err != nil { -// log.Printf("Failed to comment initial status for jobs: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Failed to comment initial status for jobs: %v", err)) -// return fmt.Errorf("failed to comment initial status for jobs") -// } -// -// err = utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) -// if err != nil { -// log.Printf("error setting status for PR: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: error setting status for PR: %v", err)) -// fmt.Errorf("error setting status for PR: %v", err) -// } -// -// impactedProjectsMap := make(map[string]dg_configuration.Project) -// for _, p := range impactedProjects { -// impactedProjectsMap[p.Name] = p -// } -// -// impactedJobsMap := make(map[string]orchestrator_scheduler.Job) -// for _, j := range jobsForImpactedProjects { -// impactedJobsMap[j.ProjectName] = j -// } -// -// commentId, err := strconv.ParseInt(commentReporter.CommentId, 10, 64) -// if err != nil { -// log.Printf("strconv.ParseInt error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: could not handle commentId: %v", err)) -// } -// batchId, _, err := utils.ConvertJobsToDiggerJobs(*diggerCommand, models2.DiggerVCSGithub, organisationId, impactedJobsMap, impactedProjectsMap, projectsGraph, installationId, branch, prNumber, repoOwner, repoName, repoFullName, commitSha, commentId, diggerYmlStr, 0) -// if err != nil { -// log.Printf("ConvertJobsToDiggerJobs error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err)) -// return fmt.Errorf("error converting jobs") -// } -// -// if config.CommentRenderMode == dg_configuration.CommentRenderModeGroupByModule { -// sourceDetails, err := 
comment_updater.PostInitialSourceComments(ghService, prNumber, impactedProjectsSourceMapping) -// if err != nil { -// log.Printf("PostInitialSourceComments error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: PostInitialSourceComments error: %v", err)) -// return fmt.Errorf("error posting initial comments") -// } -// batch, err := models.DB.GetDiggerBatch(batchId) -// if err != nil { -// log.Printf("GetDiggerBatch error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: PostInitialSourceComments error: %v", err)) -// return fmt.Errorf("error getting digger batch") -// } -// batch.SourceDetails, err = json.Marshal(sourceDetails) -// if err != nil { -// log.Printf("sourceDetails, json Marshal error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: json Marshal error: %v", err)) -// return fmt.Errorf("error marshalling sourceDetails") -// } -// err = models.DB.UpdateDiggerBatch(batch) -// if err != nil { -// log.Printf("UpdateDiggerBatch error: %v", err) -// utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: UpdateDiggerBatch error: %v", err)) -// return fmt.Errorf("error updating digger batch") -// } -// } -// -// segment.Track(strconv.Itoa(int(organisationId)), "backend_trigger_job") -// -// //ciBackend, err := ciBackendProvider.GetCiBackend( -// // ci_backends.CiBackendOptions{ -// // GithubClientProvider: gh, -// // GithubInstallationId: installationId, -// // RepoName: repoName, -// // RepoOwner: repoOwner, -// // RepoFullName: repoFullName, -// // }, -// //) -// //if err != nil { -// // log.Printf("GetCiBackend error: %v", err) -// // utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: GetCiBackend error: %v", err)) -// // return fmt.Errorf("error fetching ci backed %v", err) -// //} -// // -// //err = TriggerDiggerJobs(ciBackend, repoFullName, repoOwner, repoName, batchId, prNumber, ghService, gh) -// //if err != nil { -// // 
log.Printf("TriggerDiggerJobs error: %v", err) -// // utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: TriggerDiggerJobs error: %v", err)) -// // return fmt.Errorf("error triggerring Digger Jobs") -// //} -// -// return nil -//} - -func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string, prNumber int) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], error) { - ghService, token, err := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) +func handlePullRequestEvent(gh next_utils.GithubClientProvider, payload *github.PullRequestEvent, ciBackendProvider ci_backends.CiBackendProvider) error { + installationId := *payload.Installation.ID + repoName := *payload.Repo.Name + repoOwner := *payload.Repo.Owner.Login + repoFullName := *payload.Repo.FullName + //cloneURL := *payload.Repo.CloneURL + prNumber := *payload.PullRequest.Number + isDraft := payload.PullRequest.GetDraft() + commitSha := payload.PullRequest.Head.GetSHA() + branch := payload.PullRequest.Head.GetRef() + + link, err := dbmodels.DB.GetGithubAppInstallationLink(installationId) + if err != nil { + log.Printf("Error getting GetGithubAppInstallationLink: %v", err) + return fmt.Errorf("error getting github app link") + } + organisationId := link.OrganizationID + segment.Track(organisationId, "backend_trigger_job") + + ghService, _, err := next_utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) + if err != nil { + log.Printf("GetGithubService error: %v", err) + return fmt.Errorf("error getting ghService to post error comment") + } + + // impacated projects should be fetched from a query + r := dbmodels.DB.Query.Repo + repo, err := dbmodels.DB.Query.Repo.Where(r.RepoFullName.Eq(repoFullName), r.OrganizationID.Eq(organisationId)).First() + if err != nil { + 
log.Printf("could not find repo: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Error could not find repository for org: %v", err)) + return fmt.Errorf("could not find repo: %v", err) + } + p := dbmodels.DB.Query.Project + projects, err := dbmodels.DB.Query.Project.Where(p.RepoID.Eq(repo.ID)).Find() + + var dgprojects []dg_configuration.Project = []dg_configuration.Project{} + for _, proj := range projects { + dgprojects = append(dgprojects, dbmodels.ToDiggerProject(proj)) + } + projectsGraph, err := dg_configuration.CreateProjectDependencyGraph(dgprojects) + var config *dg_configuration.DiggerConfig = &dg_configuration.DiggerConfig{ + ApplyAfterMerge: true, + AllowDraftPRs: false, + CommentRenderMode: "", + DependencyConfiguration: dg_configuration.DependencyConfiguration{ + Mode: dg_configuration.DependencyConfigurationHard, + }, + PrLocks: false, + Projects: dgprojects, + AutoMerge: false, + Telemetry: false, + Workflows: map[string]dg_configuration.Workflow{ + "default": dg_configuration.Workflow{ + EnvVars: nil, + Plan: nil, + Apply: nil, + Configuration: &dg_configuration.WorkflowConfiguration{ + OnPullRequestPushed: []string{"digger plan"}, + OnPullRequestClosed: []string{}, + OnPullRequestConvertedToDraft: []string{}, + OnCommitToDefault: []string{}, + }, + }, + }, + MentionDriftedProjectsInPR: false, + TraverseToNestedProjects: false, + } + + impactedProjects, _, _, err := dg_github.ProcessGitHubPullRequestEvent(payload, config, projectsGraph, ghService) + if err != nil { + log.Printf("Error processing event: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Error processing event: %v", err)) + return fmt.Errorf("error processing event") + } + + jobsForImpactedProjects, _, err := dg_github.ConvertGithubPullRequestEventToJobs(payload, impactedProjects, nil, *config) + if err != nil { + log.Printf("Error converting event to jobsForImpactedProjects: %v", err) + 
backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Error converting event to jobsForImpactedProjects: %v", err)) + return fmt.Errorf("error converting event to jobsForImpactedProjects") + } + + if len(jobsForImpactedProjects) == 0 { + // do not report if no projects are impacted to minimise noise in the PR thread + // TODO use status checks instead: https://github.com/diggerhq/digger/issues/1135 + log.Printf("No projects impacted; not starting any jobs") + // This one is for aggregate reporting + err = backend_utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) + return nil + } + + diggerCommand, err := orchestrator_scheduler.GetCommandFromJob(jobsForImpactedProjects[0]) + if err != nil { + log.Printf("could not determine digger command from job: %v", jobsForImpactedProjects[0].Commands) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: could not determine digger command from job: %v", err)) + return fmt.Errorf("unknown digger command in comment %v", err) + } + + if *diggerCommand == orchestrator_scheduler.DiggerCommandNoop { + log.Printf("job is of type noop, no actions to perform") + return nil + } + + if !config.AllowDraftPRs && isDraft { + log.Printf("Draft PRs are disabled, skipping PR: %v", prNumber) + return nil + } + + commentReporter, err := backend_utils.InitCommentReporter(ghService, prNumber, ":construction_worker: Digger starting...") + if err != nil { + log.Printf("Error initializing comment reporter: %v", err) + return fmt.Errorf("error initializing comment reporter") + } + + err = backend_utils.ReportInitialJobsStatus(commentReporter, jobsForImpactedProjects) + if err != nil { + log.Printf("Failed to comment initial status for jobs: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: Failed to comment initial status for jobs: %v", err)) + return fmt.Errorf("failed to comment initial status for jobs") + } + + err = 
backend_utils.SetPRStatusForJobs(ghService, prNumber, jobsForImpactedProjects) + if err != nil { + log.Printf("error setting status for PR: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: error setting status for PR: %v", err)) + fmt.Errorf("error setting status for PR: %v", err) + } + + impactedProjectsMap := make(map[string]dg_configuration.Project) + for _, p := range impactedProjects { + impactedProjectsMap[p.Name] = p + } + + impactedJobsMap := make(map[string]orchestrator_scheduler.Job) + for _, j := range jobsForImpactedProjects { + impactedJobsMap[j.ProjectName] = j + } + + commentId, err := strconv.ParseInt(commentReporter.CommentId, 10, 64) + if err != nil { + log.Printf("strconv.ParseInt error: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: could not handle commentId: %v", err)) + } + batchId, _, err := ConvertJobsToDiggerJobs(*diggerCommand, dbmodels.DiggerVCSGithub, organisationId, impactedJobsMap, impactedProjectsMap, projectsGraph, installationId, branch, prNumber, repoOwner, repoName, repoFullName, commitSha, commentId, "", 0) + if err != nil { + log.Printf("ConvertJobsToDiggerJobs error: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: ConvertJobsToDiggerJobs error: %v", err)) + return fmt.Errorf("error converting jobs") + } + + ciBackend, err := ciBackendProvider.GetCiBackend( + ci_backends.CiBackendOptions{ + GithubClientProvider: gh, + GithubInstallationId: installationId, + RepoName: repoName, + RepoOwner: repoOwner, + RepoFullName: repoFullName, + }, + ) + if err != nil { + log.Printf("GetCiBackend error: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: GetCiBackend error: %v", err)) + return fmt.Errorf("error fetching ci backend %v", err) + } + + err = TriggerDiggerJobs(ciBackend, repoFullName, repoOwner, repoName, *batchId, prNumber, ghService, gh) + if err != nil { + log.Printf("TriggerDiggerJobs 
error: %v", err) + backend_utils.InitCommentReporter(ghService, prNumber, fmt.Sprintf(":x: TriggerDiggerJobs error: %v", err)) + return fmt.Errorf("error triggering Digger Jobs") + } + + return nil +} + +func TriggerDiggerJobs(ciBackend ci_backends.CiBackend, repoFullName string, repoOwner string, repoName string, batchId string, prNumber int, prService ci.PullRequestService, gh next_utils.GithubClientProvider) error { + _, err := dbmodels.DB.GetDiggerBatch(batchId) + if err != nil { + log.Printf("failed to get digger batch, %v\n", err) + return fmt.Errorf("failed to get digger batch, %v\n", err) + } + diggerJobs, err := dbmodels.DB.GetPendingParentDiggerJobs(batchId) + + if err != nil { + log.Printf("failed to get pending digger jobs, %v\n", err) + return fmt.Errorf("failed to get pending digger jobs, %v\n", err) + } + + log.Printf("number of diggerJobs:%v\n", len(diggerJobs)) + + for _, job := range diggerJobs { + if job.JobSpec == nil { + return fmt.Errorf("GitHub job can't be nil") + } + jobString := string(job.JobSpec) + log.Printf("jobString: %v \n", jobString) + + // TODO: make workflow file name configurable + err = services.ScheduleJob(ciBackend, repoFullName, repoOwner, repoName, batchId, &job, gh) + if err != nil { + log.Printf("failed to trigger CI workflow, %v\n", err) + return fmt.Errorf("failed to trigger CI workflow, %v\n", err) + } + } + return nil +} + +func ConvertJobsToDiggerJobs(jobType orchestrator_scheduler.DiggerCommand, vcsType dbmodels.DiggerVCSType, organisationId string, jobsMap map[string]orchestrator_scheduler.Job, projectMap map[string]dg_configuration.Project, projectsGraph graph.Graph[string, dg_configuration.Project], githubInstallationId int64, branch string, prNumber int, repoOwner string, repoName string, repoFullName string, commitSha string, commentId int64, diggerConfigStr string, gitlabProjectId int) (*string, []*model.DiggerJob, error) { + result := make([]*model.DiggerJob, 0) + organisation, err := 
dbmodels.DB.GetOrganisationById(organisationId) + if err != nil { + log.Printf("Error getting organisation: %v %v", organisationId, err) + return nil, nil, fmt.Errorf("error retriving organisation") + } + organisationName := organisation.Title + + backendHostName := os.Getenv("HOSTNAME") + + log.Printf("Number of Jobs: %v\n", len(jobsMap)) + marshalledJobsMap := map[string][]byte{} + for projectName, job := range jobsMap { + jobToken, err := dbmodels.DB.CreateDiggerJobToken(organisationId) + if err != nil { + log.Printf("Error creating job token: %v %v", projectName, err) + return nil, nil, fmt.Errorf("error creating job token") + } + + marshalled, err := json.Marshal(orchestrator_scheduler.JobToJson(job, jobType, organisationName, branch, commitSha, jobToken.Value, backendHostName, projectMap[projectName])) + if err != nil { + return nil, nil, err + } + marshalledJobsMap[job.ProjectName] = marshalled + } + + log.Printf("marshalledJobsMap: %v\n", marshalledJobsMap) + + batch, err := dbmodels.DB.CreateDiggerBatch(vcsType, githubInstallationId, repoOwner, repoName, repoFullName, prNumber, diggerConfigStr, branch, jobType, &commentId, gitlabProjectId) + if err != nil { + return nil, nil, fmt.Errorf("failed to create batch: %v", err) + } + for pname, _ := range marshalledJobsMap { + _, err := dbmodels.DB.CreateDiggerJob(batch.ID, marshalledJobsMap[pname], projectMap[pname].WorkflowFile) + if err != nil { + return nil, nil, fmt.Errorf("failed to create job: %v %v", pname, err) + } + } + + if err != nil { + return nil, nil, err + } + + return &batch.ID, result, nil +} + +func getDiggerConfigForBranch(gh next_utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, branch string, prNumber int) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], error) { + ghService, token, err := next_utils.GetGithubService(gh, installationId, repoFullName, 
repoOwner, repoName) if err != nil { log.Printf("Error getting github service: %v", err) return "", nil, nil, nil, fmt.Errorf("error getting github service") @@ -659,8 +731,8 @@ func getDiggerConfigForBranch(gh utils.GithubClientProvider, installationId int6 } // TODO: Refactor this func to receive ghService as input -func getDiggerConfigForPR(gh utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, prNumber int) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], *string, *string, error) { - ghService, _, err := utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) +func getDiggerConfigForPR(gh next_utils.GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string, cloneUrl string, prNumber int) (string, *dg_github.GithubService, *dg_configuration.DiggerConfig, graph.Graph[string, dg_configuration.Project], *string, *string, error) { + ghService, _, err := next_utils.GetGithubService(gh, installationId, repoFullName, repoOwner, repoName) if err != nil { log.Printf("Error getting github service: %v", err) return "", nil, nil, nil, nil, nil, fmt.Errorf("error getting github service") @@ -684,7 +756,7 @@ func getDiggerConfigForPR(gh utils.GithubClientProvider, installationId int64, r } func GetRepoByInstllationId(installationId int64, repoOwner string, repoName string) (*model.Repo, error) { - link, err := models.DB.GetGithubAppInstallationLink(installationId) + link, err := dbmodels.DB.GetGithubAppInstallationLink(installationId) if err != nil { log.Printf("Error getting GetGithubAppInstallationLink: %v", err) return nil, fmt.Errorf("error getting github app link") @@ -696,7 +768,7 @@ func GetRepoByInstllationId(installationId int64, repoOwner string, repoName str } diggerRepoName := repoOwner + "-" + repoName - repo, err := models.DB.GetRepo(link.OrganizationID, 
diggerRepoName) + repo, err := dbmodels.DB.GetRepo(link.OrganizationID, diggerRepoName) return repo, nil } @@ -733,14 +805,14 @@ func (d DiggerController) GithubAppCallbackPage(c *gin.Context) { } orgId := c.GetString(middleware.ORGANISATION_ID_KEY) - org, err := models.DB.GetOrganisationById(orgId) + org, err := dbmodels.DB.GetOrganisationById(orgId) if err != nil { log.Printf("Error fetching organisation: %v", err) c.JSON(http.StatusInternalServerError, gin.H{"error": "Error fetching organisation"}) return } - _, err = models.DB.CreateGithubInstallationLink(org, installationId64) + _, err = dbmodels.DB.CreateGithubInstallationLink(org, installationId64) if err != nil { log.Printf("Error saving CreateGithubInstallationLink to database: %v", err) c.JSON(http.StatusInternalServerError, gin.H{"error": "Error updating GitHub installation"}) @@ -758,14 +830,14 @@ func (d DiggerController) GithubReposPage(c *gin.Context) { return } - link, err := models.DB.GetGithubInstallationLinkForOrg(orgId) + link, err := dbmodels.DB.GetGithubInstallationLinkForOrg(orgId) if err != nil { log.Printf("GetGithubInstallationLinkForOrg error: %v\n", err) c.String(http.StatusForbidden, "Failed to find any GitHub installations for this org") return } - installations, err := models.DB.GetGithubAppInstallations(link.GithubInstallationID) + installations, err := dbmodels.DB.GetGithubAppInstallations(link.GithubInstallationID) if err != nil { log.Printf("GetGithubAppInstallations error: %v\n", err) c.String(http.StatusForbidden, "Failed to find any GitHub installations for this org") @@ -797,7 +869,7 @@ func (d DiggerController) GithubReposPage(c *gin.Context) { // why this validation is needed: https://roadie.io/blog/avoid-leaking-github-org-data/ // validation based on https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/generating-a-user-access-token-for-a-github-app , step 3 -func validateGithubCallback(githubClientProvider utils.GithubClientProvider, 
clientId string, clientSecret string, code string, installationId int64) (bool, error) { +func validateGithubCallback(githubClientProvider next_utils.GithubClientProvider, clientId string, clientSecret string, code string, installationId int64) (bool, error) { ctx := context.Background() type OAuthAccessResponse struct { AccessToken string `json:"access_token"` diff --git a/next/controllers/projects.go b/next/controllers/projects.go new file mode 100644 index 000000000..f310fc59e --- /dev/null +++ b/next/controllers/projects.go @@ -0,0 +1,262 @@ +package controllers + +import ( + "encoding/json" + "fmt" + "github.com/diggerhq/digger/backend/models" + "github.com/diggerhq/digger/libs/comment_utils/reporting" + "github.com/diggerhq/digger/libs/digger_config" + orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/libs/terraform_utils" + "github.com/diggerhq/digger/next/dbmodels" + //"github.com/diggerhq/digger/next/middleware" + "github.com/diggerhq/digger/next/model" + "github.com/diggerhq/digger/next/utils" + "github.com/gin-gonic/gin" + "log" + "net/http" + "time" +) + +type SetJobStatusRequest struct { + Status string `json:"status"` + Timestamp time.Time `json:"timestamp"` + JobSummary *terraform_utils.PlanSummary `json:"job_summary"` + Footprint *terraform_utils.TerraformPlanFootprint `json:"job_plan_footprint"` + PrCommentUrl string `json:"pr_comment_url"` + TerraformOutput string `json:"terraform_output""` +} + +func (d DiggerController) SetJobStatusForProject(c *gin.Context) { + jobId := c.Param("jobId") + + //orgId, exists := c.Get(middleware.ORGANISATION_ID_KEY) + + //if !exists { + // c.String(http.StatusForbidden, "Not allowed to access this resource") + // return + //} + + var request SetJobStatusRequest + + err := c.BindJSON(&request) + + if err != nil { + log.Printf("Error binding JSON: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error binding JSON"}) + return + } + + job, err := 
dbmodels.DB.GetDiggerJob(jobId) + if err != nil { + log.Printf("Error fetching job: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error fetching job"}) + return + } + + batch, err := dbmodels.DB.GetDiggerBatch(job.BatchID) + if err != nil { + log.Printf("Error getting digger batch: %v ", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "error fetching batch"}) + return + } + + switch request.Status { + case "started": + job.Status = int16(orchestrator_scheduler.DiggerJobStarted) + err := dbmodels.DB.UpdateDiggerJob(job) + if err != nil { + log.Printf("Error updating job status: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error updating job status"}) + return + } + + client, _, err := utils.GetGithubClient(d.GithubClientProvider, batch.GithubInstallationID, batch.RepoFullName) + if err != nil { + log.Printf("Error Creating github client: %v", err) + } else { + _, workflowRunUrl, err := utils.GetWorkflowIdAndUrlFromDiggerJobId(client, batch.RepoOwner, batch.RepoName, job.DiggerJobID) + if err != nil { + log.Printf("Error getting workflow ID from job: %v", err) + } else { + job.WorkflowRunURL = workflowRunUrl + err = dbmodels.DB.UpdateDiggerJob(job) + if err != nil { + log.Printf("Error updating digger job: %v", err) + } + } + } + case "succeeded": + job.Status = int16(orchestrator_scheduler.DiggerJobSucceeded) + job.TerraformOutput = request.TerraformOutput + if request.Footprint != nil { + job.PlanFootprint, err = json.Marshal(request.Footprint) + if err != nil { + log.Printf("Error marshalling plan footprint: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error marshalling plan footprint"}) + } + } + job.PrCommentURL = request.PrCommentUrl + err := dbmodels.DB.UpdateDiggerJob(job) + if err != nil { + log.Printf("Error updating job status: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error saving job"}) + return + } + + //go func() { + // defer func() { + // if r := 
recover(); r != nil { + // log.Printf("Recovered from panic while executing goroutine dispatching digger jobs: %v ", r) + // } + // }() + // ghClientProvider := d.GithubClientProvider + // installationLink, err := models.DB.GetGithubInstallationLinkForOrg(orgId) + // if err != nil { + // log.Printf("Error fetching installation link: %v", err) + // return + // } + // + // installations, err := models.DB.GetGithubAppInstallations(installationLink.GithubInstallationId) + // if err != nil { + // log.Printf("Error fetching installation: %v", err) + // return + // } + // + // if len(installations) == 0 { + // log.Printf("No installations found for installation id %v", installationLink.GithubInstallationId) + // return + // } + // + // jobLink, err := models.DB.GetDiggerJobLink(jobId) + // + // if err != nil { + // log.Printf("Error fetching job link: %v", err) + // return + // } + // + // workflowFileName := "digger_workflow.yml" + // + // if !strings.Contains(jobLink.RepoFullName, "/") { + // log.Printf("Repo full name %v does not contain a slash", jobLink.RepoFullName) + // return + // } + // + // repoFullNameSplit := strings.Split(jobLink.RepoFullName, "/") + // client, _, err := ghClientProvider.Get(installations[0].GithubAppId, installationLink.GithubInstallationId) + // err = services.DiggerJobCompleted(client, batch.ID, job, jobLink.RepoFullName, repoFullNameSplit[0], repoFullNameSplit[1], workflowFileName, d.GithubClientProvider) + // if err != nil { + // log.Printf("Error triggering job: %v", err) + // return + // } + //}() + + // store digger job summary + if request.JobSummary != nil { + dbmodels.DB.UpdateDiggerJobSummary(job.DiggerJobID, request.JobSummary.ResourcesCreated, request.JobSummary.ResourcesUpdated, request.JobSummary.ResourcesDeleted) + } + + case "failed": + job.Status = int16(orchestrator_scheduler.DiggerJobFailed) + job.TerraformOutput = request.TerraformOutput + err := dbmodels.DB.UpdateDiggerJob(job) + if err != nil { + log.Printf("Error 
updating job status: %v", request.Status) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error saving job"}) + return + } + + default: + log.Printf("Unexpected status %v", request.Status) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error saving job"}) + return + } + job.StatusUpdatedAt = request.Timestamp + err = dbmodels.DB.GormDB.Save(&job).Error + if err != nil { + log.Printf("Error saving update job: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error saving job"}) + return + } + + // get batch ID + // check if all jobs have succeeded at this point + // if so, perform merge of PR (if configured to do so) + //batch := job.Batch + err = dbmodels.DB.UpdateBatchStatus(batch) + if err != nil { + log.Printf("Error updating batch status: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error updating batch status"}) + return + } + + //err = AutomergePRforBatchIfEnabled(d.GithubClientProvider, batch) + //if err != nil { + // log.Printf("Error merging PR with automerge option: %v", err) + // c.JSON(http.StatusInternalServerError, gin.H{"error": "Error merging PR with automerge option"}) + //} + + // return batch summary to client + res, err := dbmodels.BatchToJsonStruct(*batch) + if err != nil { + log.Printf("Error getting batch details: %v", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "Error getting batch details"}) + + } + + UpdateCommentsForBatchGroup(d.GithubClientProvider, batch, res.Jobs) + + c.JSON(http.StatusOK, res) +} + +func UpdateCommentsForBatchGroup(gh utils.GithubClientProvider, batch *model.DiggerBatch, serializedJobs []orchestrator_scheduler.SerializedJob) error { + diggerYmlString := batch.DiggerConfig + diggerConfigYml, err := digger_config.LoadDiggerConfigYamlFromString(diggerYmlString) + if err != nil { + log.Printf("Error loading digger config from batch: %v", err) + return fmt.Errorf("error loading digger config from batch: %v", err) + } + + if 
diggerConfigYml.CommentRenderMode == nil || + *diggerConfigYml.CommentRenderMode != digger_config.CommentRenderModeGroupByModule { + log.Printf("render mode is not group_by_module, skipping") + return nil + } + + if batch.BatchType != string(orchestrator_scheduler.DiggerCommandPlan) && batch.BatchType != string(orchestrator_scheduler.DiggerCommandApply) { + log.Printf("command is not plan or apply, skipping") + return nil + } + + ghService, _, err := utils.GetGithubService( + gh, + batch.GithubInstallationID, + batch.RepoFullName, + batch.RepoOwner, + batch.RepoName, + ) + + var sourceDetails []reporting.SourceDetails + err = json.Unmarshal(batch.SourceDetails, &sourceDetails) + if err != nil { + log.Printf("failed to unmarshall sourceDetails: %v", err) + return fmt.Errorf("failed to unmarshall sourceDetails: %v", err) + } + + // project_name => terraform output + projectToTerraformOutput := make(map[string]string) + // TODO: add projectName as a field of Job + for _, serialJob := range serializedJobs { + job, err := models.DB.GetDiggerJob(serialJob.DiggerJobId) + if err != nil { + return fmt.Errorf("Could not get digger job: %v", err) + } + projectToTerraformOutput[serialJob.ProjectName] = job.TerraformOutput + } + + for _, detail := range sourceDetails { + reporter := reporting.SourceGroupingReporter{serializedJobs, int(batch.PrNumber), ghService} + reporter.UpdateComment(sourceDetails, detail.SourceLocation, projectToTerraformOutput) + } + return nil +} diff --git a/next/models/github.go b/next/dbmodels/github.go similarity index 95% rename from next/models/github.go rename to next/dbmodels/github.go index 425662fe2..b5fe7c330 100644 --- a/next/models/github.go +++ b/next/dbmodels/github.go @@ -1,4 +1,4 @@ -package models +package dbmodels type GithubAppInstallStatus int diff --git a/next/models/orgs.go b/next/dbmodels/orgs.go similarity index 92% rename from next/models/orgs.go rename to next/dbmodels/orgs.go index 97bdf55fa..c974ccf1e 100644 --- 
a/next/models/orgs.go +++ b/next/dbmodels/orgs.go @@ -1,4 +1,4 @@ -package models +package dbmodels type ProjectStatus int diff --git a/next/dbmodels/projects.go b/next/dbmodels/projects.go new file mode 100644 index 000000000..d759642bb --- /dev/null +++ b/next/dbmodels/projects.go @@ -0,0 +1,24 @@ +package dbmodels + +import ( + "github.com/diggerhq/digger/libs/digger_config" + "github.com/diggerhq/digger/next/model" +) + +func ToDiggerProject(p *model.Project) digger_config.Project { + return digger_config.Project{ + Name: p.Name, + Dir: p.TerraformWorkingDir, + Workspace: "default", + Terragrunt: false, + OpenTofu: false, + Workflow: "default", + WorkflowFile: "digger_workflow.yml", + IncludePatterns: []string{}, + ExcludePatterns: []string{}, + DependencyProjects: []string{}, + DriftDetection: false, + AwsRoleToAssume: nil, + Generated: false, + } +} diff --git a/next/models/runs.go b/next/dbmodels/runs.go similarity index 97% rename from next/models/runs.go rename to next/dbmodels/runs.go index fca1361b3..3e86e050b 100644 --- a/next/models/runs.go +++ b/next/dbmodels/runs.go @@ -1,4 +1,4 @@ -package models +package dbmodels type DiggerRunStatus string diff --git a/next/dbmodels/scheduler.go b/next/dbmodels/scheduler.go new file mode 100644 index 000000000..4f51456b4 --- /dev/null +++ b/next/dbmodels/scheduler.go @@ -0,0 +1,70 @@ +package dbmodels + +import ( + "encoding/json" + "fmt" + orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/next/model" + "log" +) + +type DiggerVCSType string + +const DiggerVCSGithub DiggerVCSType = "github" +const DiggerVCSGitlab DiggerVCSType = "gitlab" + +type DiggerJobLinkStatus int8 + +const ( + DiggerJobLinkCreated DiggerJobLinkStatus = 1 + DiggerJobLinkSucceeded DiggerJobLinkStatus = 2 +) + +func JobToJsonStruct(j model.DiggerJob) (orchestrator_scheduler.SerializedJob, error) { + var job orchestrator_scheduler.JobJson + err := json.Unmarshal(j.JobSpec, &job) + if err != nil { + 
log.Printf("Failed to convert unmarshall Serialized job, %v", err) + } + + return orchestrator_scheduler.SerializedJob{ + DiggerJobId: j.DiggerJobID, + Status: orchestrator_scheduler.DiggerJobStatus(j.Status), + JobString: j.JobSpec, + PlanFootprint: j.PlanFootprint, + ProjectName: job.ProjectName, + WorkflowRunUrl: &j.WorkflowRunURL, + PRCommentUrl: j.PrCommentURL, + ResourcesCreated: 0, // todo: fetch from summary + ResourcesUpdated: 0, + ResourcesDeleted: 0, + }, nil +} + +func BatchToJsonStruct(b model.DiggerBatch) (orchestrator_scheduler.SerializedBatch, error) { + res := orchestrator_scheduler.SerializedBatch{ + ID: b.ID, + PrNumber: int(b.PrNumber), + Status: orchestrator_scheduler.DiggerBatchStatus(b.Status), + BranchName: b.BranchName, + RepoFullName: b.RepoFullName, + RepoOwner: b.RepoOwner, + RepoName: b.RepoName, + BatchType: orchestrator_scheduler.DiggerCommand(b.BatchType), + } + + serializedJobs := make([]orchestrator_scheduler.SerializedJob, 0) + jobs, err := DB.GetDiggerJobsForBatch(b.ID) + if err != nil { + return res, fmt.Errorf("could not unmarshall digger batch: %v", err) + } + for _, job := range jobs { + jobJson, err := JobToJsonStruct(job) + if err != nil { + return res, fmt.Errorf("error mapping job to struct (ID: %v); %v", job.ID, err) + } + serializedJobs = append(serializedJobs, jobJson) + } + res.Jobs = serializedJobs + return res, nil +} diff --git a/next/models/setup.go b/next/dbmodels/setup.go similarity index 97% rename from next/models/setup.go rename to next/dbmodels/setup.go index de7b86794..1ccff571e 100644 --- a/next/models/setup.go +++ b/next/dbmodels/setup.go @@ -1,4 +1,4 @@ -package models +package dbmodels import ( "github.com/diggerhq/digger/next/models_generated" diff --git a/next/models/storage.go b/next/dbmodels/storage.go similarity index 95% rename from next/models/storage.go rename to next/dbmodels/storage.go index 121d83b53..d96759d3e 100644 --- a/next/models/storage.go +++ b/next/dbmodels/storage.go @@ -1,4 +1,4 @@ 
-package models +package dbmodels import ( "errors" @@ -27,7 +27,7 @@ func (db *Database) GetProjectsFromContext(c *gin.Context, orgIdKey string) ([]m var projects []model.Project - err := db.GormDB.Preload("Organisation").Preload("Repo"). + err := db.GormDB. Joins("INNER JOIN repos ON projects.repo_id = repos.id"). Joins("INNER JOIN organizations ON projects.organization_id = organizations.id"). Where("projects.organization_id = ?", loggedInOrganisationId).Find(&projects).Error @@ -53,7 +53,7 @@ func (db *Database) GetReposFromContext(c *gin.Context, orgIdKey string) ([]mode var repos []model.Repo - err := db.GormDB.Preload("Organisation"). + err := db.GormDB. Joins("INNER JOIN organizations ON repos.organization_id = organizations.id"). Where("repos.organization_id = ?", loggedInOrganisationId).Find(&repos).Error @@ -164,7 +164,7 @@ func (db *Database) GetProjectByProjectId(c *gin.Context, projectId uint, orgIdK log.Printf("GetProjectByProjectId, org id: %v\n", loggedInOrganisationId) var project model.Project - err := db.GormDB.Preload("Organisation").Preload("Repo"). + err := db.GormDB. Joins("INNER JOIN repos ON projects.repo_id = repos.id"). Joins("INNER JOIN organizations ON projects.organization_id = organizations.id"). Where("projects.organization_id = ?", loggedInOrganisationId). @@ -182,7 +182,7 @@ func (db *Database) GetProject(projectId uint) (*model.Project, error) { log.Printf("GetProject, project id: %v\n", projectId) var project model.Project - err := db.GormDB.Preload("Organisation").Preload("Repo"). + err := db.GormDB. Where("id = ?", projectId). First(&project).Error @@ -200,7 +200,7 @@ func (db *Database) GetProjectByName(orgId any, repo *model.Repo, name string) ( log.Printf("GetProjectByName, org id: %v, project name: %v\n", orgId, name) var project model.Project - err := db.GormDB.Preload("Organisation").Preload("Repo"). + err := db.GormDB. Joins("INNER JOIN repos ON projects.repo_id = repos.id"). 
Joins("INNER JOIN organizations ON projects.organization_id = organizations.id"). Where("projects.organization_id = ?", orgId). @@ -223,7 +223,7 @@ func (db *Database) GetProjectByRepo(orgId any, repo *model.Repo) ([]model.Proje log.Printf("GetProjectByRepo, org id: %v, repo name: %v\n", orgId, repo.Name) projects := make([]model.Project, 0) - err := db.GormDB.Preload("Organisation").Preload("Repo"). + err := db.GormDB. Joins("INNER JOIN repos ON projects.repo_id = repos.id"). Joins("INNER JOIN organizations ON projects.organization_id = organizations.id"). Where("projects.organization_id = ?", orgId). @@ -275,7 +275,7 @@ func (db *Database) GetDefaultRepo(c *gin.Context, orgIdKey string) (*model.Repo log.Printf("getDefaultRepo, org id: %v\n", loggedInOrganisationId) var repo model.Repo - err := db.GormDB.Preload("organizations"). + err := db.GormDB. Joins("INNER JOIN organizations ON repos.organization_id = organizations.id"). Where("organizations.id = ?", loggedInOrganisationId).First(&repo).Error @@ -292,7 +292,7 @@ func (db *Database) GetDefaultRepo(c *gin.Context, orgIdKey string) (*model.Repo func (db *Database) GetRepo(orgIdKey any, repoName string) (*model.Repo, error) { var repo model.Repo - err := db.GormDB.Preload("Organisation"). + err := db.GormDB. Joins("INNER JOIN organizations ON repos.organization_id = organizations.id"). Where("organizations.id = ? AND repos.name=?", orgIdKey, repoName).First(&repo).Error @@ -310,7 +310,7 @@ func (db *Database) GetRepo(orgIdKey any, repoName string) (*model.Repo, error) func (db *Database) GetRepoById(orgIdKey any, repoId any) (*model.Repo, error) { var repo model.Repo - err := db.GormDB.Preload("organizations"). + err := db.GormDB. Joins("INNER JOIN organizations ON repos.organization_id = organizations.id"). Where("organizations.id = ? 
AND repos.ID=?", orgIdKey, repoId).First(&repo).Error @@ -421,7 +421,7 @@ func (db *Database) GetGithubAppInstallations(installationId int64) ([]model.Git // GetGithubAppInstallationLink repoFullName should be in the following format: org/repo_name, for example "diggerhq/github-job-scheduler" func (db *Database) GetGithubAppInstallationLink(installationId int64) (*model.GithubAppInstallationLink, error) { var link model.GithubAppInstallationLink - result := db.GormDB.Preload("Organisation").Where("github_installation_id = ? AND status=?", installationId, GithubAppInstallationLinkActive).Find(&link) + result := db.GormDB.Where("github_installation_id = ? AND status=?", installationId, GithubAppInstallationLinkActive).Find(&link) if result.Error != nil { if !errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -597,7 +597,7 @@ func (db *Database) GetOrganisationById(orgId string) (*model.Organization, erro return &org, nil } -func (db *Database) GetDiggerBatch(batchId *uuid.UUID) (*model.DiggerBatch, error) { +func (db *Database) GetDiggerBatch(batchId string) (*model.DiggerBatch, error) { batch := &model.DiggerBatch{} result := db.GormDB.Where("id=? 
", batchId).Find(batch) if result.Error != nil { @@ -670,12 +670,12 @@ func (db *Database) UpdateBatchStatus(batch *model.DiggerBatch) error { } -func (db *Database) CreateDiggerJob(batchId uuid.UUID, serializedJob []byte, workflowFile string) (*model.DiggerJob, error) { +func (db *Database) CreateDiggerJob(batchId string, serializedJob []byte, workflowFile string) (*model.DiggerJob, error) { if serializedJob == nil || len(serializedJob) == 0 { return nil, fmt.Errorf("serializedJob can't be empty") } jobId := uniuri.New() - batchIdStr := batchId.String() + batchIdStr := batchId summary := &model.DiggerJobSummary{} result := db.GormDB.Save(summary) @@ -698,7 +698,7 @@ func (db *Database) CreateDiggerJob(batchId uuid.UUID, serializedJob []byte, wor func (db *Database) ListDiggerRunsForProject(projectName string, repoId uint) ([]model.DiggerRun, error) { var runs []model.DiggerRun - err := db.GormDB.Preload("PlanStage").Preload("ApplyStage"). + err := db.GormDB. Where("project_name = ? AND repo_id= ?", projectName, repoId).Order("created_at desc").Find(&runs).Error if err != nil { @@ -759,7 +759,7 @@ func (db *Database) GetLastDiggerRunForProject(projectName string) (*model.Digge func (db *Database) GetDiggerRun(id uint) (*model.DiggerRun, error) { dr := &model.DiggerRun{} - result := db.GormDB.Preload("Repo"). + result := db.GormDB. Preload("ApplyStage"). Preload("PlanStage"). Where("id=? ", id).Find(dr) @@ -785,7 +785,7 @@ func (db *Database) CreateDiggerRunQueueItem(diggeRrunId int64, projectId int64) func (db *Database) GetDiggerRunQueueItem(id uint) (*model.DiggerRunQueueItem, error) { dr := &model.DiggerRunQueueItem{} - result := db.GormDB.Preload("DiggerRun").Where("id=? ", id).Find(dr) + result := db.GormDB.Where("id=? 
", id).Find(dr) if result.Error != nil { return nil, result.Error } @@ -794,7 +794,7 @@ func (db *Database) GetDiggerRunQueueItem(id uint) (*model.DiggerRunQueueItem, e func (db *Database) GetDiggerJobFromRunStage(stage model.DiggerRunStage) (*model.DiggerJob, error) { job := &model.DiggerJob{} - result := db.GormDB.Preload("Batch").Take(job, "batch_id = ?", stage.BatchID) + result := db.GormDB.Take(job, "batch_id = ?", stage.BatchID) if result.Error != nil { if errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -906,13 +906,13 @@ func (db *Database) UpdateDiggerJob(job *model.DiggerJob) error { return nil } -func (db *Database) GetDiggerJobsForBatch(batchId uuid.UUID) ([]model.DiggerJob, error) { +func (db *Database) GetDiggerJobsForBatch(batchId string) ([]model.DiggerJob, error) { jobs := make([]model.DiggerJob, 0) var where *gorm.DB where = db.GormDB.Where("digger_jobs.batch_id = ?", batchId) - result := where.Preload("Batch").Preload("DiggerJobSummary").Find(&jobs) + result := where.Find(&jobs) if result.Error != nil { if !errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -921,13 +921,13 @@ func (db *Database) GetDiggerJobsForBatch(batchId uuid.UUID) ([]model.DiggerJob, return jobs, nil } -func (db *Database) GetDiggerJobsForBatchWithStatus(batchId uuid.UUID, status []scheduler.DiggerJobStatus) ([]model.DiggerJob, error) { +func (db *Database) GetDiggerJobsForBatchWithStatus(batchId string, status []scheduler.DiggerJobStatus) ([]model.DiggerJob, error) { jobs := make([]model.DiggerJob, 0) var where *gorm.DB where = db.GormDB.Where("digger_jobs.batch_id = ?", batchId).Where("status IN ?", status) - result := where.Preload("Batch").Preload("DiggerJobSummary").Find(&jobs) + result := where.Find(&jobs) if result.Error != nil { if !errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -942,7 +942,7 @@ func (db *Database) GetDiggerJobsWithStatus(status scheduler.DiggerJobStatus) ([ var 
where *gorm.DB where = db.GormDB.Where("status = ?", status) - result := where.Preload("Batch").Find(&jobs) + result := where.Find(&jobs) if result.Error != nil { if !errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -951,14 +951,14 @@ func (db *Database) GetDiggerJobsWithStatus(status scheduler.DiggerJobStatus) ([ return jobs, nil } -func (db *Database) GetPendingParentDiggerJobs(batchId *uuid.UUID) ([]model.DiggerJob, error) { +func (db *Database) GetPendingParentDiggerJobs(batchId string) ([]model.DiggerJob, error) { jobs := make([]model.DiggerJob, 0) - joins := db.GormDB.Joins("LEFT JOIN digger_job_parent_links ON digger_jobs.digger_job_id = digger_job_parent_links.digger_job_id").Preload("Batch") + joins := db.GormDB.Joins("LEFT JOIN digger_job_parent_links ON digger_jobs.digger_job_id = digger_job_parent_links.digger_job_id") var where *gorm.DB - if batchId != nil { - where = joins.Where("digger_jobs.status = ? AND digger_job_parent_links.id IS NULL AND digger_jobs.batch_id = ?", scheduler.DiggerJobCreated, *batchId) + if batchId != "" { + where = joins.Where("digger_jobs.status = ? AND digger_job_parent_links.id IS NULL AND digger_jobs.batch_id = ?", scheduler.DiggerJobCreated, batchId) } else { where = joins.Where("digger_jobs.status = ? AND digger_job_parent_links.id IS NULL", scheduler.DiggerJobCreated) } @@ -974,7 +974,7 @@ func (db *Database) GetPendingParentDiggerJobs(batchId *uuid.UUID) ([]model.Digg func (db *Database) GetDiggerJob(jobId string) (*model.DiggerJob, error) { job := &model.DiggerJob{} - result := db.GormDB.Preload("Batch").Preload("DiggerJobSummary").Where("digger_job_id=? ", jobId).Find(job) + result := db.GormDB.Where("digger_job_id=? 
", jobId).Find(job) if result.Error != nil { if !errors.Is(result.Error, gorm.ErrRecordNotFound) { return nil, result.Error @@ -1111,14 +1111,14 @@ func (db *Database) CreateRepo(name string, repoFullName string, repoOrganisatio // return token, nil //} -func (db *Database) CreateDiggerJobToken(organisationId uint) (*model.DiggerJobToken, error) { +func (db *Database) CreateDiggerJobToken(organisationId string) (*model.DiggerJobToken, error) { // create a digger job token // prefixing token to make easier to retire this type of tokens later token := "cli:" + uuid.New().String() jobToken := &model.DiggerJobToken{ Value: token, - OrganisationID: int64(organisationId), + OrganisationID: organisationId, Type: CliJobAccessType, Expiry: time.Now().Add(time.Hour * 2), // some jobs can take >30 mins (k8s cluster) } diff --git a/next/main b/next/main new file mode 100755 index 000000000..13486bd7a Binary files /dev/null and b/next/main differ diff --git a/next/main.go b/next/main.go index 353fbd959..50b5aaf50 100644 --- a/next/main.go +++ b/next/main.go @@ -3,12 +3,12 @@ package main import ( "embed" "fmt" - "github.com/diggerhq/digger/backend/ci_backends" "github.com/diggerhq/digger/backend/config" - "github.com/diggerhq/digger/backend/utils" + "github.com/diggerhq/digger/next/ci_backends" controllers "github.com/diggerhq/digger/next/controllers" + "github.com/diggerhq/digger/next/dbmodels" "github.com/diggerhq/digger/next/middleware" - "github.com/diggerhq/digger/next/models" + "github.com/diggerhq/digger/next/utils" "github.com/getsentry/sentry-go" "github.com/gin-gonic/gin" "io/fs" @@ -44,7 +44,7 @@ func main() { } // initialize the database - models.ConnectDatabase() + dbmodels.ConnectDatabase() r := gin.Default() @@ -68,6 +68,11 @@ func main() { r.GET("/github/callback", middleware.SupabaseCookieAuth(), diggerController.GithubAppCallbackPage) r.POST("/github-app-webhook", diggerController.GithubAppWebHook) + + //authorized := r.Group("/") + 
//authorized.Use(middleware.GetApiMiddleware(), middleware.AccessLevel(dbmodels.CliJobAccessType, dbmodels.AccessPolicyType, models.AdminPolicyType)) + + r.POST("/repos/:repo/projects/:projectName/jobs/:jobId/set-status", middleware.JobTokenAuth(), diggerController.SetJobStatusForProject) port := config.GetPort() r.Run(fmt.Sprintf(":%d", port)) diff --git a/next/middleware/basic.go b/next/middleware/basic.go deleted file mode 100644 index 5313110a8..000000000 --- a/next/middleware/basic.go +++ /dev/null @@ -1,74 +0,0 @@ -package middleware - -import ( - "fmt" - "github.com/diggerhq/digger/backend/models" - "github.com/gin-gonic/gin" - "log" - "net/http" - "os" - "strings" -) - -func HttpBasicWebAuth() gin.HandlerFunc { - - return func(c *gin.Context) { - log.Printf("Restricting access") - username := os.Getenv("HTTP_BASIC_AUTH_USERNAME") - password := os.Getenv("HTTP_BASIC_AUTH_PASSWORD") - if username == "" || password == "" { - c.Error(fmt.Errorf("configuration error: HTTP Basic Auth configured but username or password not set")) - } - gin.BasicAuth(gin.Accounts{ - username: password, - })(c) - c.Set(ACCESS_LEVEL_KEY, models.AdminPolicyType) - setDefaultOrganisationId(c) - c.Next() - } -} - -func setDefaultOrganisationId(c *gin.Context) { - orgNumberOne, err := models.DB.GetOrganisation(models.DEFAULT_ORG_NAME) - if err != nil { - c.Error(fmt.Errorf("Error fetching default organisation please check your configuration")) - } - c.Set(ORGANISATION_ID_KEY, orgNumberOne.ID) -} - -func HttpBasicApiAuth() gin.HandlerFunc { - return func(c *gin.Context) { - authHeader := c.Request.Header.Get("Authorization") - if authHeader == "" { - c.String(http.StatusForbidden, "No Authorization header provided") - c.Abort() - return - } - token := strings.TrimPrefix(authHeader, "Bearer ") - if token == authHeader { - c.String(http.StatusForbidden, "Could not find bearer token in Authorization header") - c.Abort() - return - } - - if strings.HasPrefix(token, "cli:") { - if jobToken, 
err := CheckJobToken(c, token); err != nil { - c.String(http.StatusForbidden, err.Error()) - c.Abort() - return - } else { - setDefaultOrganisationId(c) - c.Set(ACCESS_LEVEL_KEY, jobToken.Type) - } - } else if token == os.Getenv("BEARER_AUTH_TOKEN") { - setDefaultOrganisationId(c) - c.Set(ACCESS_LEVEL_KEY, models.AdminPolicyType) - c.Next() - } else { - c.String(http.StatusForbidden, "Invalid Bearer token") - c.Abort() - return - } - return - } -} diff --git a/next/middleware/job_token_auth.go b/next/middleware/job_token_auth.go new file mode 100644 index 000000000..44ff982ab --- /dev/null +++ b/next/middleware/job_token_auth.go @@ -0,0 +1,70 @@ +package middleware + +import ( + "fmt" + "github.com/diggerhq/digger/next/dbmodels" + "github.com/diggerhq/digger/next/model" + "github.com/gin-gonic/gin" + "log" + "net/http" + "strings" + "time" +) + +func CheckJobToken(c *gin.Context, token string) (*model.DiggerJobToken, error) { + jobToken, err := dbmodels.DB.GetJobToken(token) + if jobToken == nil { + c.String(http.StatusForbidden, "Invalid bearer token") + c.Abort() + return nil, fmt.Errorf("invalid bearer token") + } + + if time.Now().After(jobToken.Expiry) { + log.Printf("Token has already expired: %v", err) + c.String(http.StatusForbidden, "Token has expired") + c.Abort() + return nil, fmt.Errorf("token has expired") + } + + if err != nil { + log.Printf("Error while fetching token from database: %v", err) + c.String(http.StatusInternalServerError, "Error occurred while fetching database") + c.Abort() + return nil, fmt.Errorf("could not fetch cli token") + } + + return jobToken, nil +} + +func JobTokenAuth() gin.HandlerFunc { + return func(c *gin.Context) { + authHeader := c.Request.Header.Get("Authorization") + if authHeader == "" { + c.String(http.StatusForbidden, "No Authorization header provided") + c.Abort() + return + } + token := strings.TrimPrefix(authHeader, "Bearer ") + if token == authHeader { + c.String(http.StatusForbidden, "Could not find bearer 
token in Authorization header") + c.Abort() + return + } + + if strings.HasPrefix(token, "cli:") { + if jobToken, err := CheckJobToken(c, token); err != nil { + c.String(http.StatusForbidden, err.Error()) + c.Abort() + return + } else { + c.Set(ORGANISATION_ID_KEY, jobToken.OrganisationID) + c.Set(ACCESS_LEVEL_KEY, jobToken.Type) + } + } else { + c.String(http.StatusForbidden, "Invalid Bearer token") + c.Abort() + return + } + return + } +} diff --git a/next/middleware/jwt.go b/next/middleware/jwt.go deleted file mode 100644 index 297703eab..000000000 --- a/next/middleware/jwt.go +++ /dev/null @@ -1,311 +0,0 @@ -package middleware - -import ( - "fmt" - "github.com/diggerhq/digger/backend/models" - "github.com/diggerhq/digger/backend/segment" - "github.com/diggerhq/digger/backend/services" - "github.com/gin-gonic/gin" - "github.com/golang-jwt/jwt" - "log" - "net/http" - "os" - "strconv" - "strings" -) - -func SetContextParameters(c *gin.Context, auth services.Auth, token *jwt.Token) error { - if claims, ok := token.Claims.(jwt.MapClaims); ok { - if claims.Valid() != nil { - log.Printf("Token's claim is invalid") - return fmt.Errorf("token is invalid") - } - var org *models.Organisation - tenantId := claims["tenantId"] - if tenantId == nil { - log.Printf("claim's tenantId is nil") - return fmt.Errorf("token is invalid") - } - tenantId = tenantId.(string) - log.Printf("tenantId: %s", tenantId) - - org, err := models.DB.GetOrganisation(tenantId) - if err != nil { - log.Printf("Error while fetching organisation: %v", err) - return err - } else if org == nil { - log.Printf("No organisation found for tenantId %s", tenantId) - return fmt.Errorf("token is invalid") - } - - c.Set(ORGANISATION_ID_KEY, org.ID) - - segment.GetClient() - segment.IdentifyClient(strconv.Itoa(int(org.ID)), org.Name, org.Name, org.Name, org.Name, strconv.Itoa(int(org.ID)), "") - - log.Printf("set org id %v\n", org.ID) - - tokenType := claims["type"].(string) - - permissions := make([]string, 0)
- if tokenType == "tenantAccessToken" { - permission, err := auth.FetchTokenPermissions(claims["sub"].(string)) - if err != nil { - log.Printf("Error while fetching permissions: %v", err) - return fmt.Errorf("token is invalid") - } - permissions = permission - } else { - permissionsClaims := claims["permissions"] - if permissionsClaims == nil { - log.Printf("claim's permissions is nil") - return fmt.Errorf("token is invalid") - } - for _, permissionClaim := range permissionsClaims.([]interface{}) { - permissions = append(permissions, permissionClaim.(string)) - } - } - for _, permission := range permissions { - if permission == "digger.all.*" { - c.Set(ACCESS_LEVEL_KEY, models.AdminPolicyType) - return nil - } - } - for _, permission := range permissions { - if permission == "digger.all.read.*" { - c.Set(ACCESS_LEVEL_KEY, models.AccessPolicyType) - return nil - } - } - } else { - log.Printf("Token's claim is invalid") - return fmt.Errorf("token is invalid") - } - return nil -} - -func JWTWebAuth(auth services.Auth) gin.HandlerFunc { - return func(c *gin.Context) { - var tokenString string - tokenString, err := c.Cookie("token") - if err != nil { - log.Printf("can't get a cookie token, %v\n", err) - c.AbortWithStatus(http.StatusForbidden) - return - } - - if tokenString == "" { - log.Println("auth token is empty") - c.AbortWithStatus(http.StatusForbidden) - return - } - - jwtPublicKey := os.Getenv("JWT_PUBLIC_KEY") - if jwtPublicKey == "" { - log.Printf("No JWT_PUBLIC_KEY environment variable provided") - c.String(http.StatusInternalServerError, "Error occurred while reading public key") - c.Abort() - return - } - publicKeyData := []byte(jwtPublicKey) - - publicKey, err := jwt.ParseRSAPublicKeyFromPEM(publicKeyData) - if err != nil { - log.Printf("Error while parsing public key: %v", err.Error()) - c.String(http.StatusInternalServerError, "Error occurred while parsing public key") - c.Abort() - return - } - - // validate token - token, err := jwt.Parse(tokenString, 
func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return publicKey, nil - }) - if err != nil { - log.Printf("can't parse a token, %v\n", err) - c.AbortWithStatus(http.StatusForbidden) - return - } - - if token.Valid { - err = SetContextParameters(c, auth, token) - if err != nil { - log.Printf("Error while setting context parameters: %v", err) - c.String(http.StatusForbidden, "Failed to parse token") - c.Abort() - return - } - - c.Next() - return - } else if ve, ok := err.(*jwt.ValidationError); ok { - if ve.Errors&jwt.ValidationErrorMalformed != 0 { - log.Println("That's not even a token") - } else if ve.Errors&(jwt.ValidationErrorExpired|jwt.ValidationErrorNotValidYet) != 0 { - log.Println("Token is either expired or not active yet") - } else { - log.Println("Couldn't handle this token:", err) - } - } else { - log.Println("Couldn't handle this token:", err) - } - - c.AbortWithStatus(http.StatusForbidden) - } -} - -func SecretCodeAuth() gin.HandlerFunc { - return func(c *gin.Context) { - secret := c.Request.Header.Get("x-webhook-secret") - if secret == "" { - log.Printf("No x-webhook-secret header provided") - c.String(http.StatusForbidden, "No x-webhook-secret header provided") - c.Abort() - return - } - _, err := jwt.Parse(secret, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return []byte(os.Getenv("WEBHOOK_SECRET")), nil - }) - - if err != nil { - log.Printf("Error parsing secret: %v", err.Error()) - c.String(http.StatusForbidden, "Invalid x-webhook-secret header provided") - c.Abort() - return - } - c.Next() - } -} - -func JWTBearerTokenAuth(auth services.Auth) gin.HandlerFunc { - return func(c *gin.Context) { - authHeader := c.Request.Header.Get("Authorization") - if authHeader == 
"" { - c.String(http.StatusForbidden, "No Authorization header provided") - c.Abort() - return - } - token := strings.TrimPrefix(authHeader, "Bearer ") - if token == authHeader { - c.String(http.StatusForbidden, "Could not find bearer token in Authorization header") - c.Abort() - return - } - - if strings.HasPrefix(token, "cli:") { - if jobToken, err := CheckJobToken(c, token); err != nil { - c.String(http.StatusForbidden, err.Error()) - c.Abort() - return - } else { - c.Set(ORGANISATION_ID_KEY, jobToken.OrganisationID) - c.Set(ACCESS_LEVEL_KEY, jobToken.Type) - } - } else if strings.HasPrefix(token, "t:") { - var dbToken models.Token - - token, err := models.DB.GetToken(token) - if token == nil { - c.String(http.StatusForbidden, "Invalid bearer token") - c.Abort() - return - } - - if err != nil { - log.Printf("Error while fetching token from database: %v", err) - c.String(http.StatusInternalServerError, "Error occurred while fetching database") - c.Abort() - return - } - c.Set(ORGANISATION_ID_KEY, dbToken.OrganisationID) - c.Set(ACCESS_LEVEL_KEY, dbToken.Type) - } else { - jwtPublicKey := os.Getenv("JWT_PUBLIC_KEY") - if jwtPublicKey == "" { - log.Printf("No JWT_PUBLIC_KEY environment variable provided") - c.String(http.StatusInternalServerError, "Error occurred while reading public key") - c.Abort() - return - } - publicKeyData := []byte(jwtPublicKey) - - publicKey, err := jwt.ParseRSAPublicKeyFromPEM(publicKeyData) - if err != nil { - log.Printf("Error while parsing public key: %v", err.Error()) - c.String(http.StatusInternalServerError, "Error occurred while parsing public key") - c.Abort() - return - } - - token, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) - } - return publicKey, nil - }) - - if err != nil { - log.Printf("Error while parsing token: %v", err.Error()) - c.String(http.StatusForbidden, 
"Authorization header is invalid") - c.Abort() - return - } - - if !token.Valid { - log.Printf("Token is invalid") - c.String(http.StatusForbidden, "Authorization header is invalid") - c.Abort() - return - } - - err = SetContextParameters(c, auth, token) - if err != nil { - log.Printf("Error while setting context parameters: %v", err) - c.String(http.StatusForbidden, "Failed to parse token") - c.Abort() - return - } - } - - c.Next() - } -} - -func AccessLevel(allowedAccessLevels ...string) gin.HandlerFunc { - return func(c *gin.Context) { - accessLevel := c.GetString(ACCESS_LEVEL_KEY) - for _, allowedAccessLevel := range allowedAccessLevels { - if accessLevel == allowedAccessLevel { - c.Next() - return - } - } - c.String(http.StatusForbidden, "Not allowed to access this resource with this access level") - c.Abort() - } -} - -func CORSMiddleware() gin.HandlerFunc { - return func(c *gin.Context) { - c.Writer.Header().Set("Access-Control-Allow-Origin", "*") - c.Writer.Header().Set("Access-Control-Allow-Credentials", "true") - c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With") - c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT") - - if c.Request.Method == "OPTIONS" { - c.AbortWithStatus(204) - return - } - - c.Next() - } -} - -const ORGANISATION_ID_KEY = "organisation_ID" -const ACCESS_LEVEL_KEY = "access_level" diff --git a/next/middleware/middleware.go b/next/middleware/middleware.go index 86424b911..3f3fb079b 100644 --- a/next/middleware/middleware.go +++ b/next/middleware/middleware.go @@ -1,81 +1,4 @@ package middleware -import ( - "fmt" - "github.com/diggerhq/digger/backend/models" - "github.com/diggerhq/digger/backend/services" - "github.com/gin-gonic/gin" - "log" - "net/http" - "os" - "time" -) - -func GetWebMiddleware() gin.HandlerFunc { - if _, ok := os.LookupEnv("JWT_AUTH"); ok { - log.Printf("Using 
JWT middleware for web routes") - auth := services.Auth{ - HttpClient: http.Client{}, - Host: os.Getenv("AUTH_HOST"), - Secret: os.Getenv("AUTH_SECRET"), - ClientId: os.Getenv("FRONTEGG_CLIENT_ID"), - } - return JWTWebAuth(auth) - } else if _, ok := os.LookupEnv("HTTP_BASIC_AUTH"); ok { - log.Printf("Using http basic auth middleware for web routes") - return HttpBasicWebAuth() - } else if _, ok := os.LookupEnv("NOOP_AUTH"); ok { - log.Printf("Using noop auth for web routes") - return NoopWebAuth() - } else { - log.Fatalf("Please specify one of JWT_AUTH or HTTP_BASIC_AUTH") - return nil - } -} - -func GetApiMiddleware() gin.HandlerFunc { - if _, ok := os.LookupEnv("JWT_AUTH"); ok { - log.Printf("Using JWT middleware for API routes") - auth := services.Auth{ - HttpClient: http.Client{}, - Host: os.Getenv("AUTH_HOST"), - Secret: os.Getenv("AUTH_SECRET"), - ClientId: os.Getenv("FRONTEGG_CLIENT_ID"), - } - return JWTBearerTokenAuth(auth) - } else if _, ok := os.LookupEnv("HTTP_BASIC_AUTH"); ok { - log.Printf("Using http basic auth middleware for API routes") - return HttpBasicApiAuth() - } else if _, ok := os.LookupEnv("NOOP_AUTH"); ok { - return NoopApiAuth() - } else { - log.Fatalf("Please specify one of JWT_AUTH or HTTP_BASIC_AUTH") - return nil - } -} - -func CheckJobToken(c *gin.Context, token string) (*models.JobToken, error) { - jobToken, err := models.DB.GetJobToken(token) - if jobToken == nil { - c.String(http.StatusForbidden, "Invalid bearer token") - c.Abort() - return nil, fmt.Errorf("invalid bearer token") - } - - if time.Now().After(jobToken.Expiry) { - log.Printf("Token has already expired: %v", err) - c.String(http.StatusForbidden, "Token has expired") - c.Abort() - return nil, fmt.Errorf("token has expired") - } - - if err != nil { - log.Printf("Error while fetching token from database: %v", err) - c.String(http.StatusInternalServerError, "Error occurred while fetching database") - c.Abort() - return nil, fmt.Errorf("could not fetch cli token") - } - - 
log.Printf("Token: %v access level: %v", jobToken.Value, jobToken.Type) - return jobToken, nil -} +const ORGANISATION_ID_KEY = "organisation_ID" +const ACCESS_LEVEL_KEY = "access_level" diff --git a/next/middleware/noop.go b/next/middleware/noop.go deleted file mode 100644 index c3b82b618..000000000 --- a/next/middleware/noop.go +++ /dev/null @@ -1,22 +0,0 @@ -package middleware - -import ( - "github.com/diggerhq/digger/backend/models" - "github.com/gin-gonic/gin" -) - -func NoopWebAuth() gin.HandlerFunc { - return func(c *gin.Context) { - setDefaultOrganisationId(c) - c.Set(ACCESS_LEVEL_KEY, models.AdminPolicyType) - c.Next() - } -} - -func NoopApiAuth() gin.HandlerFunc { - return func(c *gin.Context) { - setDefaultOrganisationId(c) - c.Set(ACCESS_LEVEL_KEY, models.AdminPolicyType) - c.Next() - } -} diff --git a/next/middleware/supabase_cookie_auth.go b/next/middleware/supabase_cookie_auth.go index 911d0399d..93c397ed7 100644 --- a/next/middleware/supabase_cookie_auth.go +++ b/next/middleware/supabase_cookie_auth.go @@ -3,8 +3,8 @@ package middleware import ( "encoding/json" "fmt" + "github.com/diggerhq/digger/next/dbmodels" "github.com/diggerhq/digger/next/model" - "github.com/diggerhq/digger/next/models" "github.com/diggerhq/digger/next/supa" "github.com/gin-gonic/gin" "log" @@ -61,7 +61,7 @@ func SupabaseCookieAuth() gin.HandlerFunc { return } - selectedOrg, err := models.DB.GetUserOrganizationsFirstMatch(userId) + selectedOrg, err := dbmodels.DB.GetUserOrganizationsFirstMatch(userId) if err != nil { log.Printf("error while finding organisation: %v", err) c.String(http.StatusBadRequest, "User does not belong to any orgs") diff --git a/next/model/digger_job_tokens.gen.go b/next/model/digger_job_tokens.gen.go index 524495adf..476eea744 100644 --- a/next/model/digger_job_tokens.gen.go +++ b/next/model/digger_job_tokens.gen.go @@ -20,8 +20,8 @@ type DiggerJobToken struct { DeletedAt gorm.DeletedAt `gorm:"column:deleted_at" json:"deleted_at"` Value string 
`gorm:"column:value" json:"value"` Expiry time.Time `gorm:"column:expiry" json:"expiry"` - OrganisationID int64 `gorm:"column:organisation_id" json:"organisation_id"` Type string `gorm:"column:type" json:"type"` + OrganisationID string `gorm:"column:organisation_id" json:"organisation_id"` } // TableName DiggerJobToken's table name diff --git a/next/model/encrypted_env_vars.gen.go b/next/model/encrypted_env_vars.gen.go new file mode 100644 index 000000000..60f96d730 --- /dev/null +++ b/next/model/encrypted_env_vars.gen.go @@ -0,0 +1,27 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package model + +import ( + "time" +) + +const TableNameEncryptedEnvVar = "encrypted_env_vars" + +// EncryptedEnvVar mapped from table +type EncryptedEnvVar struct { + ID string `gorm:"column:id;primaryKey;default:gen_random_uuid()" json:"id"` + ProjectID string `gorm:"column:project_id;not null" json:"project_id"` + Name string `gorm:"column:name;not null" json:"name"` + EncryptedValue []uint8 `gorm:"column:encrypted_value;not null" json:"encrypted_value"` + Iv []uint8 `gorm:"column:iv;not null" json:"iv"` + UpdatedAt time.Time `gorm:"column:updated_at;not null;default:now()" json:"updated_at"` + IsSecret bool `gorm:"column:is_secret;not null" json:"is_secret"` +} + +// TableName EncryptedEnvVar's table name +func (*EncryptedEnvVar) TableName() string { + return TableNameEncryptedEnvVar +} diff --git a/next/model/project_tfvars.gen.go b/next/model/project_tfvars.gen.go new file mode 100644 index 000000000..dccf9cc6e --- /dev/null +++ b/next/model/project_tfvars.gen.go @@ -0,0 +1,24 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. 
+ +package model + +import ( + "time" +) + +const TableNameProjectTfvar = "project_tfvars" + +// ProjectTfvar mapped from table +type ProjectTfvar struct { + ID string `gorm:"column:id;primaryKey;default:gen_random_uuid()" json:"id"` + ProjectID string `gorm:"column:project_id;not null" json:"project_id"` + Tfvars string `gorm:"column:tfvars;not null" json:"tfvars"` + UpdatedAt time.Time `gorm:"column:updated_at;not null;default:now()" json:"updated_at"` +} + +// TableName ProjectTfvar's table name +func (*ProjectTfvar) TableName() string { + return TableNameProjectTfvar +} diff --git a/next/models/scheduler.go b/next/models/scheduler.go deleted file mode 100644 index a7667ce8a..000000000 --- a/next/models/scheduler.go +++ /dev/null @@ -1,13 +0,0 @@ -package models - -type DiggerVCSType string - -const DiggerVCSGithub DiggerVCSType = "github" -const DiggerVCSGitlab DiggerVCSType = "gitlab" - -type DiggerJobLinkStatus int8 - -const ( - DiggerJobLinkCreated DiggerJobLinkStatus = 1 - DiggerJobLinkSucceeded DiggerJobLinkStatus = 2 -) diff --git a/next/models_generated/digger_job_tokens.gen.go b/next/models_generated/digger_job_tokens.gen.go index 81484781a..2f1776f42 100644 --- a/next/models_generated/digger_job_tokens.gen.go +++ b/next/models_generated/digger_job_tokens.gen.go @@ -33,8 +33,8 @@ func newDiggerJobToken(db *gorm.DB, opts ...gen.DOOption) diggerJobToken { _diggerJobToken.DeletedAt = field.NewField(tableName, "deleted_at") _diggerJobToken.Value = field.NewString(tableName, "value") _diggerJobToken.Expiry = field.NewTime(tableName, "expiry") - _diggerJobToken.OrganisationID = field.NewInt64(tableName, "organisation_id") _diggerJobToken.Type = field.NewString(tableName, "type") + _diggerJobToken.OrganisationID = field.NewString(tableName, "organisation_id") _diggerJobToken.fillFieldMap() @@ -51,8 +51,8 @@ type diggerJobToken struct { DeletedAt field.Field Value field.String Expiry field.Time - OrganisationID field.Int64 Type field.String + OrganisationID 
field.String fieldMap map[string]field.Expr } @@ -75,8 +75,8 @@ func (d *diggerJobToken) updateTableName(table string) *diggerJobToken { d.DeletedAt = field.NewField(table, "deleted_at") d.Value = field.NewString(table, "value") d.Expiry = field.NewTime(table, "expiry") - d.OrganisationID = field.NewInt64(table, "organisation_id") d.Type = field.NewString(table, "type") + d.OrganisationID = field.NewString(table, "organisation_id") d.fillFieldMap() @@ -100,8 +100,8 @@ func (d *diggerJobToken) fillFieldMap() { d.fieldMap["deleted_at"] = d.DeletedAt d.fieldMap["value"] = d.Value d.fieldMap["expiry"] = d.Expiry - d.fieldMap["organisation_id"] = d.OrganisationID d.fieldMap["type"] = d.Type + d.fieldMap["organisation_id"] = d.OrganisationID } func (d diggerJobToken) clone(db *gorm.DB) diggerJobToken { diff --git a/next/models_generated/encrypted_env_vars.gen.go b/next/models_generated/encrypted_env_vars.gen.go new file mode 100644 index 000000000..52c508b99 --- /dev/null +++ b/next/models_generated/encrypted_env_vars.gen.go @@ -0,0 +1,404 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package models_generated + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/diggerhq/digger/next/model" +) + +func newEncryptedEnvVar(db *gorm.DB, opts ...gen.DOOption) encryptedEnvVar { + _encryptedEnvVar := encryptedEnvVar{} + + _encryptedEnvVar.encryptedEnvVarDo.UseDB(db, opts...) 
+ _encryptedEnvVar.encryptedEnvVarDo.UseModel(&model.EncryptedEnvVar{}) + + tableName := _encryptedEnvVar.encryptedEnvVarDo.TableName() + _encryptedEnvVar.ALL = field.NewAsterisk(tableName) + _encryptedEnvVar.ID = field.NewString(tableName, "id") + _encryptedEnvVar.ProjectID = field.NewString(tableName, "project_id") + _encryptedEnvVar.Name = field.NewString(tableName, "name") + _encryptedEnvVar.EncryptedValue = field.NewField(tableName, "encrypted_value") + _encryptedEnvVar.Iv = field.NewField(tableName, "iv") + _encryptedEnvVar.UpdatedAt = field.NewTime(tableName, "updated_at") + _encryptedEnvVar.IsSecret = field.NewBool(tableName, "is_secret") + + _encryptedEnvVar.fillFieldMap() + + return _encryptedEnvVar +} + +type encryptedEnvVar struct { + encryptedEnvVarDo + + ALL field.Asterisk + ID field.String + ProjectID field.String + Name field.String + EncryptedValue field.Field + Iv field.Field + UpdatedAt field.Time + IsSecret field.Bool + + fieldMap map[string]field.Expr +} + +func (e encryptedEnvVar) Table(newTableName string) *encryptedEnvVar { + e.encryptedEnvVarDo.UseTable(newTableName) + return e.updateTableName(newTableName) +} + +func (e encryptedEnvVar) As(alias string) *encryptedEnvVar { + e.encryptedEnvVarDo.DO = *(e.encryptedEnvVarDo.As(alias).(*gen.DO)) + return e.updateTableName(alias) +} + +func (e *encryptedEnvVar) updateTableName(table string) *encryptedEnvVar { + e.ALL = field.NewAsterisk(table) + e.ID = field.NewString(table, "id") + e.ProjectID = field.NewString(table, "project_id") + e.Name = field.NewString(table, "name") + e.EncryptedValue = field.NewField(table, "encrypted_value") + e.Iv = field.NewField(table, "iv") + e.UpdatedAt = field.NewTime(table, "updated_at") + e.IsSecret = field.NewBool(table, "is_secret") + + e.fillFieldMap() + + return e +} + +func (e *encryptedEnvVar) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := e.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := 
_f.(field.OrderExpr) + return _oe, ok +} + +func (e *encryptedEnvVar) fillFieldMap() { + e.fieldMap = make(map[string]field.Expr, 7) + e.fieldMap["id"] = e.ID + e.fieldMap["project_id"] = e.ProjectID + e.fieldMap["name"] = e.Name + e.fieldMap["encrypted_value"] = e.EncryptedValue + e.fieldMap["iv"] = e.Iv + e.fieldMap["updated_at"] = e.UpdatedAt + e.fieldMap["is_secret"] = e.IsSecret +} + +func (e encryptedEnvVar) clone(db *gorm.DB) encryptedEnvVar { + e.encryptedEnvVarDo.ReplaceConnPool(db.Statement.ConnPool) + return e +} + +func (e encryptedEnvVar) replaceDB(db *gorm.DB) encryptedEnvVar { + e.encryptedEnvVarDo.ReplaceDB(db) + return e +} + +type encryptedEnvVarDo struct{ gen.DO } + +type IEncryptedEnvVarDo interface { + gen.SubQuery + Debug() IEncryptedEnvVarDo + WithContext(ctx context.Context) IEncryptedEnvVarDo + WithResult(fc func(tx gen.Dao)) gen.ResultInfo + ReplaceDB(db *gorm.DB) + ReadDB() IEncryptedEnvVarDo + WriteDB() IEncryptedEnvVarDo + As(alias string) gen.Dao + Session(config *gorm.Session) IEncryptedEnvVarDo + Columns(cols ...field.Expr) gen.Columns + Clauses(conds ...clause.Expression) IEncryptedEnvVarDo + Not(conds ...gen.Condition) IEncryptedEnvVarDo + Or(conds ...gen.Condition) IEncryptedEnvVarDo + Select(conds ...field.Expr) IEncryptedEnvVarDo + Where(conds ...gen.Condition) IEncryptedEnvVarDo + Order(conds ...field.Expr) IEncryptedEnvVarDo + Distinct(cols ...field.Expr) IEncryptedEnvVarDo + Omit(cols ...field.Expr) IEncryptedEnvVarDo + Join(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo + LeftJoin(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo + RightJoin(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo + Group(cols ...field.Expr) IEncryptedEnvVarDo + Having(conds ...gen.Condition) IEncryptedEnvVarDo + Limit(limit int) IEncryptedEnvVarDo + Offset(offset int) IEncryptedEnvVarDo + Count() (count int64, err error) + Scopes(funcs ...func(gen.Dao) gen.Dao) IEncryptedEnvVarDo + Unscoped() IEncryptedEnvVarDo + 
Create(values ...*model.EncryptedEnvVar) error + CreateInBatches(values []*model.EncryptedEnvVar, batchSize int) error + Save(values ...*model.EncryptedEnvVar) error + First() (*model.EncryptedEnvVar, error) + Take() (*model.EncryptedEnvVar, error) + Last() (*model.EncryptedEnvVar, error) + Find() ([]*model.EncryptedEnvVar, error) + FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.EncryptedEnvVar, err error) + FindInBatches(result *[]*model.EncryptedEnvVar, batchSize int, fc func(tx gen.Dao, batch int) error) error + Pluck(column field.Expr, dest interface{}) error + Delete(...*model.EncryptedEnvVar) (info gen.ResultInfo, err error) + Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error) + UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error) + Updates(value interface{}) (info gen.ResultInfo, err error) + UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error) + UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error) + UpdateColumns(value interface{}) (info gen.ResultInfo, err error) + UpdateFrom(q gen.SubQuery) gen.Dao + Attrs(attrs ...field.AssignExpr) IEncryptedEnvVarDo + Assign(attrs ...field.AssignExpr) IEncryptedEnvVarDo + Joins(fields ...field.RelationField) IEncryptedEnvVarDo + Preload(fields ...field.RelationField) IEncryptedEnvVarDo + FirstOrInit() (*model.EncryptedEnvVar, error) + FirstOrCreate() (*model.EncryptedEnvVar, error) + FindByPage(offset int, limit int) (result []*model.EncryptedEnvVar, count int64, err error) + ScanByPage(result interface{}, offset int, limit int) (count int64, err error) + Scan(result interface{}) (err error) + Returning(value interface{}, columns ...string) IEncryptedEnvVarDo + UnderlyingDB() *gorm.DB + schema.Tabler +} + +func (e encryptedEnvVarDo) Debug() IEncryptedEnvVarDo { + return e.withDO(e.DO.Debug()) +} + +func (e encryptedEnvVarDo) WithContext(ctx context.Context) IEncryptedEnvVarDo { 
+ return e.withDO(e.DO.WithContext(ctx)) +} + +func (e encryptedEnvVarDo) ReadDB() IEncryptedEnvVarDo { + return e.Clauses(dbresolver.Read) +} + +func (e encryptedEnvVarDo) WriteDB() IEncryptedEnvVarDo { + return e.Clauses(dbresolver.Write) +} + +func (e encryptedEnvVarDo) Session(config *gorm.Session) IEncryptedEnvVarDo { + return e.withDO(e.DO.Session(config)) +} + +func (e encryptedEnvVarDo) Clauses(conds ...clause.Expression) IEncryptedEnvVarDo { + return e.withDO(e.DO.Clauses(conds...)) +} + +func (e encryptedEnvVarDo) Returning(value interface{}, columns ...string) IEncryptedEnvVarDo { + return e.withDO(e.DO.Returning(value, columns...)) +} + +func (e encryptedEnvVarDo) Not(conds ...gen.Condition) IEncryptedEnvVarDo { + return e.withDO(e.DO.Not(conds...)) +} + +func (e encryptedEnvVarDo) Or(conds ...gen.Condition) IEncryptedEnvVarDo { + return e.withDO(e.DO.Or(conds...)) +} + +func (e encryptedEnvVarDo) Select(conds ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Select(conds...)) +} + +func (e encryptedEnvVarDo) Where(conds ...gen.Condition) IEncryptedEnvVarDo { + return e.withDO(e.DO.Where(conds...)) +} + +func (e encryptedEnvVarDo) Order(conds ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Order(conds...)) +} + +func (e encryptedEnvVarDo) Distinct(cols ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Distinct(cols...)) +} + +func (e encryptedEnvVarDo) Omit(cols ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Omit(cols...)) +} + +func (e encryptedEnvVarDo) Join(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Join(table, on...)) +} + +func (e encryptedEnvVarDo) LeftJoin(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.LeftJoin(table, on...)) +} + +func (e encryptedEnvVarDo) RightJoin(table schema.Tabler, on ...field.Expr) IEncryptedEnvVarDo { + return e.withDO(e.DO.RightJoin(table, on...)) +} + +func (e encryptedEnvVarDo) Group(cols ...field.Expr) 
IEncryptedEnvVarDo { + return e.withDO(e.DO.Group(cols...)) +} + +func (e encryptedEnvVarDo) Having(conds ...gen.Condition) IEncryptedEnvVarDo { + return e.withDO(e.DO.Having(conds...)) +} + +func (e encryptedEnvVarDo) Limit(limit int) IEncryptedEnvVarDo { + return e.withDO(e.DO.Limit(limit)) +} + +func (e encryptedEnvVarDo) Offset(offset int) IEncryptedEnvVarDo { + return e.withDO(e.DO.Offset(offset)) +} + +func (e encryptedEnvVarDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IEncryptedEnvVarDo { + return e.withDO(e.DO.Scopes(funcs...)) +} + +func (e encryptedEnvVarDo) Unscoped() IEncryptedEnvVarDo { + return e.withDO(e.DO.Unscoped()) +} + +func (e encryptedEnvVarDo) Create(values ...*model.EncryptedEnvVar) error { + if len(values) == 0 { + return nil + } + return e.DO.Create(values) +} + +func (e encryptedEnvVarDo) CreateInBatches(values []*model.EncryptedEnvVar, batchSize int) error { + return e.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (e encryptedEnvVarDo) Save(values ...*model.EncryptedEnvVar) error { + if len(values) == 0 { + return nil + } + return e.DO.Save(values) +} + +func (e encryptedEnvVarDo) First() (*model.EncryptedEnvVar, error) { + if result, err := e.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.EncryptedEnvVar), nil + } +} + +func (e encryptedEnvVarDo) Take() (*model.EncryptedEnvVar, error) { + if result, err := e.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.EncryptedEnvVar), nil + } +} + +func (e encryptedEnvVarDo) Last() (*model.EncryptedEnvVar, error) { + if result, err := e.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.EncryptedEnvVar), nil + } +} + +func (e encryptedEnvVarDo) Find() ([]*model.EncryptedEnvVar, error) { + result, err := e.DO.Find() + return 
result.([]*model.EncryptedEnvVar), err +} + +func (e encryptedEnvVarDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.EncryptedEnvVar, err error) { + buf := make([]*model.EncryptedEnvVar, 0, batchSize) + err = e.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (e encryptedEnvVarDo) FindInBatches(result *[]*model.EncryptedEnvVar, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return e.DO.FindInBatches(result, batchSize, fc) +} + +func (e encryptedEnvVarDo) Attrs(attrs ...field.AssignExpr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Attrs(attrs...)) +} + +func (e encryptedEnvVarDo) Assign(attrs ...field.AssignExpr) IEncryptedEnvVarDo { + return e.withDO(e.DO.Assign(attrs...)) +} + +func (e encryptedEnvVarDo) Joins(fields ...field.RelationField) IEncryptedEnvVarDo { + for _, _f := range fields { + e = *e.withDO(e.DO.Joins(_f)) + } + return &e +} + +func (e encryptedEnvVarDo) Preload(fields ...field.RelationField) IEncryptedEnvVarDo { + for _, _f := range fields { + e = *e.withDO(e.DO.Preload(_f)) + } + return &e +} + +func (e encryptedEnvVarDo) FirstOrInit() (*model.EncryptedEnvVar, error) { + if result, err := e.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.EncryptedEnvVar), nil + } +} + +func (e encryptedEnvVarDo) FirstOrCreate() (*model.EncryptedEnvVar, error) { + if result, err := e.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.EncryptedEnvVar), nil + } +} + +func (e encryptedEnvVarDo) FindByPage(offset int, limit int) (result []*model.EncryptedEnvVar, count int64, err error) { + result, err = e.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = 
e.Offset(-1).Limit(-1).Count() + return +} + +func (e encryptedEnvVarDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = e.Count() + if err != nil { + return + } + + err = e.Offset(offset).Limit(limit).Scan(result) + return +} + +func (e encryptedEnvVarDo) Scan(result interface{}) (err error) { + return e.DO.Scan(result) +} + +func (e encryptedEnvVarDo) Delete(models ...*model.EncryptedEnvVar) (result gen.ResultInfo, err error) { + return e.DO.Delete(models) +} + +func (e *encryptedEnvVarDo) withDO(do gen.Dao) *encryptedEnvVarDo { + e.DO = *do.(*gen.DO) + return e +} diff --git a/next/models_generated/gen.go b/next/models_generated/gen.go index d52dfe35a..dad524d2f 100644 --- a/next/models_generated/gen.go +++ b/next/models_generated/gen.go @@ -29,6 +29,7 @@ var ( DiggerRun *diggerRun DiggerRunQueueItem *diggerRunQueueItem DiggerRunStage *diggerRunStage + EncryptedEnvVar *encryptedEnvVar GithubApp *githubApp GithubAppInstallation *githubAppInstallation GithubAppInstallationLink *githubAppInstallationLink @@ -49,6 +50,7 @@ var ( Product *product Project *project ProjectComment *projectComment + ProjectTfvar *projectTfvar Repo *repo Subscription *subscription UserAPIKey *userAPIKey @@ -73,6 +75,7 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) { DiggerRun = &Q.DiggerRun DiggerRunQueueItem = &Q.DiggerRunQueueItem DiggerRunStage = &Q.DiggerRunStage + EncryptedEnvVar = &Q.EncryptedEnvVar GithubApp = &Q.GithubApp GithubAppInstallation = &Q.GithubAppInstallation GithubAppInstallationLink = &Q.GithubAppInstallationLink @@ -93,6 +96,7 @@ func SetDefault(db *gorm.DB, opts ...gen.DOOption) { Product = &Q.Product Project = &Q.Project ProjectComment = &Q.ProjectComment + ProjectTfvar = &Q.ProjectTfvar Repo = &Q.Repo Subscription = &Q.Subscription UserAPIKey = &Q.UserAPIKey @@ -118,6 +122,7 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query { DiggerRun: newDiggerRun(db, opts...), DiggerRunQueueItem: 
newDiggerRunQueueItem(db, opts...), DiggerRunStage: newDiggerRunStage(db, opts...), + EncryptedEnvVar: newEncryptedEnvVar(db, opts...), GithubApp: newGithubApp(db, opts...), GithubAppInstallation: newGithubAppInstallation(db, opts...), GithubAppInstallationLink: newGithubAppInstallationLink(db, opts...), @@ -138,6 +143,7 @@ func Use(db *gorm.DB, opts ...gen.DOOption) *Query { Product: newProduct(db, opts...), Project: newProject(db, opts...), ProjectComment: newProjectComment(db, opts...), + ProjectTfvar: newProjectTfvar(db, opts...), Repo: newRepo(db, opts...), Subscription: newSubscription(db, opts...), UserAPIKey: newUserAPIKey(db, opts...), @@ -164,6 +170,7 @@ type Query struct { DiggerRun diggerRun DiggerRunQueueItem diggerRunQueueItem DiggerRunStage diggerRunStage + EncryptedEnvVar encryptedEnvVar GithubApp githubApp GithubAppInstallation githubAppInstallation GithubAppInstallationLink githubAppInstallationLink @@ -184,6 +191,7 @@ type Query struct { Product product Project project ProjectComment projectComment + ProjectTfvar projectTfvar Repo repo Subscription subscription UserAPIKey userAPIKey @@ -211,6 +219,7 @@ func (q *Query) clone(db *gorm.DB) *Query { DiggerRun: q.DiggerRun.clone(db), DiggerRunQueueItem: q.DiggerRunQueueItem.clone(db), DiggerRunStage: q.DiggerRunStage.clone(db), + EncryptedEnvVar: q.EncryptedEnvVar.clone(db), GithubApp: q.GithubApp.clone(db), GithubAppInstallation: q.GithubAppInstallation.clone(db), GithubAppInstallationLink: q.GithubAppInstallationLink.clone(db), @@ -231,6 +240,7 @@ func (q *Query) clone(db *gorm.DB) *Query { Product: q.Product.clone(db), Project: q.Project.clone(db), ProjectComment: q.ProjectComment.clone(db), + ProjectTfvar: q.ProjectTfvar.clone(db), Repo: q.Repo.clone(db), Subscription: q.Subscription.clone(db), UserAPIKey: q.UserAPIKey.clone(db), @@ -265,6 +275,7 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query { DiggerRun: q.DiggerRun.replaceDB(db), DiggerRunQueueItem: q.DiggerRunQueueItem.replaceDB(db), 
DiggerRunStage: q.DiggerRunStage.replaceDB(db), + EncryptedEnvVar: q.EncryptedEnvVar.replaceDB(db), GithubApp: q.GithubApp.replaceDB(db), GithubAppInstallation: q.GithubAppInstallation.replaceDB(db), GithubAppInstallationLink: q.GithubAppInstallationLink.replaceDB(db), @@ -285,6 +296,7 @@ func (q *Query) ReplaceDB(db *gorm.DB) *Query { Product: q.Product.replaceDB(db), Project: q.Project.replaceDB(db), ProjectComment: q.ProjectComment.replaceDB(db), + ProjectTfvar: q.ProjectTfvar.replaceDB(db), Repo: q.Repo.replaceDB(db), Subscription: q.Subscription.replaceDB(db), UserAPIKey: q.UserAPIKey.replaceDB(db), @@ -309,6 +321,7 @@ type queryCtx struct { DiggerRun IDiggerRunDo DiggerRunQueueItem IDiggerRunQueueItemDo DiggerRunStage IDiggerRunStageDo + EncryptedEnvVar IEncryptedEnvVarDo GithubApp IGithubAppDo GithubAppInstallation IGithubAppInstallationDo GithubAppInstallationLink IGithubAppInstallationLinkDo @@ -329,6 +342,7 @@ type queryCtx struct { Product IProductDo Project IProjectDo ProjectComment IProjectCommentDo + ProjectTfvar IProjectTfvarDo Repo IRepoDo Subscription ISubscriptionDo UserAPIKey IUserAPIKeyDo @@ -353,6 +367,7 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx { DiggerRun: q.DiggerRun.WithContext(ctx), DiggerRunQueueItem: q.DiggerRunQueueItem.WithContext(ctx), DiggerRunStage: q.DiggerRunStage.WithContext(ctx), + EncryptedEnvVar: q.EncryptedEnvVar.WithContext(ctx), GithubApp: q.GithubApp.WithContext(ctx), GithubAppInstallation: q.GithubAppInstallation.WithContext(ctx), GithubAppInstallationLink: q.GithubAppInstallationLink.WithContext(ctx), @@ -373,6 +388,7 @@ func (q *Query) WithContext(ctx context.Context) *queryCtx { Product: q.Product.WithContext(ctx), Project: q.Project.WithContext(ctx), ProjectComment: q.ProjectComment.WithContext(ctx), + ProjectTfvar: q.ProjectTfvar.WithContext(ctx), Repo: q.Repo.WithContext(ctx), Subscription: q.Subscription.WithContext(ctx), UserAPIKey: q.UserAPIKey.WithContext(ctx), diff --git 
a/next/models_generated/project_tfvars.gen.go b/next/models_generated/project_tfvars.gen.go new file mode 100644 index 000000000..22f21ea4d --- /dev/null +++ b/next/models_generated/project_tfvars.gen.go @@ -0,0 +1,392 @@ +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. +// Code generated by gorm.io/gen. DO NOT EDIT. + +package models_generated + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "gorm.io/gen" + "gorm.io/gen/field" + + "gorm.io/plugin/dbresolver" + + "github.com/diggerhq/digger/next/model" +) + +func newProjectTfvar(db *gorm.DB, opts ...gen.DOOption) projectTfvar { + _projectTfvar := projectTfvar{} + + _projectTfvar.projectTfvarDo.UseDB(db, opts...) + _projectTfvar.projectTfvarDo.UseModel(&model.ProjectTfvar{}) + + tableName := _projectTfvar.projectTfvarDo.TableName() + _projectTfvar.ALL = field.NewAsterisk(tableName) + _projectTfvar.ID = field.NewString(tableName, "id") + _projectTfvar.ProjectID = field.NewString(tableName, "project_id") + _projectTfvar.Tfvars = field.NewString(tableName, "tfvars") + _projectTfvar.UpdatedAt = field.NewTime(tableName, "updated_at") + + _projectTfvar.fillFieldMap() + + return _projectTfvar +} + +type projectTfvar struct { + projectTfvarDo + + ALL field.Asterisk + ID field.String + ProjectID field.String + Tfvars field.String + UpdatedAt field.Time + + fieldMap map[string]field.Expr +} + +func (p projectTfvar) Table(newTableName string) *projectTfvar { + p.projectTfvarDo.UseTable(newTableName) + return p.updateTableName(newTableName) +} + +func (p projectTfvar) As(alias string) *projectTfvar { + p.projectTfvarDo.DO = *(p.projectTfvarDo.As(alias).(*gen.DO)) + return p.updateTableName(alias) +} + +func (p *projectTfvar) updateTableName(table string) *projectTfvar { + p.ALL = field.NewAsterisk(table) + p.ID = field.NewString(table, "id") + p.ProjectID = field.NewString(table, "project_id") + p.Tfvars = field.NewString(table, 
"tfvars") + p.UpdatedAt = field.NewTime(table, "updated_at") + + p.fillFieldMap() + + return p +} + +func (p *projectTfvar) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := p.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (p *projectTfvar) fillFieldMap() { + p.fieldMap = make(map[string]field.Expr, 4) + p.fieldMap["id"] = p.ID + p.fieldMap["project_id"] = p.ProjectID + p.fieldMap["tfvars"] = p.Tfvars + p.fieldMap["updated_at"] = p.UpdatedAt +} + +func (p projectTfvar) clone(db *gorm.DB) projectTfvar { + p.projectTfvarDo.ReplaceConnPool(db.Statement.ConnPool) + return p +} + +func (p projectTfvar) replaceDB(db *gorm.DB) projectTfvar { + p.projectTfvarDo.ReplaceDB(db) + return p +} + +type projectTfvarDo struct{ gen.DO } + +type IProjectTfvarDo interface { + gen.SubQuery + Debug() IProjectTfvarDo + WithContext(ctx context.Context) IProjectTfvarDo + WithResult(fc func(tx gen.Dao)) gen.ResultInfo + ReplaceDB(db *gorm.DB) + ReadDB() IProjectTfvarDo + WriteDB() IProjectTfvarDo + As(alias string) gen.Dao + Session(config *gorm.Session) IProjectTfvarDo + Columns(cols ...field.Expr) gen.Columns + Clauses(conds ...clause.Expression) IProjectTfvarDo + Not(conds ...gen.Condition) IProjectTfvarDo + Or(conds ...gen.Condition) IProjectTfvarDo + Select(conds ...field.Expr) IProjectTfvarDo + Where(conds ...gen.Condition) IProjectTfvarDo + Order(conds ...field.Expr) IProjectTfvarDo + Distinct(cols ...field.Expr) IProjectTfvarDo + Omit(cols ...field.Expr) IProjectTfvarDo + Join(table schema.Tabler, on ...field.Expr) IProjectTfvarDo + LeftJoin(table schema.Tabler, on ...field.Expr) IProjectTfvarDo + RightJoin(table schema.Tabler, on ...field.Expr) IProjectTfvarDo + Group(cols ...field.Expr) IProjectTfvarDo + Having(conds ...gen.Condition) IProjectTfvarDo + Limit(limit int) IProjectTfvarDo + Offset(offset int) IProjectTfvarDo + Count() (count int64, err error) + Scopes(funcs 
...func(gen.Dao) gen.Dao) IProjectTfvarDo + Unscoped() IProjectTfvarDo + Create(values ...*model.ProjectTfvar) error + CreateInBatches(values []*model.ProjectTfvar, batchSize int) error + Save(values ...*model.ProjectTfvar) error + First() (*model.ProjectTfvar, error) + Take() (*model.ProjectTfvar, error) + Last() (*model.ProjectTfvar, error) + Find() ([]*model.ProjectTfvar, error) + FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ProjectTfvar, err error) + FindInBatches(result *[]*model.ProjectTfvar, batchSize int, fc func(tx gen.Dao, batch int) error) error + Pluck(column field.Expr, dest interface{}) error + Delete(...*model.ProjectTfvar) (info gen.ResultInfo, err error) + Update(column field.Expr, value interface{}) (info gen.ResultInfo, err error) + UpdateSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error) + Updates(value interface{}) (info gen.ResultInfo, err error) + UpdateColumn(column field.Expr, value interface{}) (info gen.ResultInfo, err error) + UpdateColumnSimple(columns ...field.AssignExpr) (info gen.ResultInfo, err error) + UpdateColumns(value interface{}) (info gen.ResultInfo, err error) + UpdateFrom(q gen.SubQuery) gen.Dao + Attrs(attrs ...field.AssignExpr) IProjectTfvarDo + Assign(attrs ...field.AssignExpr) IProjectTfvarDo + Joins(fields ...field.RelationField) IProjectTfvarDo + Preload(fields ...field.RelationField) IProjectTfvarDo + FirstOrInit() (*model.ProjectTfvar, error) + FirstOrCreate() (*model.ProjectTfvar, error) + FindByPage(offset int, limit int) (result []*model.ProjectTfvar, count int64, err error) + ScanByPage(result interface{}, offset int, limit int) (count int64, err error) + Scan(result interface{}) (err error) + Returning(value interface{}, columns ...string) IProjectTfvarDo + UnderlyingDB() *gorm.DB + schema.Tabler +} + +func (p projectTfvarDo) Debug() IProjectTfvarDo { + return p.withDO(p.DO.Debug()) +} + +func (p projectTfvarDo) WithContext(ctx context.Context) 
IProjectTfvarDo { + return p.withDO(p.DO.WithContext(ctx)) +} + +func (p projectTfvarDo) ReadDB() IProjectTfvarDo { + return p.Clauses(dbresolver.Read) +} + +func (p projectTfvarDo) WriteDB() IProjectTfvarDo { + return p.Clauses(dbresolver.Write) +} + +func (p projectTfvarDo) Session(config *gorm.Session) IProjectTfvarDo { + return p.withDO(p.DO.Session(config)) +} + +func (p projectTfvarDo) Clauses(conds ...clause.Expression) IProjectTfvarDo { + return p.withDO(p.DO.Clauses(conds...)) +} + +func (p projectTfvarDo) Returning(value interface{}, columns ...string) IProjectTfvarDo { + return p.withDO(p.DO.Returning(value, columns...)) +} + +func (p projectTfvarDo) Not(conds ...gen.Condition) IProjectTfvarDo { + return p.withDO(p.DO.Not(conds...)) +} + +func (p projectTfvarDo) Or(conds ...gen.Condition) IProjectTfvarDo { + return p.withDO(p.DO.Or(conds...)) +} + +func (p projectTfvarDo) Select(conds ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Select(conds...)) +} + +func (p projectTfvarDo) Where(conds ...gen.Condition) IProjectTfvarDo { + return p.withDO(p.DO.Where(conds...)) +} + +func (p projectTfvarDo) Order(conds ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Order(conds...)) +} + +func (p projectTfvarDo) Distinct(cols ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Distinct(cols...)) +} + +func (p projectTfvarDo) Omit(cols ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Omit(cols...)) +} + +func (p projectTfvarDo) Join(table schema.Tabler, on ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Join(table, on...)) +} + +func (p projectTfvarDo) LeftJoin(table schema.Tabler, on ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.LeftJoin(table, on...)) +} + +func (p projectTfvarDo) RightJoin(table schema.Tabler, on ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.RightJoin(table, on...)) +} + +func (p projectTfvarDo) Group(cols ...field.Expr) IProjectTfvarDo { + return p.withDO(p.DO.Group(cols...)) +} + +func (p 
projectTfvarDo) Having(conds ...gen.Condition) IProjectTfvarDo { + return p.withDO(p.DO.Having(conds...)) +} + +func (p projectTfvarDo) Limit(limit int) IProjectTfvarDo { + return p.withDO(p.DO.Limit(limit)) +} + +func (p projectTfvarDo) Offset(offset int) IProjectTfvarDo { + return p.withDO(p.DO.Offset(offset)) +} + +func (p projectTfvarDo) Scopes(funcs ...func(gen.Dao) gen.Dao) IProjectTfvarDo { + return p.withDO(p.DO.Scopes(funcs...)) +} + +func (p projectTfvarDo) Unscoped() IProjectTfvarDo { + return p.withDO(p.DO.Unscoped()) +} + +func (p projectTfvarDo) Create(values ...*model.ProjectTfvar) error { + if len(values) == 0 { + return nil + } + return p.DO.Create(values) +} + +func (p projectTfvarDo) CreateInBatches(values []*model.ProjectTfvar, batchSize int) error { + return p.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (p projectTfvarDo) Save(values ...*model.ProjectTfvar) error { + if len(values) == 0 { + return nil + } + return p.DO.Save(values) +} + +func (p projectTfvarDo) First() (*model.ProjectTfvar, error) { + if result, err := p.DO.First(); err != nil { + return nil, err + } else { + return result.(*model.ProjectTfvar), nil + } +} + +func (p projectTfvarDo) Take() (*model.ProjectTfvar, error) { + if result, err := p.DO.Take(); err != nil { + return nil, err + } else { + return result.(*model.ProjectTfvar), nil + } +} + +func (p projectTfvarDo) Last() (*model.ProjectTfvar, error) { + if result, err := p.DO.Last(); err != nil { + return nil, err + } else { + return result.(*model.ProjectTfvar), nil + } +} + +func (p projectTfvarDo) Find() ([]*model.ProjectTfvar, error) { + result, err := p.DO.Find() + return result.([]*model.ProjectTfvar), err +} + +func (p projectTfvarDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*model.ProjectTfvar, 
err error) { + buf := make([]*model.ProjectTfvar, 0, batchSize) + err = p.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (p projectTfvarDo) FindInBatches(result *[]*model.ProjectTfvar, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return p.DO.FindInBatches(result, batchSize, fc) +} + +func (p projectTfvarDo) Attrs(attrs ...field.AssignExpr) IProjectTfvarDo { + return p.withDO(p.DO.Attrs(attrs...)) +} + +func (p projectTfvarDo) Assign(attrs ...field.AssignExpr) IProjectTfvarDo { + return p.withDO(p.DO.Assign(attrs...)) +} + +func (p projectTfvarDo) Joins(fields ...field.RelationField) IProjectTfvarDo { + for _, _f := range fields { + p = *p.withDO(p.DO.Joins(_f)) + } + return &p +} + +func (p projectTfvarDo) Preload(fields ...field.RelationField) IProjectTfvarDo { + for _, _f := range fields { + p = *p.withDO(p.DO.Preload(_f)) + } + return &p +} + +func (p projectTfvarDo) FirstOrInit() (*model.ProjectTfvar, error) { + if result, err := p.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*model.ProjectTfvar), nil + } +} + +func (p projectTfvarDo) FirstOrCreate() (*model.ProjectTfvar, error) { + if result, err := p.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*model.ProjectTfvar), nil + } +} + +func (p projectTfvarDo) FindByPage(offset int, limit int) (result []*model.ProjectTfvar, count int64, err error) { + result, err = p.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = p.Offset(-1).Limit(-1).Count() + return +} + +func (p projectTfvarDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = p.Count() + if err != nil { + return + } + + err = 
p.Offset(offset).Limit(limit).Scan(result) + return +} + +func (p projectTfvarDo) Scan(result interface{}) (err error) { + return p.DO.Scan(result) +} + +func (p projectTfvarDo) Delete(models ...*model.ProjectTfvar) (result gen.ResultInfo, err error) { + return p.DO.Delete(models) +} + +func (p *projectTfvarDo) withDO(do gen.Dao) *projectTfvarDo { + p.DO = *do.(*gen.DO) + return p +} diff --git a/next/services/scheduler.go b/next/services/scheduler.go new file mode 100644 index 000000000..65c4ee196 --- /dev/null +++ b/next/services/scheduler.go @@ -0,0 +1,98 @@ +package services + +import ( + "fmt" + "github.com/diggerhq/digger/backend/ci_backends" + "github.com/diggerhq/digger/backend/utils" + orchestrator_scheduler "github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/next/dbmodels" + "github.com/diggerhq/digger/next/model" + "log" + "os" + "strconv" +) + +func ScheduleJob(ciBackend ci_backends.CiBackend, repoFullname string, repoOwner string, repoName string, batchId string, job *model.DiggerJob, gh utils.GithubClientProvider) error { + maxConcurrencyForBatch, err := strconv.Atoi(os.Getenv("MAX_DIGGER_CONCURRENCY_PER_BATCH")) + if err != nil { + log.Printf("WARN: could not get max concurrency for batch, setting it to 0: %v", err) + maxConcurrencyForBatch = 0 + } + if maxConcurrencyForBatch == 0 { + // concurrency limits not set + err := TriggerJob(gh, ciBackend, repoFullname, repoOwner, repoName, batchId, job) + if err != nil { + log.Printf("Could not trigger job: %v", err) + return err + } + } else { + // concurrency limits set + log.Printf("Scheduling job with concurrency limit: %v per batch", maxConcurrencyForBatch) + jobs, err := dbmodels.DB.GetDiggerJobsForBatchWithStatus(batchId, []orchestrator_scheduler.DiggerJobStatus{ + orchestrator_scheduler.DiggerJobTriggered, + orchestrator_scheduler.DiggerJobStarted, + }) + if err != nil { + log.Printf("GetDiggerJobsForBatchWithStatus err: %v\n", err) + return err + } + log.Printf("Length of 
jobs: %v", len(jobs)) + if len(jobs) >= maxConcurrencyForBatch { + log.Printf("max concurrency for jobs reached: %v, queuing until more jobs succeed", len(jobs)) + job.Status = int16(orchestrator_scheduler.DiggerJobQueuedForRun) + dbmodels.DB.UpdateDiggerJob(job) + return nil + } else { + err := TriggerJob(gh, ciBackend, repoFullname, repoOwner, repoName, batchId, job) + if err != nil { + log.Printf("Could not trigger job: %v", err) + return err + } + } + } + return nil +} + +func TriggerJob(gh utils.GithubClientProvider, ciBackend ci_backends.CiBackend, repoFullname string, repoOwner string, repoName string, batchId string, job *model.DiggerJob) error { + log.Printf("TriggerJob jobId: %v", job.DiggerJobID) + + if job.JobSpec == nil { + log.Printf("Jobspec can't be nil") + return fmt.Errorf("JobSpec is nil, skipping") + } + jobString := string(job.JobSpec) + log.Printf("jobString: %v \n", jobString) + + runName, err := GetRunNameFromJob(*job) + if err != nil { + log.Printf("could not get run name: %v", err) + return fmt.Errorf("coult not get run name %v", err) + } + + spec, err := GetSpecFromJob(*job) + if err != nil { + log.Printf("could not get spec: %v", err) + return fmt.Errorf("coult not get spec %v", err) + } + + vcsToken, err := GetVCSTokenFromJob(*job, gh) + if err != nil { + log.Printf("could not get vcs token: %v", err) + return fmt.Errorf("coult not get vcs token: %v", err) + } + + err = ciBackend.TriggerWorkflow(*spec, *runName, *vcsToken) + if err != nil { + log.Printf("TriggerJob err: %v\n", err) + return err + } + + job.Status = int16(orchestrator_scheduler.DiggerJobTriggered) + err = dbmodels.DB.UpdateDiggerJob(job) + if err != nil { + log.Printf("failed to Update digger job state: %v\n", err) + return err + } + + return nil +} diff --git a/next/services/spec.go b/next/services/spec.go new file mode 100644 index 000000000..050adafd9 --- /dev/null +++ b/next/services/spec.go @@ -0,0 +1,118 @@ +package services + +import ( + "encoding/json" + "fmt" + 
"github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/libs/spec" + "github.com/diggerhq/digger/next/dbmodels" + "github.com/diggerhq/digger/next/model" + "github.com/diggerhq/digger/next/utils" + "log" + "os" + "strconv" +) + +func GetVCSTokenFromJob(job model.DiggerJob, gh utils.GithubClientProvider) (*string, error) { + // TODO: make it VCS generic + batchId := job.BatchID + batch, err := dbmodels.DB.GetDiggerBatch(batchId) + if err != nil { + log.Printf("could not get digger batch: %v", err) + return nil, fmt.Errorf("could not get digger batch: %v", err) + } + var token string + switch batch.Vcs { + case string(dbmodels.DiggerVCSGithub): + _, ghToken, err := utils.GetGithubService( + gh, + batch.GithubInstallationID, + batch.RepoFullName, + batch.RepoOwner, + batch.RepoName, + ) + token = *ghToken + if err != nil { + return nil, fmt.Errorf("TriggerWorkflow: could not retrieve token: %v", err) + } + case string(dbmodels.DiggerVCSGitlab): + token = os.Getenv("DIGGER_GITLAB_ACCESS_TOKEN") + default: + return nil, fmt.Errorf("unknown batch VCS: %v", batch.Vcs) + } + + return &token, nil +} + +func GetRunNameFromJob(job model.DiggerJob) (*string, error) { + var jobSpec scheduler.JobJson + err := json.Unmarshal([]byte(job.JobSpec), &jobSpec) + if err != nil { + log.Printf("could not unmarshal job string: %v", err) + return nil, fmt.Errorf("could not marshal json string: %v", err) + } + + batchId := job.BatchID + batch, err := dbmodels.DB.GetDiggerBatch(batchId) + if err != nil { + log.Printf("could not get digger batch: %v", err) + return nil, fmt.Errorf("could not get digger batch: %v", err) + } + + batchIdShort := batch.ID[:8] + diggerCommand := fmt.Sprintf("digger %v", batch.BatchType) + projectName := jobSpec.ProjectName + requestedBy := jobSpec.RequestedBy + prNumber := *jobSpec.PullRequestNumber + + runName := fmt.Sprintf("[%v] %v %v By: %v PR: %v", batchIdShort, diggerCommand, projectName, requestedBy, prNumber) + return &runName, nil +} + 
+func GetSpecFromJob(job model.DiggerJob) (*spec.Spec, error) { + var jobSpec scheduler.JobJson + err := json.Unmarshal([]byte(job.JobSpec), &jobSpec) + if err != nil { + log.Printf("could not unmarshal job string: %v", err) + return nil, fmt.Errorf("could not marshal json string: %v", err) + } + + batchId := job.BatchID + batch, err := dbmodels.DB.GetDiggerBatch(batchId) + if err != nil { + log.Printf("could not get digger batch: %v", err) + return nil, fmt.Errorf("could not get digger batch: %v", err) + } + + spec := spec.Spec{ + SpecType: spec.SpecTypePullRequestJob, + JobId: job.DiggerJobID, + CommentId: strconv.FormatInt(batch.CommentID, 10), + Job: jobSpec, + Reporter: spec.ReporterSpec{ + ReportingStrategy: "comments_per_run", + ReporterType: "lazy", + }, + Lock: spec.LockSpec{ + LockType: "noop", + }, + Backend: spec.BackendSpec{ + BackendHostname: jobSpec.BackendHostname, + BackendOrganisationName: jobSpec.BackendOrganisationName, + BackendJobToken: jobSpec.BackendJobToken, + BackendType: "backend", + }, + VCS: spec.VcsSpec{ + VcsType: string(batch.Vcs), + Actor: jobSpec.RequestedBy, + RepoFullname: batch.RepoFullName, + RepoOwner: batch.RepoOwner, + RepoName: batch.RepoName, + WorkflowFile: job.WorkflowFile, + }, + Policy: spec.PolicySpec{ + PolicyType: "http", + }, + } + return &spec, nil +} diff --git a/next/utils/github.go b/next/utils/github.go new file mode 100644 index 000000000..6318a481f --- /dev/null +++ b/next/utils/github.go @@ -0,0 +1,196 @@ +package utils + +import ( + "context" + "encoding/base64" + "fmt" + "github.com/bradleyfalzon/ghinstallation/v2" + "github.com/diggerhq/digger/libs/ci" + github2 "github.com/diggerhq/digger/libs/ci/github" + "github.com/diggerhq/digger/libs/scheduler" + "github.com/diggerhq/digger/next/dbmodels" + "github.com/google/go-github/v61/github" + "log" + net "net/http" + "os" + "strings" + "time" +) + +func createTempDir() string { + tempDir, err := os.MkdirTemp("", "repo") + if err != nil { + log.Fatal(err) + } 
+ return tempDir +} + +// just a wrapper around github client to be able to use mocks +type DiggerGithubRealClientProvider struct { +} + +type DiggerGithubClientMockProvider struct { + MockedHTTPClient *net.Client +} + +type GithubClientProvider interface { + NewClient(netClient *net.Client) (*github.Client, error) + Get(githubAppId int64, installationId int64) (*github.Client, *string, error) +} + +func (gh DiggerGithubRealClientProvider) NewClient(netClient *net.Client) (*github.Client, error) { + ghClient := github.NewClient(netClient) + return ghClient, nil +} + +func (gh DiggerGithubRealClientProvider) Get(githubAppId int64, installationId int64) (*github.Client, *string, error) { + githubAppPrivateKey := "" + githubAppPrivateKeyB64 := os.Getenv("GITHUB_APP_PRIVATE_KEY_BASE64") + if githubAppPrivateKeyB64 != "" { + decodedBytes, err := base64.StdEncoding.DecodeString(githubAppPrivateKeyB64) + if err != nil { + return nil, nil, fmt.Errorf("error initialising github app installation: please set GITHUB_APP_PRIVATE_KEY_BASE64 env variable\n") + } + githubAppPrivateKey = string(decodedBytes) + } else { + githubAppPrivateKey = os.Getenv("GITHUB_APP_PRIVATE_KEY") + if githubAppPrivateKey != "" { + log.Printf("WARNING: GITHUB_APP_PRIVATE_KEY will be deprecated in future releases, " + + "please use GITHUB_APP_PRIVATE_KEY_BASE64 instead") + } else { + return nil, nil, fmt.Errorf("error initialising github app installation: please set GITHUB_APP_PRIVATE_KEY_BASE64 env variable\n") + } + } + + tr := net.DefaultTransport + itr, err := ghinstallation.New(tr, githubAppId, installationId, []byte(githubAppPrivateKey)) + if err != nil { + return nil, nil, fmt.Errorf("error initialising github app installation: %v\n", err) + } + + token, err := itr.Token(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("error initialising git app token: %v\n", err) + } + ghClient, err := gh.NewClient(&net.Client{Transport: itr}) + if err != nil { + log.Printf("error creating 
new client: %v", err) + } + return ghClient, &token, nil +} + +func (gh DiggerGithubClientMockProvider) NewClient(netClient *net.Client) (*github.Client, error) { + ghClient := github.NewClient(gh.MockedHTTPClient) + return ghClient, nil +} + +func (gh DiggerGithubClientMockProvider) Get(githubAppId int64, installationId int64) (*github.Client, *string, error) { + ghClient, _ := gh.NewClient(gh.MockedHTTPClient) + token := "token" + return ghClient, &token, nil +} + +func GetGithubClient(gh GithubClientProvider, installationId int64, repoFullName string) (*github.Client, *string, error) { + installation, err := dbmodels.DB.GetGithubAppInstallationByIdAndRepo(installationId, repoFullName) + if err != nil { + log.Printf("Error getting installation: %v", err) + return nil, nil, fmt.Errorf("Error getting installation: %v", err) + } + + _, err = dbmodels.DB.GetGithubApp(installation.GithubAppID) + if err != nil { + log.Printf("Error getting app: %v", err) + return nil, nil, fmt.Errorf("Error getting app: %v", err) + } + + ghClient, token, err := gh.Get(installation.GithubAppID, installation.GithubInstallationID) + return ghClient, token, err +} +func GetGithubService(gh GithubClientProvider, installationId int64, repoFullName string, repoOwner string, repoName string) (*github2.GithubService, *string, error) { + ghClient, token, err := GetGithubClient(gh, installationId, repoFullName) + if err != nil { + log.Printf("Error creating github app client: %v", err) + return nil, nil, fmt.Errorf("Error creating github app client: %v", err) + } + + ghService := github2.GithubService{ + Client: ghClient, + RepoName: repoName, + Owner: repoOwner, + } + + return &ghService, token, nil +} + +func SetPRStatusForJobs(prService ci.PullRequestService, prNumber int, jobs []scheduler.Job) error { + for _, job := range jobs { + for _, command := range job.Commands { + var err error + switch command { + case "digger plan": + err = prService.SetStatus(prNumber, "pending", 
job.ProjectName+"/plan") + case "digger apply": + err = prService.SetStatus(prNumber, "pending", job.ProjectName+"/apply") + } + if err != nil { + log.Printf("Erorr setting status: %v", err) + return fmt.Errorf("Error setting pr status: %v", err) + } + } + } + // Report aggregate status for digger/plan or digger/apply + if len(jobs) > 0 { + var err error + if scheduler.IsPlanJobs(jobs) { + err = prService.SetStatus(prNumber, "pending", "digger/plan") + } else { + err = prService.SetStatus(prNumber, "pending", "digger/apply") + } + if err != nil { + log.Printf("error setting status: %v", err) + return fmt.Errorf("error setting pr status: %v", err) + } + + } else { + err := prService.SetStatus(prNumber, "success", "digger/plan") + if err != nil { + log.Printf("error setting status: %v", err) + return fmt.Errorf("error setting pr status: %v", err) + } + err = prService.SetStatus(prNumber, "success", "digger/apply") + if err != nil { + log.Printf("error setting status: %v", err) + return fmt.Errorf("error setting pr status: %v", err) + } + } + + return nil +} + +func GetWorkflowIdAndUrlFromDiggerJobId(client *github.Client, repoOwner string, repoName string, diggerJobID string) (int64, string, error) { + timeFilter := time.Now().Add(-5 * time.Minute) + runs, _, err := client.Actions.ListRepositoryWorkflowRuns(context.Background(), repoOwner, repoName, &github.ListWorkflowRunsOptions{ + Created: ">=" + timeFilter.Format(time.RFC3339), + }) + if err != nil { + return 0, "#", fmt.Errorf("error listing workflow runs %v", err) + } + + for _, workflowRun := range runs.WorkflowRuns { + println(*workflowRun.ID) + workflowjobs, _, err := client.Actions.ListWorkflowJobs(context.Background(), repoOwner, repoName, *workflowRun.ID, nil) + if err != nil { + return 0, "#", fmt.Errorf("error listing workflow jobs for run %v %v", workflowRun.ID, err) + } + + for _, workflowjob := range workflowjobs.Jobs { + for _, step := range workflowjob.Steps { + if strings.Contains(*step.Name, 
diggerJobID) { + return *workflowRun.ID, fmt.Sprintf("https://github.com/%v/%v/actions/runs/%v", repoOwner, repoName, *workflowRun.ID), nil + } + } + + } + } + return 0, "#", fmt.Errorf("workflow not found") +} diff --git a/next/utils/github_test.go b/next/utils/github_test.go new file mode 100644 index 000000000..e369c3cdb --- /dev/null +++ b/next/utils/github_test.go @@ -0,0 +1,33 @@ +package utils + +import ( + "github.com/stretchr/testify/assert" + "log" + "os" + "testing" +) + +func init() { + log.SetOutput(os.Stdout) + log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile) +} + +func TestGithubCloneWithInvalidTokenThrowsErr(t *testing.T) { + f := func(d string) error { return nil } + err := CloneGitRepoAndDoAction("https://github.com/diggerhq/private-repo", "main", "invalid-token", f) + assert.NotNil(t, err) +} + +func TestGithubCloneWithPublicRepoThrowsNoError(t *testing.T) { + token := os.Getenv("GITHUB_PAT_TOKEN") + f := func(d string) error { return nil } + err := CloneGitRepoAndDoAction("https://github.com/diggerhq/digger", "develop", token, f) + assert.Nil(t, err) +} + +func TestGithubCloneWithInvalidBranchThrowsError(t *testing.T) { + token := os.Getenv("GITHUB_PAT_TOKEN") + f := func(d string) error { return nil } + err := CloneGitRepoAndDoAction("https://github.com/diggerhq/digger", "not-a-branch", token, f) + assert.NotNil(t, err) +}