parent 6cddfb9a1f
commit d5b2292474
1 changed file with 156 additions and 153 deletions

main.go
@@ -1,16 +1,17 @@
 package main
 
 import (
+	"code.gitea.io/sdk/gitea"
 	b64 "encoding/base64"
 	"encoding/json"
 	"fmt"
-	nomad "github.com/hashicorp/nomad/api"
+	"github.com/caarlos0/env/v7"
 	consul "github.com/hashicorp/consul/api"
+	nomad "github.com/hashicorp/nomad/api"
 	"io"
 	"log"
 	"net/http"
 	"strings"
-	"github.com/caarlos0/env/v7"
-	"code.gitea.io/sdk/gitea"
 )
+
 type GitUser struct {
@@ -107,12 +108,18 @@ func nomadToGiteaStatus(summary *nomad.TaskGroupSummary) gitea.StatusState {
 	return gitea.StatusPending
 }
 
+func notifSummary(notification *GiteaNotification) string {
+	return fmt.Sprintf("%s/%s:%s", notification.Repository.Owner.Username, notification.Repository.Name, notification.After)
+}
+
 func lifecycle(notification *GiteaNotification, dispatch *nomad.JobDispatchResponse, giteaCreds *SecretGitea) {
-	log.Printf("[lifecyle] Gitea URL: %s\n", giteaCreds.Url)
+	notifInfo := notifSummary(notification)
+
+	log.Printf("[lifecyle] Commit to build: %s, Gitea URL: %s\n", notifInfo, giteaCreds.Url)
 	// init Gitea
 	forge, err := gitea.NewClient(giteaCreds.Url, gitea.SetToken(giteaCreds.Token))
 	if err != nil {
-		log.Printf("Unable to create gitea client: %+v\n", err)
+		log.Printf("Unable to create gitea client for %s: %+v\n", notifInfo, err)
 		return
 	}
 
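Note: the Gitea SDK usage in this hunk follows a simple create-client-then-post-status pattern. A self-contained sketch of that pattern, with hypothetical URL, token, owner/repo and commit values standing in for what lifecycle() receives:

package main

import (
	"log"

	"code.gitea.io/sdk/gitea"
)

func main() {
	// Hypothetical credentials; lifecycle() gets the real ones from a SecretGitea descriptor.
	forge, err := gitea.NewClient("https://gitea.example.org", gitea.SetToken("my-token"))
	if err != nil {
		log.Fatalf("unable to create gitea client: %+v", err)
	}

	// Post a commit status, as the lifecycle loop does on every state change.
	_, _, err = forge.CreateStatus("owner", "repo", "0123abcd", gitea.CreateStatusOption{
		State:       gitea.StatusPending,
		TargetURL:   "https://albatros.example.org/build?log=stderr&job=builder/dispatch-0123",
		Description: "build",
	})
	if err != nil {
		log.Printf("can't update gitea repo: %+v", err)
	}
}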
@@ -127,17 +134,17 @@ func lifecycle(notification *GiteaNotification, dispatch *nomad.JobDispatchResponse, giteaCreds *SecretGitea) {
 		// Blocking fetch on deployment info
 		job, meta, err := jobs.Summary(dispatch.DispatchedJobID, &queryOpt)
 		if err != nil {
-			log.Printf("[lifecycle] can't fetch job: %+v\n", err)
+			log.Printf("[lifecycle] can't fetch job for %s: %+v\n", notifInfo, err)
 			break
 		}
 		queryOpt.WaitIndex = meta.LastIndex
 
-		summary, ok := job.Summary["runner"];
+		summary, ok := job.Summary["runner"]
 		if !ok {
-			log.Printf("[lifecycle] your job %s must contain a 'runner' task\n", job.JobID)
+			log.Printf("[lifecycle] Job %s for %s must contain a 'runner' task\n", job.JobID, notifInfo)
 			break
 		}
-		log.Printf("[lifecycle] Summary for job %s: %+v\n", job.JobID, summary)
+		log.Printf("[lifecycle] Task status for job %s on %s: %+v\n", job.JobID, notifInfo, summary)
 
 		// Compute new job state
 		state := nomadToGiteaStatus(&summary)
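Note: queryOpt.WaitIndex = meta.LastIndex is the core of the polling strategy here. Nomad blocking queries hang until the cluster state passes the given index, so each iteration only wakes up on change instead of busy-polling. A self-contained sketch of the pattern, with a hypothetical dispatched job ID:

package main

import (
	"log"
	"time"

	nomad "github.com/hashicorp/nomad/api"
)

func main() {
	client, err := nomad.NewClient(nomad.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	jobs := client.Jobs()

	// WaitTime caps how long each blocking call may hang before returning unchanged data.
	queryOpt := nomad.QueryOptions{WaitTime: 5 * time.Minute}
	for i := 0; i < 1000; i++ { // bounded, like the 1k-refresh guard further down
		job, meta, err := jobs.Summary("builder/dispatch-0123", &queryOpt) // hypothetical ID
		if err != nil {
			log.Printf("can't fetch job: %+v", err)
			break
		}
		// Reusing LastIndex makes the next Summary() call block until the
		// job summary actually changes.
		queryOpt.WaitIndex = meta.LastIndex
		log.Printf("summary for %s: %+v", job.JobID, job.Summary["runner"])
	}
}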
@@ -147,7 +154,7 @@ func lifecycle(notification *GiteaNotification, dispatch *nomad.JobDispatchResponse, giteaCreds *SecretGitea) {
 			notification.Repository.Owner.Username,
 			notification.Repository.Name,
 			notification.After,
-			gitea.CreateStatusOption {
+			gitea.CreateStatusOption{
 				State: state,
 				TargetURL: GlobalConfig.AlbatrosURL + "/build?log=stderr&job=" + dispatch.DispatchedJobID,
 				Description: "build",
@@ -155,17 +162,12 @@ func lifecycle(notification *GiteaNotification, dispatch *nomad.JobDispatchResponse, giteaCreds *SecretGitea) {
 			})
 
 		if err != nil {
-			log.Printf(
-				"[lifecycle] can't update gitea repo %s/%s:%s: %+v\n",
-				notification.Repository.Owner.Username,
-				notification.Repository.Name,
-				notification.After,
-				err)
+			log.Printf("[lifecycle] can't update gitea repo %s for job %s: %+v\n", notifInfo, job.JobID, err)
 		}
 
 		// Continue the loop only if the job is pending
 		if state != gitea.StatusPending {
-			log.Printf("Job %s teminated with status %s\n", job.JobID, state)
+			log.Printf("Job %s for %s terminated with status %s\n", job.JobID, notifInfo, state)
 			break
 		}
 	}
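Note: the loop exits as soon as nomadToGiteaStatus reports anything other than gitea.StatusPending. The body of that mapping sits outside this diff; a plausible sketch (an assumption, not the file's actual code) built from the nomad.TaskGroupSummary counters:

func nomadToGiteaStatus(summary *nomad.TaskGroupSummary) gitea.StatusState {
	// Assumed policy: any failed or lost allocation fails the build...
	if summary.Failed > 0 || summary.Lost > 0 {
		return gitea.StatusFailure
	}
	// ...a fully drained group succeeds...
	if summary.Complete > 0 && summary.Queued == 0 && summary.Starting == 0 && summary.Running == 0 {
		return gitea.StatusSuccess
	}
	// ...and anything still queued, starting or running stays pending,
	// matching the "return gitea.StatusPending" tail visible in the first hunk.
	return gitea.StatusPending
}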
@@ -174,7 +176,7 @@ func lifecycle(notification *GiteaNotification, dispatch *nomad.JobDispatchResponse, giteaCreds *SecretGitea) {
 		// To avoid dangerous infinite loops, we put an upperbound here
 		// of 1k refresh here. Reaching this limit will allow us to know
 		// that something did not work as expected...
-		log.Println("!!! [lifecycle] we refreshed 1k times this deployment and it's still running, giving up...")
+		log.Printf("!!! [lifecycle] we refreshed 1k times the job of %s and it's still running, giving up...\n", notifInfo)
 	}
 }
 
@@ -198,21 +200,22 @@ func hook(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Can't parse your request JSON", http.StatusBadRequest)
 		return
 	}
+	notifInfo := notifSummary(&notification)
 
-	log.Printf("Gitea notification: %+v\n", notification)
+	log.Printf("Received gitea notification for %s\n", notifInfo)
 
 	// Fetch our repo descriptor
 	kv := ConsulClient.KV()
 	encodedRepoUrl := b64.StdEncoding.EncodeToString([]byte(notification.Repository.CloneUrl))
-	key := "albatros/"+encodedRepoUrl
-	log.Printf("Fetching key %s\n", key)
+	key := "albatros/" + encodedRepoUrl
+	log.Printf("Fetching key %s for %s\n", key, notifInfo)
 	pair, _, err := kv.Get(key, nil)
 	if err != nil {
 		http.Error(w, "Can't fetch the repo descriptor in Consul", http.StatusInternalServerError)
 		return
 	}
 	if pair == nil || pair.Value == nil {
-		http.Error(w, "You must declare your repo in Consul in order to build it", http.StatusForbidden)
+		http.Error(w, "You must declare %s in Consul in order to build it", http.StatusForbidden)
 		return
 	}
 	// Parse our repo descriptor
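Note: the Forbidden branch encodes the contract that a repository must be declared in Consul before albatros will build it, under the key "albatros/" + base64(clone URL). A sketch of registering one, with a hypothetical clone URL and a placeholder descriptor payload:

package main

import (
	b64 "encoding/base64"
	"log"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Same key derivation as hook(): "albatros/" + base64 of the clone URL.
	cloneUrl := "https://gitea.example.org/owner/repo.git" // hypothetical
	key := "albatros/" + b64.StdEncoding.EncodeToString([]byte(cloneUrl))

	// The repo descriptor schema is defined elsewhere in albatros; an empty
	// JSON object stands in for it here.
	_, err = client.KV().Put(&consul.KVPair{Key: key, Value: []byte(`{}`)}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("declared %s", key)
}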
@@ -241,12 +244,11 @@ func hook(w http.ResponseWriter, r *http.Request) {
 	// 2. Transform the consul object into a nomad payload
 
 	jobs := NomadClient.Jobs()
-	dres, dmeta, err := jobs.Dispatch("builder", meta, []byte{}, "albatros", &nomad.WriteOptions{})
+	dres, _, err := jobs.Dispatch("builder", meta, []byte{}, "albatros", &nomad.WriteOptions{})
 	if err != nil {
 		http.Error(w, "Can't submit your job to Nomad", http.StatusInternalServerError)
 	}
-	log.Printf("Query info: %+v\n", dmeta)
-	log.Printf("Job info: %+v\n", dres)
+	log.Printf("Created job %s for %s\n", dres.DispatchedJobID, notifInfo)
 
 	// Start a lifecycle observer to update gitea status
 	// @FIXME: need to inject gitea descriptor
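Note: on dispatch failure this handler calls http.Error without returning, and dres is nil when err is non-nil, so the following dres.DispatchedJobID access would panic. A sketch of the same call with an early return; dispatchBuild is a hypothetical helper, and the nomad import is the one from main.go:

func dispatchBuild(client *nomad.Client, meta map[string]string) (string, error) {
	jobs := client.Jobs()
	dres, _, err := jobs.Dispatch("builder", meta, []byte{}, "albatros", &nomad.WriteOptions{})
	if err != nil {
		// Returning here avoids dereferencing a nil dres afterwards.
		return "", err
	}
	return dres.DispatchedJobID, nil
}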
@@ -264,7 +266,7 @@ func build(w http.ResponseWriter, r *http.Request) {
 	}
 
 	logType, ok := q["log"]
-	log.Printf("%+v\n", q)
+	log.Printf("Follow logs for %s\n", jobID)
 	if !ok || len(logType) < 1 || !(logType[0] == "stdout" || logType[0] == "stderr") {
 		http.Error(w, "Missing or wrong query parameter 'log'.\nTry adding '?log=stdout' or '?log=stderr'", http.StatusBadRequest)
 		return
@@ -290,17 +292,17 @@ func build(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Allocation does not exist anymore. Allocation might be garbage collected", http.StatusNotFound)
 		return
 	}
-	log.Printf("Alloc: %+v\n", myAlloc)
+	log.Printf("Selected alloc %s for job %s\n", myAlloc.ID, jobID)
 
 	allocFS := NomadClient.AllocFS()
 	scancel := make(chan struct{})
 	sframe, serr := allocFS.Logs(myAlloc, true, "runner", logFilter, "start", 0, scancel, &nomad.QueryOptions{})
 
 	// stream logs to client's browser
-	build_loop:
+build_loop:
 	for {
 		select {
-		case <- r.Context().Done():
+		case <-r.Context().Done():
 			// client disconnect, cleaning
 			break build_loop
 		case nomadErr := <-serr:
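Note: the hunk cuts off before the frame-handling case of the select. A sketch of what a consumer of sframe typically looks like, assuming main.go's imports; streamFrames is a hypothetical helper, and the frames carry raw log bytes that are written and flushed to the browser:

func streamFrames(w http.ResponseWriter, sframe <-chan *nomad.StreamFrame) {
	for frame := range sframe {
		if frame == nil || len(frame.Data) == 0 {
			continue
		}
		// Relay the log chunk and flush immediately so the browser
		// renders the build output as it happens.
		w.Write(frame.Data)
		if flusher, ok := w.(http.Flusher); ok {
			flusher.Flush()
		}
	}
}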
@@ -324,7 +326,7 @@ func build(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 
-	log.Printf("Cleaning %+v\n", myAlloc)
+	log.Printf("Connection closed, cleaning listeners for %s\n", jobID)
 	scancel <- struct{}{}
 }
 
@@ -336,6 +338,7 @@ type config struct {
 	// @TODO get nomad config from env
 	// @TODO get consul config from env
 }
+
 var GlobalConfig config
 
 func main() {
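Note: the blank line added here is gofmt-style housekeeping around the config machinery. For context, a sketch of how github.com/caarlos0/env/v7 would fill this struct; AlbatrosURL is real (it builds the TargetURL above), while its env tag name is an assumption:

package main

import (
	"log"

	"github.com/caarlos0/env/v7"
)

type config struct {
	AlbatrosURL string `env:"ALBATROS_URL"` // tag name assumed
	// @TODO get nomad config from env
	// @TODO get consul config from env
}

var GlobalConfig config

func main() {
	if err := env.Parse(&GlobalConfig); err != nil {
		log.Fatalf("unable to parse config: %+v", err)
	}
	log.Printf("albatros URL: %s", GlobalConfig.AlbatrosURL)
}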