Fix linter

parent d3f089aaf4
commit 8a0fd66a89

6 changed files with 43 additions and 38 deletions
@@ -84,10 +84,12 @@ func newEnv(t *testing.T) (e *env, cancel func()) {
         workerName := fmt.Sprintf("worker%d", i)
         workerDir := filepath.Join(env.RootDir, workerName)

-        fileCache, err := filecache.New(filepath.Join(workerDir, "filecache"))
+        var fileCache *filecache.Cache
+        fileCache, err = filecache.New(filepath.Join(workerDir, "filecache"))
         require.NoError(t, err)

-        artifacts, err := artifact.NewCache(filepath.Join(workerDir, "artifacts"))
+        var artifacts *artifact.Cache
+        artifacts, err = artifact.NewCache(filepath.Join(workerDir, "artifacts"))
         require.NoError(t, err)

         w := worker.New(
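Note on the test-env hunk: `fileCache, err := ...` inside the loop declares a new err, which shadow-style checks (govet's shadow rule, for instance) flag when an err already exists in the enclosing scope; the switch to a separate var declaration plus plain `=` suggests that is what the linter complained about, though the commit message does not say. A minimal sketch of the distinction, with illustrative names rather than the repository's:

// Sketch of the := vs = distinction the hunk addresses (illustrative names,
// not the repository's code). Inside the loop, `c, err := f()` declares a
// second err scoped to the loop body, shadowing the outer one; declaring the
// value first and assigning with `=` reuses the outer err.
package main

import (
	"errors"
	"fmt"
)

type cache struct{ dir string }

// newCache stands in for a constructor like filecache.New.
func newCache(dir string) (*cache, error) {
	if dir == "" {
		return nil, errors.New("empty dir")
	}
	return &cache{dir: dir}, nil
}

func main() {
	var err error
	for i := 0; i < 2; i++ {
		// Shadowing form (what a shadow check flags):
		//   c, err := newCache(fmt.Sprintf("dir%d", i))

		// Non-shadowing form, as in the commit:
		var c *cache
		c, err = newCache(fmt.Sprintf("dir%d", i))
		if err != nil {
			break
		}
		fmt.Println(c.dir)
	}
	fmt.Println("final err:", err)
}

The cost is one extra line per call site; the benefit is that every assignment goes to the same err the surrounding code already checks.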
@@ -66,7 +66,7 @@ func (c *Cache) readLock(id build.ID) error {
         return ErrWriteLocked
     }

-    c.readLocked[id] += 1
+    c.readLocked[id]++
     return nil
 }

@@ -74,7 +74,7 @@ func (c *Cache) readUnlock(id build.ID) {
     c.mu.Lock()
     defer c.mu.Unlock()

-    c.readLocked[id] -= 1
+    c.readLocked[id]--
     if c.readLocked[id] == 0 {
         delete(c.readLocked, id)
     }
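The two cache hunks above are a pure style change (`x++`/`x--` instead of `x += 1`/`x -= 1`, the form golint-era checks prefer) inside what looks like a reference-counted read lock keyed by build ID. A compact sketch of that pattern, keeping the readLocked map and ErrWriteLocked from the diff but assuming a string ID and reducing the write-lock side to a simple flag:

// Sketch of a reference-counted read lock; build.ID is replaced by a plain
// string and write-lock bookkeeping is only a flag, both assumptions made for
// illustration.
package main

import (
	"errors"
	"fmt"
	"sync"
)

var ErrWriteLocked = errors.New("entry is write-locked")

type ID string

type Cache struct {
	mu          sync.Mutex
	writeLocked map[ID]bool
	readLocked  map[ID]int // number of active readers per entry
}

func NewCache() *Cache {
	return &Cache{writeLocked: map[ID]bool{}, readLocked: map[ID]int{}}
}

func (c *Cache) readLock(id ID) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.writeLocked[id] {
		return ErrWriteLocked
	}

	c.readLocked[id]++ // linters prefer ++ over += 1 here
	return nil
}

func (c *Cache) readUnlock(id ID) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.readLocked[id]--
	if c.readLocked[id] == 0 {
		delete(c.readLocked, id) // drop the counter once the last reader leaves
	}
}

func main() {
	c := NewCache()
	_ = c.readLock("abc")
	_ = c.readLock("abc")
	c.readUnlock("abc")
	c.readUnlock("abc")
	fmt.Println(len(c.readLocked)) // 0
}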
distbuild/pkg/dist/schedule.go (vendored, 6 changes)
@@ -38,13 +38,13 @@ func (c *Coordinator) scheduleJob(job *build.Job) *scheduledJob {

     if scheduled, ok := c.scheduledJobs[job.ID]; ok {
         return scheduled
-    } else {
-        scheduled = newScheduledJob(job)
+    }
+
+    scheduled := newScheduledJob(job)
     c.scheduledJobs[job.ID] = scheduled
     c.queue = append(c.queue, scheduled)
     return scheduled
-    }
 }

 func (c *Coordinator) pickJob() (*build.Job, bool) {
     c.mu.Lock()
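The schedule.go hunk removes an else whose sibling branch ends in return, the shape revive/golint report as indent-error-flow (an inference; the commit message only says the linter was fixed). A minimal get-or-create sketch in the same shape, with illustrative types:

// Get-or-create in the shape of scheduleJob, with illustrative types: return
// early on the cache hit instead of nesting the miss path in an else block.
package main

import "fmt"

type job struct{ id string }

type scheduler struct {
	scheduled map[string]*job
	queue     []*job
}

func (s *scheduler) schedule(id string) *job {
	if j, ok := s.scheduled[id]; ok {
		return j // hit: nothing to do
	}

	// Miss path sits at the top indentation level; no else needed.
	j := &job{id: id}
	s.scheduled[id] = j
	s.queue = append(s.queue, j)
	return j
}

func main() {
	s := &scheduler{scheduled: map[string]*job{}}
	a := s.schedule("a")
	fmt.Println(a == s.schedule("a"), len(s.queue)) // true 1
}

Early return keeps the miss path un-nested and lets `scheduled := ...` be a fresh short declaration instead of reusing the if-scoped variable.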
@@ -24,12 +24,14 @@ func Send(dir string, w io.Writer) error {
         return nil
     }

-    if info.IsDir() {
+    switch {
+    case info.IsDir():
         return tw.WriteHeader(&tar.Header{
             Name:     rel,
             Typeflag: tar.TypeDir,
         })
-    } else {
+
+    default:
         h := &tar.Header{
             Typeflag: tar.TypeReg,
             Name:     rel,
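The tar-stream hunk rewrites the if/else over info.IsDir() as a switch with a default branch, the kind of rewrite gocritic-style checks suggest (again inferred from the diff, not stated). Below is a self-contained sketch of a Send-like walk written that way; the traversal, header fields, and error handling are simplified assumptions, not the package's code:

// Sketch of a Send-style tar walk using switch/case instead of if/else.
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// send writes a minimal tar stream describing dir to w.
func send(dir string, w io.Writer) error {
	tw := tar.NewWriter(w)
	defer tw.Close()

	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		if rel == "." {
			return nil
		}

		switch {
		case info.IsDir():
			// Directories only need a header entry.
			return tw.WriteHeader(&tar.Header{Name: rel, Typeflag: tar.TypeDir})

		default:
			// Regular files get a header plus their contents.
			h := &tar.Header{Name: rel, Typeflag: tar.TypeReg, Mode: 0644, Size: info.Size()}
			if err := tw.WriteHeader(h); err != nil {
				return err
			}
			f, err := os.Open(path)
			if err != nil {
				return err
			}
			defer f.Close()
			_, err = io.Copy(tw, f)
			return err
		}
	})
}

func main() {
	if err := send(".", io.Discard); err != nil {
		fmt.Println("send failed:", err)
	}
}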
@@ -63,7 +63,8 @@ func executeCmd(ctx context.Context, cmd *build.Cmd) (stdout, stderr []byte, exi
     if cmd.CatOutput != "" {
         err = ioutil.WriteFile(cmd.CatOutput, []byte(cmd.CatTemplate), 0666)
         return
-    } else {
+    }
+
     p := exec.CommandContext(ctx, cmd.Exec[0], cmd.Exec[1:]...)
     p.Dir = cmd.WorkingDirectory
     p.Env = cmd.Environ
@@ -84,9 +85,8 @@ func executeCmd(ctx context.Context, cmd *build.Cmd) (stdout, stderr []byte, exi
     }
     return
-    }
 }

-func (w *Worker) prepareSourceDir(sourceDir string, sourceFiles map[build.ID]string) (unlock func(), err error) {
+func (w *Worker) prepareSourceDir(sourceDir string, sourceFiles map[build.ID]string) (unlockSources func(), err error) {
     var unlocks []func()
     doUnlock := func() {
         for _, u := range unlocks {
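executeCmd and the two renamed helpers rely on named result parameters with bare returns, which is why `err = ioutil.WriteFile(...); return` needs no value at the return site. A small sketch of that style with a hypothetical command struct (os.WriteFile standing in for ioutil.WriteFile):

// Named-result style: assign to the named err and bare-return. The struct and
// file mode are illustrative, not the repository's definitions.
package main

import (
	"errors"
	"fmt"
	"os"
)

type catCmd struct {
	CatOutput   string
	CatTemplate string
}

// runCat writes the template to the output path. With a named result, a bare
// `return` hands back whatever err currently holds.
func runCat(cmd *catCmd) (err error) {
	if cmd.CatOutput != "" {
		err = os.WriteFile(cmd.CatOutput, []byte(cmd.CatTemplate), 0666)
		return
	}

	err = errors.New("nothing to do: only the CatOutput path is sketched here")
	return
}

func main() {
	tmp, err := os.CreateTemp("", "cat-*.txt")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	tmp.Close()

	fmt.Println(runCat(&catCmd{CatOutput: tmp.Name(), CatTemplate: "hello"})) // <nil>
}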
@@ -119,12 +119,12 @@ func (w *Worker) prepareSourceDir(sourceDir string, sourceFiles map[build.ID]str
         }
     }

-    unlock = doUnlock
+    unlockSources = doUnlock
     doUnlock = nil
     return
 }

-func (w *Worker) lockDeps(deps []build.ID) (paths map[build.ID]string, unlock func(), err error) {
+func (w *Worker) lockDeps(deps []build.ID) (paths map[build.ID]string, unlockDeps func(), err error) {
     var unlocks []func()
     doUnlock := func() {
         for _, u := range unlocks {
@@ -150,7 +150,7 @@ func (w *Worker) lockDeps(deps []build.ID) (paths map[build.ID]string, unlock fu
         paths[id] = filepath.Join(path, outputDirName)
     }

-    unlock = doUnlock
+    unlockDeps = doUnlock
     doUnlock = nil
     return
 }
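Both worker hunks rename the returned cleanup (unlock becomes unlockSources and unlockDeps) while keeping the doUnlock handoff visible in the context lines: collect per-item unlock callbacks, hand the combined callback to the named result on success, and nil the local so a deferred rollback (assumed here; it is not visible in the hunks) stays quiet. A sketch of that ownership transfer with an illustrative locker:

// Sketch of the unlock-handoff pattern: lock everything or nothing, and on
// success transfer ownership of the cleanup to the caller via the named
// result. The locker type and the deferred rollback are assumptions made for
// illustration.
package main

import (
	"errors"
	"fmt"
)

type locker struct{ locked map[string]bool }

func (l *locker) lock(id string) (func(), error) {
	if l.locked[id] {
		return nil, errors.New("already locked: " + id)
	}
	l.locked[id] = true
	return func() { delete(l.locked, id) }, nil
}

func lockAll(l *locker, ids []string) (unlockAll func(), err error) {
	var unlocks []func()
	doUnlock := func() {
		for _, u := range unlocks {
			u()
		}
	}
	defer func() {
		if doUnlock != nil {
			doUnlock() // still owned locally: an error path, roll back
		}
	}()

	for _, id := range ids {
		u, lockErr := l.lock(id)
		if lockErr != nil {
			err = lockErr
			return
		}
		unlocks = append(unlocks, u)
	}

	unlockAll = doUnlock
	doUnlock = nil // success: the caller now owns the cleanup
	return
}

func main() {
	l := &locker{locked: map[string]bool{}}
	unlock, err := lockAll(l, []string{"a", "b"})
	fmt.Println(err, len(l.locked)) // <nil> 2
	unlock()
	fmt.Println(len(l.locked)) // 0
}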
@@ -173,18 +173,18 @@ func (w *Worker) runJob(ctx context.Context, spec *proto.JobSpec) (*proto.JobRes
             return
         }

-        if err := abort(); err != nil {
+        if err = abort(); err != nil {
             w.log.Warn("error aborting job", zap.Any("job_id", spec.Job.ID), zap.Error(err))
         }
     }()

     outputDir := filepath.Join(aRoot, outputDirName)
-    if err := os.Mkdir(outputDir, 0777); err != nil {
+    if err = os.Mkdir(outputDir, 0777); err != nil {
         return nil, err
     }

     sourceDir := filepath.Join(aRoot, srcDirName)
-    if err := os.Mkdir(sourceDir, 0777); err != nil {
+    if err = os.Mkdir(sourceDir, 0777); err != nil {
         return nil, err
     }

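The runJob hunk replaces `if err := ...` with `if err = ...`, reusing an err that evidently already exists in the function instead of declaring an if-scoped copy; a shadow check is the likely driver, though that is an inference. A short sketch with hypothetical names:

// Sketch of the := vs = change in runJob's error checks: `if err := f();`
// declares a new err visible only inside the if, shadowing an outer err.
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

func setup(root string) error {
	outputDir := filepath.Join(root, "output")

	var err error

	// Shadowing form (flagged when an outer err exists):
	//   if err := os.Mkdir(outputDir, 0777); err != nil { return err }

	// Non-shadowing form, as in the commit:
	if err = os.Mkdir(outputDir, 0777); err != nil {
		return err
	}
	if err = os.Mkdir(filepath.Join(root, "src"), 0777); err != nil {
		return err
	}
	return err
}

func main() {
	root, err := os.MkdirTemp("", "job-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	fmt.Println(setup(root))                         // <nil>
	fmt.Println(errors.Is(setup(root), os.ErrExist)) // true: dirs already exist
}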
@@ -117,6 +117,7 @@ func (w *Worker) Run(ctx context.Context) error {
             zap.Int("num_jobs", len(rsp.JobsToRun)))

         for _, spec := range rsp.JobsToRun {
+            spec := spec
             result, err := w.runJob(ctx, &spec)
             if err != nil {
                 errStr := fmt.Sprintf("job %s failed: %v", spec.Job.ID, err)
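The added `spec := spec` copies the range variable before &spec is passed to runJob; rules like gosec's implicit-memory-aliasing check (G601) or scopelint commonly require this, which is again an assumption since the commit only says the linter was fixed. A sketch of why the copy matters under pre-Go 1.22 loop semantics:

// Why the per-iteration copy matters when taking the address of a range
// variable (pre-Go 1.22, where the loop variable is reused across iterations).
package main

import "fmt"

type jobSpec struct{ ID string }

func collect(specs []jobSpec) []*jobSpec {
	var out []*jobSpec
	for _, spec := range specs {
		spec := spec             // copy; without it every &spec aliases one variable
		out = append(out, &spec) // safe to keep this pointer past the iteration
	}
	return out
}

func main() {
	ptrs := collect([]jobSpec{{ID: "a"}, {ID: "b"}, {ID: "c"}})
	for _, p := range ptrs {
		fmt.Println(p.ID) // a b c, not c c c
	}
}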