diff options
| author | Aleksandr Nogikh <nogikh@google.com> | 2023-07-10 17:56:37 +0200 |
|---|---|---|
| committer | Aleksandr Nogikh <nogikh@google.com> | 2023-07-12 12:39:28 +0000 |
| commit | 979d5fe2cebac428704d73daed5379e007abef68 (patch) | |
| tree | 0da4a39ddc610cde6803d64ff848c6c04b9b45f8 /pkg/repro/repro.go | |
| parent | 2f19aa4f873578acf44b3bf4d4ae98e52c2817ca (diff) | |
pkg/repro: request VMs outside of createInstances()
In the current code, there's a possibility that we write to ctx.bootRequests
after it was quickly closed. That could happen when we immediately abort
the reproduction process after it's started.
To avoid this, don't send elements over the bootRequests channel in the
createInstances() function.
Hopefully closes #4016.
Diffstat (limited to 'pkg/repro/repro.go')
| -rw-r--r-- | pkg/repro/repro.go | 54 |
1 file changed, 28 insertions, 26 deletions
diff --git a/pkg/repro/repro.go b/pkg/repro/repro.go index 8d039e159..b774705e6 100644 --- a/pkg/repro/repro.go +++ b/pkg/repro/repro.go @@ -84,8 +84,12 @@ func Run(crashLog []byte, cfg *mgrconfig.Config, features *host.Features, report wg.Add(1) go func() { defer wg.Done() - ctx.createInstances(cfg, vmPool, vmIndexes) + ctx.createInstances(cfg, vmPool) }() + // Prepare VMs in advance. + for _, idx := range vmIndexes { + ctx.bootRequests <- idx + } // Wait until all VMs are really released. defer wg.Wait() return ctx.run() @@ -642,36 +646,34 @@ func (ctx *context) bisectProgs(progs []*prog.LogEntry, pred func([]*prog.LogEnt return ret, err } -func (ctx *context) createInstances(cfg *mgrconfig.Config, vmPool *vm.Pool, vmIndexes []int) { +func (ctx *context) createInstances(cfg *mgrconfig.Config, vmPool *vm.Pool) { var wg sync.WaitGroup - wg.Add(len(vmIndexes)) - for _, vmIndex := range vmIndexes { - ctx.bootRequests <- vmIndex + for vmIndex := range ctx.bootRequests { + wg.Add(1) + vmIndex := vmIndex go func() { defer wg.Done() - for vmIndex := range ctx.bootRequests { - var inst *instance.ExecProgInstance - maxTry := 3 - for try := 0; try < maxTry; try++ { - select { - case <-vm.Shutdown: - try = maxTry - continue - default: - } - var err error - inst, err = instance.CreateExecProgInstance(vmPool, vmIndex, cfg, - ctx.reporter, &instance.OptionalConfig{Logf: ctx.reproLogf}) - if err != nil { - ctx.reproLogf(0, "failed to init instance: %v", err) - time.Sleep(10 * time.Second) - continue - } - break + + var inst *instance.ExecProgInstance + maxTry := 3 + for try := 0; try < maxTry; try++ { + select { + case <-vm.Shutdown: + try = maxTry + continue + default: } - if inst == nil { - break + var err error + inst, err = instance.CreateExecProgInstance(vmPool, vmIndex, cfg, + ctx.reporter, &instance.OptionalConfig{Logf: ctx.reproLogf}) + if err != nil { + ctx.reproLogf(0, "failed to init instance: %v", err) + time.Sleep(10 * time.Second) + continue } + break + } + if inst 
!= nil { ctx.instances <- &reproInstance{execProg: inst, index: vmIndex} } }() |
