aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--executor/common.h16
-rw-r--r--executor/executor.cc40
-rw-r--r--executor/executor_runner.h46
-rw-r--r--executor/files.h23
-rw-r--r--executor/snapshot.h1
-rw-r--r--pkg/flatrpc/conn_test.go2
-rw-r--r--pkg/flatrpc/flatrpc.fbs17
-rw-r--r--pkg/flatrpc/flatrpc.go207
-rw-r--r--pkg/flatrpc/flatrpc.h174
-rw-r--r--pkg/fuzzer/queue/queue.go67
-rw-r--r--pkg/fuzzer/queue/queue_test.go7
-rw-r--r--pkg/ifaceprobe/ifaceprobe.go179
-rw-r--r--pkg/rpcserver/local.go3
-rw-r--r--pkg/rpcserver/mocks/Manager.go10
-rw-r--r--pkg/rpcserver/rpcserver.go25
-rw-r--r--pkg/rpcserver/runner.go31
-rw-r--r--pkg/runtest/run.go5
-rw-r--r--pkg/vminfo/syscalls.go36
-rw-r--r--pkg/vminfo/vminfo.go3
-rw-r--r--pkg/vminfo/vminfo_test.go27
-rw-r--r--syz-manager/manager.go8
-rw-r--r--tools/syz-diff/diff.go3
-rw-r--r--tools/syz-execprog/execprog.go36
23 files changed, 577 insertions, 389 deletions
diff --git a/executor/common.h b/executor/common.h
index 123723e5a..7425a8bff 100644
--- a/executor/common.h
+++ b/executor/common.h
@@ -705,9 +705,21 @@ static void loop(void)
last_executed = now;
}
// TODO: adjust timeout for progs with syz_usb_connect call.
- if ((now - start < program_timeout_ms) &&
- (now - start < min_timeout_ms || now - last_executed < inactive_timeout_ms))
+ // If the max program timeout is exceeded, kill unconditionally.
+ if (now - start > program_timeout_ms)
+ goto kill_test;
+ // If the request type is not a normal test program (currently, glob expansion request),
+ // then wait for the full timeout (these requests don't update the number of completed calls
+ // + they are more important and we don't want timing flakes).
+ if (request_type != rpc::RequestType::Program)
continue;
+ // Always wait at least the min timeout for each program.
+ if (now - start < min_timeout_ms)
+ continue;
+ // If it keeps completing syscalls, then don't kill it.
+ if (now - last_executed < inactive_timeout_ms)
+ continue;
+ kill_test:
#else
if (current_time_ms() - start < /*{{{PROGRAM_TIMEOUT_MS}}}*/)
continue;
diff --git a/executor/executor.cc b/executor/executor.cc
index e8e1cb000..702f8c3b4 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -146,6 +146,7 @@ struct alignas(8) OutputData {
std::atomic<uint32> consumed;
std::atomic<uint32> completed;
std::atomic<uint32> num_calls;
+ std::atomic<flatbuffers::Offset<flatbuffers::Vector<uint8_t>>> result_offset;
struct {
// Call index in the test program (they may be out-of-order is some syscalls block).
int index;
@@ -159,6 +160,7 @@ struct alignas(8) OutputData {
consumed.store(0, std::memory_order_relaxed);
completed.store(0, std::memory_order_relaxed);
num_calls.store(0, std::memory_order_relaxed);
+ result_offset.store(0, std::memory_order_relaxed);
}
};
@@ -280,6 +282,7 @@ static bool flag_threaded;
static bool flag_comparisons;
static uint64 request_id;
+static rpc::RequestType request_type;
static uint64 all_call_signal;
static bool all_extra_signal;
@@ -417,6 +420,7 @@ struct handshake_req {
struct execute_req {
uint64 magic;
uint64 id;
+ rpc::RequestType type;
uint64 exec_flags;
uint64 all_call_signal;
bool all_extra_signal;
@@ -791,6 +795,7 @@ void receive_execute()
void parse_execute(const execute_req& req)
{
request_id = req.id;
+ request_type = req.type;
flag_collect_signal = req.exec_flags & (1 << 0);
flag_collect_cover = req.exec_flags & (1 << 1);
flag_dedup_cover = req.exec_flags & (1 << 2);
@@ -799,9 +804,9 @@ void parse_execute(const execute_req& req)
all_call_signal = req.all_call_signal;
all_extra_signal = req.all_extra_signal;
- debug("[%llums] exec opts: procid=%llu threaded=%d cover=%d comps=%d dedup=%d signal=%d "
+ debug("[%llums] exec opts: reqid=%llu type=%llu procid=%llu threaded=%d cover=%d comps=%d dedup=%d signal=%d "
" sandbox=%d/%d/%d/%d timeouts=%llu/%llu/%llu kernel_64_bit=%d\n",
- current_time_ms() - start_time_ms, procid, flag_threaded, flag_collect_cover,
+ current_time_ms() - start_time_ms, request_id, (uint64)request_type, procid, flag_threaded, flag_collect_cover,
flag_comparisons, flag_dedup_cover, flag_collect_signal, flag_sandbox_none, flag_sandbox_setuid,
flag_sandbox_namespace, flag_sandbox_android, syscall_timeout_ms, program_timeout_ms, slowdown_scale,
is_kernel_64_bit);
@@ -837,9 +842,35 @@ void realloc_output_data()
#endif
}
+void execute_glob()
+{
+ const char* pattern = (const char*)input_data;
+ const auto& files = Glob(pattern);
+ size_t size = 0;
+ for (const auto& file : files)
+ size += file.size() + 1;
+ mmap_output(kMaxOutput);
+ ShmemBuilder fbb(output_data, kMaxOutput, true);
+ uint8_t* pos = nullptr;
+ auto off = fbb.CreateUninitializedVector(size, &pos);
+ for (const auto& file : files) {
+ memcpy(pos, file.c_str(), file.size() + 1);
+ pos += file.size() + 1;
+ }
+ output_data->consumed.store(fbb.GetSize(), std::memory_order_release);
+ output_data->result_offset.store(off, std::memory_order_release);
+}
+
// execute_one executes program stored in input_data.
void execute_one()
{
+ if (request_type == rpc::RequestType::Glob) {
+ execute_glob();
+ return;
+ }
+ if (request_type != rpc::RequestType::Program)
+ failmsg("bad request type", "type=%llu", (uint64)request_type);
+
in_execute_one = true;
#if GOOS_linux
char buf[64];
@@ -1382,8 +1413,9 @@ flatbuffers::span<uint8_t> finish_output(OutputData* output, int proc_id, uint64
flatbuffers::Offset<flatbuffers::String> error_off = 0;
if (status == kFailStatus)
error_off = fbb.CreateString("process failed");
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> output_off = 0;
- if (process_output)
+ // If the request wrote a binary result (currently glob requests do this), use it instead of the output.
+ auto output_off = output->result_offset.load(std::memory_order_relaxed);
+ if (output_off.IsNull() && process_output)
output_off = fbb.CreateVector(*process_output);
auto exec_off = rpc::CreateExecResultRaw(fbb, req_id, proc_id, output_off, hanged, error_off, prog_info_off);
auto msg_off = rpc::CreateExecutorMessageRaw(fbb, rpc::ExecutorMessagesRaw::ExecResult,
diff --git a/executor/executor_runner.h b/executor/executor_runner.h
index c24886b10..a3b668893 100644
--- a/executor/executor_runner.h
+++ b/executor/executor_runner.h
@@ -23,7 +23,7 @@ inline std::ostream& operator<<(std::ostream& ss, const rpc::ExecRequestRawT& re
<< " flags=0x" << std::hex << static_cast<uint64>(req.flags)
<< " env_flags=0x" << std::hex << static_cast<uint64>(req.exec_opts->env_flags())
<< " exec_flags=0x" << std::hex << static_cast<uint64>(req.exec_opts->exec_flags())
- << " prod_data=" << std::dec << req.prog_data.size()
+ << " data_size=" << std::dec << req.data.size()
<< "\n";
}
@@ -87,7 +87,7 @@ private:
class Proc
{
public:
- Proc(Connection& conn, const char* bin, int id, ProcIDPool& proc_id_pool, int& restarting, const bool& corpus_triaged, int max_signal_fd, int cover_filter_fd,
+ Proc(Connection& conn, const char* bin, ProcIDPool& proc_id_pool, int& restarting, const bool& corpus_triaged, int max_signal_fd, int cover_filter_fd,
bool use_cover_edges, bool is_kernel_64_bit, uint32 slowdown, uint32 syscall_timeout_ms, uint32 program_timeout_ms)
: conn_(conn),
bin_(bin),
@@ -122,8 +122,10 @@ public:
if (wait_start_)
wait_end_ = current_time_ms();
// Restart every once in a while to not let too much state accumulate.
+ // Also restart if the request type differs as it affects program timeout.
constexpr uint64 kRestartEvery = 600;
if (state_ == State::Idle && ((corpus_triaged_ && restarting_ == 0 && freshness_ >= kRestartEvery) ||
+ req_type_ != msg.type ||
exec_env_ != msg.exec_opts->env_flags() || sandbox_arg_ != msg.exec_opts->sandbox_arg()))
Restart();
attempts_ = 0;
@@ -150,9 +152,9 @@ public:
// fork server is enabled, so we use quite large timeout. Child process can be slow
// due to global locks in namespaces and other things, so let's better wait than
// report false misleading crashes.
- uint64 timeout = 3 * program_timeout_ms_;
+ uint64 timeout = 3 * ProgramTimeoutMs();
#else
- uint64 timeout = program_timeout_ms_;
+ uint64 timeout = ProgramTimeoutMs();
#endif
// Sandbox setup can take significant time.
if (state_ == State::Handshaking)
@@ -211,6 +213,7 @@ private:
int req_pipe_ = -1;
int resp_pipe_ = -1;
int stdout_pipe_ = -1;
+ rpc::RequestType req_type_ = rpc::RequestType::Program;
rpc::ExecEnv exec_env_ = rpc::ExecEnv::NONE;
int64_t sandbox_arg_ = 0;
std::optional<rpc::ExecRequestRawT> msg_;
@@ -349,6 +352,7 @@ private:
debug("proc %d: handshaking to execute request %llu\n", id_, static_cast<uint64>(msg_->id));
ChangeState(State::Handshaking);
exec_start_ = current_time_ms();
+ req_type_ = msg_->type;
exec_env_ = msg_->exec_opts->env_flags() & ~rpc::ExecEnv::ResetState;
sandbox_arg_ = msg_->exec_opts->sandbox_arg();
handshake_req req = {
@@ -359,7 +363,7 @@ private:
.pid = static_cast<uint64>(id_),
.sandbox_arg = static_cast<uint64>(sandbox_arg_),
.syscall_timeout_ms = syscall_timeout_ms_,
- .program_timeout_ms = program_timeout_ms_,
+ .program_timeout_ms = ProgramTimeoutMs(),
.slowdown_scale = slowdown_,
};
if (write(req_pipe_, &req, sizeof(req)) != sizeof(req)) {
@@ -401,10 +405,11 @@ private:
else
all_call_signal |= 1ull << call;
}
- memcpy(req_shmem_.Mem(), msg_->prog_data.data(), std::min(msg_->prog_data.size(), kMaxInput));
+ memcpy(req_shmem_.Mem(), msg_->data.data(), std::min(msg_->data.size(), kMaxInput));
execute_req req{
.magic = kInMagic,
.id = static_cast<uint64>(msg_->id),
+ .type = msg_->type,
.exec_flags = static_cast<uint64>(msg_->exec_opts->exec_flags()),
.all_call_signal = all_call_signal,
.all_extra_signal = all_extra_signal,
@@ -425,7 +430,7 @@ private:
// Note: if the child process crashed during handshake and the request has ReturnError flag,
// we have not started executing the request yet.
uint64 elapsed = (current_time_ms() - exec_start_) * 1000 * 1000;
- uint8* prog_data = msg_->prog_data.data();
+ uint8* prog_data = msg_->data.data();
input_data = prog_data;
std::vector<uint8_t>* output = nullptr;
if (IsSet(msg_->flags, rpc::RequestFlag::ReturnOutput)) {
@@ -436,7 +441,9 @@ private:
output_.insert(output_.end(), tmp, tmp + strlen(tmp));
}
}
- uint32 num_calls = read_input(&prog_data);
+ uint32 num_calls = 0;
+ if (msg_->type == rpc::RequestType::Program)
+ num_calls = read_input(&prog_data);
auto data = finish_output(resp_mem_, id_, msg_->id, num_calls, elapsed, freshness_++, status, hanged, output);
conn_.Send(data.data(), data.size());
@@ -497,6 +504,7 @@ private:
return false;
}
if (flag_debug) {
+ const bool has_nl = output_.back() == '\n';
output_.resize(output_.size() + 1);
char* output = reinterpret_cast<char*>(output_.data()) + debug_output_pos_;
// During machine check we can execute some requests that legitimately fail.
@@ -508,12 +516,18 @@ private:
if (syzfail)
memcpy(syzfail, "NOTFAIL", strlen("NOTFAIL"));
}
- debug("proc %d: got output: %s\n", id_, output);
+ debug("proc %d: got output: %s%s", id_, output, has_nl ? "" : "\n");
output_.resize(output_.size() - 1);
debug_output_pos_ = output_.size();
}
return true;
}
+
+ uint32 ProgramTimeoutMs() const
+ {
+ // Glob requests can expand to >10K files and can take a while to run.
+ return program_timeout_ms_ * (req_type_ == rpc::RequestType::Program ? 1 : 10);
+ }
};
// Runner manages a set of test subprocesses (Proc's), receives new test requests from the manager,
@@ -530,7 +544,7 @@ public:
int max_signal_fd = max_signal_ ? max_signal_->FD() : -1;
int cover_filter_fd = cover_filter_ ? cover_filter_->FD() : -1;
for (int i = 0; i < num_procs; i++)
- procs_.emplace_back(new Proc(conn, bin, i, *proc_id_pool_, restarting_, corpus_triaged_,
+ procs_.emplace_back(new Proc(conn, bin, *proc_id_pool_, restarting_, corpus_triaged_,
max_signal_fd, cover_filter_fd, use_cover_edges_, is_kernel_64_bit_, slowdown_,
syscall_timeout_ms_, program_timeout_ms_));
@@ -644,7 +658,6 @@ private:
rpc::InfoRequestRawT info_req;
info_req.files = ReadFiles(conn_reply.files);
- info_req.globs = ReadGlobs(conn_reply.globs);
// This does any one-time setup for the requested features on the machine.
// Note: this can be called multiple times and must be idempotent.
@@ -701,13 +714,14 @@ private:
void Handle(rpc::ExecRequestRawT& msg)
{
- debug("recv exec request %llu: flags=0x%llx env=0x%llx exec=0x%llx size=%zu\n",
+ debug("recv exec request %llu: type=%llu flags=0x%llx env=0x%llx exec=0x%llx size=%zu\n",
static_cast<uint64>(msg.id),
+ static_cast<uint64>(msg.type),
static_cast<uint64>(msg.flags),
static_cast<uint64>(msg.exec_opts->env_flags()),
static_cast<uint64>(msg.exec_opts->exec_flags()),
- msg.prog_data.size());
- if (IsSet(msg.flags, rpc::RequestFlag::IsBinary)) {
+ msg.data.size());
+ if (msg.type == rpc::RequestType::Binary) {
ExecuteBinary(msg);
return;
}
@@ -783,9 +797,9 @@ private:
int fd = open(file.c_str(), O_WRONLY | O_CLOEXEC | O_CREAT, 0755);
if (fd == -1)
return {"binary file creation failed", {}};
- ssize_t wrote = write(fd, msg.prog_data.data(), msg.prog_data.size());
+ ssize_t wrote = write(fd, msg.data.data(), msg.data.size());
close(fd);
- if (wrote != static_cast<ssize_t>(msg.prog_data.size()))
+ if (wrote != static_cast<ssize_t>(msg.data.size()))
return {"binary file write failed", {}};
int stdin_pipe[2];
diff --git a/executor/files.h b/executor/files.h
index 7be826d0a..f1d2a6104 100644
--- a/executor/files.h
+++ b/executor/files.h
@@ -24,20 +24,25 @@ static std::vector<std::string> Glob(const std::string& pattern)
// because they cause recursion, or lead outside of the target glob
// (e.g. /proc/self/{root,cwd}).
// However, we want to keep few links: /proc/self, /proc/thread-self,
- // /sys/kernel/slab/kmalloc-64 (may be a link with slab merging).
+ // /sys/kernel/slab/kmalloc-64 (may be a link with slab merging),
+ // and cgroup links created in the test dir.
// This is a hacky way to do it b/c e.g. "self" will be matched in all paths,
// not just /proc. A proper fix would require writing completly custom version of glob
// that would support recursion and would allow using/not using links on demand.
+
buf.gl_readdir = [](void* dir) -> dirent* {
for (;;) {
struct dirent* ent = readdir(static_cast<DIR*>(dir));
if (!ent || ent->d_type != DT_LNK ||
!strcmp(ent->d_name, "self") ||
!strcmp(ent->d_name, "thread-self") ||
- !strcmp(ent->d_name, "kmalloc-64"))
+ !strcmp(ent->d_name, "kmalloc-64") ||
+ !strcmp(ent->d_name, "cgroup") ||
+ !strcmp(ent->d_name, "cgroup.cpu") ||
+ !strcmp(ent->d_name, "cgroup.net"))
return ent;
}
- },
+ };
buf.gl_stat = stat;
buf.gl_lstat = lstat;
int res = glob(pattern.c_str(), GLOB_MARK | GLOB_NOSORT | GLOB_ALTDIRFUNC, nullptr, &buf);
@@ -112,15 +117,3 @@ static std::vector<std::unique_ptr<rpc::FileInfoRawT>> ReadFiles(const std::vect
}
return results;
}
-
-static std::vector<std::unique_ptr<rpc::GlobInfoRawT>> ReadGlobs(const std::vector<std::string>& patterns)
-{
- std::vector<std::unique_ptr<rpc::GlobInfoRawT>> results;
- for (const auto& pattern : patterns) {
- auto info = std::make_unique<rpc::GlobInfoRawT>();
- info->name = pattern;
- info->files = Glob(pattern);
- results.push_back(std::move(info));
- }
- return results;
-}
diff --git a/executor/snapshot.h b/executor/snapshot.h
index 0cac33822..71c0b3940 100644
--- a/executor/snapshot.h
+++ b/executor/snapshot.h
@@ -245,6 +245,7 @@ static void SnapshotStart()
execute_req req = {
.magic = kInMagic,
.id = 0,
+ .type = rpc::RequestType::Program,
.exec_flags = static_cast<uint64>(msg->exec_flags()),
.all_call_signal = msg->all_call_signal(),
.all_extra_signal = msg->all_extra_signal(),
diff --git a/pkg/flatrpc/conn_test.go b/pkg/flatrpc/conn_test.go
index 3ab83996e..132fd1cdd 100644
--- a/pkg/flatrpc/conn_test.go
+++ b/pkg/flatrpc/conn_test.go
@@ -28,7 +28,6 @@ func TestConn(t *testing.T) {
RaceFrames: []string{"bar", "baz"},
Features: FeatureCoverage | FeatureLeak,
Files: []string{"file1"},
- Globs: []string{"glob1"},
}
executorMsg := &ExecutorMessage{
Msg: &ExecutorMessages{
@@ -102,7 +101,6 @@ func BenchmarkConn(b *testing.B) {
RaceFrames: []string{"bar", "baz"},
Features: FeatureCoverage | FeatureLeak,
Files: []string{"file1"},
- Globs: []string{"glob1"},
}
done := make(chan bool)
diff --git a/pkg/flatrpc/flatrpc.fbs b/pkg/flatrpc/flatrpc.fbs
index 0bd32b743..6d2307d6a 100644
--- a/pkg/flatrpc/flatrpc.fbs
+++ b/pkg/flatrpc/flatrpc.fbs
@@ -57,14 +57,12 @@ table ConnectReplyRaw {
features :Feature;
// Fuzzer reads these files inside of the VM and returns contents in InfoRequest.files.
files :[string];
- globs :[string];
}
table InfoRequestRaw {
error :string;
features :[FeatureInfoRaw];
files :[FileInfoRaw];
- globs :[GlobInfoRaw];
}
table InfoReplyRaw {
@@ -112,10 +110,16 @@ table ExecutorMessageRaw {
msg :ExecutorMessagesRaw;
}
+enum RequestType : uint64 {
+ // Normal test program request (data contains serialized prog.Prog).
+ Program,
+ // Binary test program (data contains compiled executable binary).
+ Binary,
+ // Request for file glob expansion (data contains the glob pattern).
+ Glob,
+}
+
enum RequestFlag : uint64 (bit_flags) {
- // If set, prog_data contains compiled executable binary
- // that needs to be written to disk and executed.
- IsBinary,
// If set, collect program output and return in output field.
ReturnOutput,
// If set, don't fail on program failures, instead return the error in error field.
@@ -163,9 +167,10 @@ struct ExecOptsRaw {
// Request to execute a test program.
table ExecRequestRaw {
id :int64;
+ type :RequestType;
// Bitmask of procs to avoid when executing this request, if possible.
avoid :uint64;
- prog_data :[uint8];
+ data :[uint8];
exec_opts :ExecOptsRaw;
flags :RequestFlag;
// Return all signal for these calls.
diff --git a/pkg/flatrpc/flatrpc.go b/pkg/flatrpc/flatrpc.go
index 8445cde2d..fd5b4f614 100644
--- a/pkg/flatrpc/flatrpc.go
+++ b/pkg/flatrpc/flatrpc.go
@@ -257,22 +257,46 @@ func (rcv ExecutorMessagesRaw) UnPack(table flatbuffers.Table) *ExecutorMessages
return nil
}
+type RequestType uint64
+
+const (
+ RequestTypeProgram RequestType = 0
+ RequestTypeBinary RequestType = 1
+ RequestTypeGlob RequestType = 2
+)
+
+var EnumNamesRequestType = map[RequestType]string{
+ RequestTypeProgram: "Program",
+ RequestTypeBinary: "Binary",
+ RequestTypeGlob: "Glob",
+}
+
+var EnumValuesRequestType = map[string]RequestType{
+ "Program": RequestTypeProgram,
+ "Binary": RequestTypeBinary,
+ "Glob": RequestTypeGlob,
+}
+
+func (v RequestType) String() string {
+ if s, ok := EnumNamesRequestType[v]; ok {
+ return s
+ }
+ return "RequestType(" + strconv.FormatInt(int64(v), 10) + ")"
+}
+
type RequestFlag uint64
const (
- RequestFlagIsBinary RequestFlag = 1
- RequestFlagReturnOutput RequestFlag = 2
- RequestFlagReturnError RequestFlag = 4
+ RequestFlagReturnOutput RequestFlag = 1
+ RequestFlagReturnError RequestFlag = 2
)
var EnumNamesRequestFlag = map[RequestFlag]string{
- RequestFlagIsBinary: "IsBinary",
RequestFlagReturnOutput: "ReturnOutput",
RequestFlagReturnError: "ReturnError",
}
var EnumValuesRequestFlag = map[string]RequestFlag{
- "IsBinary": RequestFlagIsBinary,
"ReturnOutput": RequestFlagReturnOutput,
"ReturnError": RequestFlagReturnError,
}
@@ -594,7 +618,6 @@ type ConnectReplyRawT struct {
RaceFrames []string `json:"race_frames"`
Features Feature `json:"features"`
Files []string `json:"files"`
- Globs []string `json:"globs"`
}
func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -640,19 +663,6 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
}
filesOffset = builder.EndVector(filesLength)
}
- globsOffset := flatbuffers.UOffsetT(0)
- if t.Globs != nil {
- globsLength := len(t.Globs)
- globsOffsets := make([]flatbuffers.UOffsetT, globsLength)
- for j := 0; j < globsLength; j++ {
- globsOffsets[j] = builder.CreateString(t.Globs[j])
- }
- ConnectReplyRawStartGlobsVector(builder, globsLength)
- for j := globsLength - 1; j >= 0; j-- {
- builder.PrependUOffsetT(globsOffsets[j])
- }
- globsOffset = builder.EndVector(globsLength)
- }
ConnectReplyRawStart(builder)
ConnectReplyRawAddDebug(builder, t.Debug)
ConnectReplyRawAddCover(builder, t.Cover)
@@ -666,7 +676,6 @@ func (t *ConnectReplyRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffse
ConnectReplyRawAddRaceFrames(builder, raceFramesOffset)
ConnectReplyRawAddFeatures(builder, t.Features)
ConnectReplyRawAddFiles(builder, filesOffset)
- ConnectReplyRawAddGlobs(builder, globsOffset)
return ConnectReplyRawEnd(builder)
}
@@ -695,11 +704,6 @@ func (rcv *ConnectReplyRaw) UnPackTo(t *ConnectReplyRawT) {
for j := 0; j < filesLength; j++ {
t.Files[j] = string(rcv.Files(j))
}
- globsLength := rcv.GlobsLength()
- t.Globs = make([]string, globsLength)
- for j := 0; j < globsLength; j++ {
- t.Globs[j] = string(rcv.Globs(j))
- }
}
func (rcv *ConnectReplyRaw) UnPack() *ConnectReplyRawT {
@@ -897,25 +901,8 @@ func (rcv *ConnectReplyRaw) FilesLength() int {
return 0
}
-func (rcv *ConnectReplyRaw) Globs(j int) []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
- if o != 0 {
- a := rcv._tab.Vector(o)
- return rcv._tab.ByteVector(a + flatbuffers.UOffsetT(j*4))
- }
- return nil
-}
-
-func (rcv *ConnectReplyRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(28))
- if o != 0 {
- return rcv._tab.VectorLen(o)
- }
- return 0
-}
-
func ConnectReplyRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(13)
+ builder.StartObject(12)
}
func ConnectReplyRawAddDebug(builder *flatbuffers.Builder, debug bool) {
builder.PrependBoolSlot(0, debug, false)
@@ -962,12 +949,6 @@ func ConnectReplyRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOf
func ConnectReplyRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
-func ConnectReplyRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(12, flatbuffers.UOffsetT(globs), 0)
-}
-func ConnectReplyRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(4, numElems, 4)
-}
func ConnectReplyRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
@@ -976,7 +957,6 @@ type InfoRequestRawT struct {
Error string `json:"error"`
Features []*FeatureInfoRawT `json:"features"`
Files []*FileInfoRawT `json:"files"`
- Globs []*GlobInfoRawT `json:"globs"`
}
func (t *InfoRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
@@ -1010,24 +990,10 @@ func (t *InfoRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
}
filesOffset = builder.EndVector(filesLength)
}
- globsOffset := flatbuffers.UOffsetT(0)
- if t.Globs != nil {
- globsLength := len(t.Globs)
- globsOffsets := make([]flatbuffers.UOffsetT, globsLength)
- for j := 0; j < globsLength; j++ {
- globsOffsets[j] = t.Globs[j].Pack(builder)
- }
- InfoRequestRawStartGlobsVector(builder, globsLength)
- for j := globsLength - 1; j >= 0; j-- {
- builder.PrependUOffsetT(globsOffsets[j])
- }
- globsOffset = builder.EndVector(globsLength)
- }
InfoRequestRawStart(builder)
InfoRequestRawAddError(builder, errorOffset)
InfoRequestRawAddFeatures(builder, featuresOffset)
InfoRequestRawAddFiles(builder, filesOffset)
- InfoRequestRawAddGlobs(builder, globsOffset)
return InfoRequestRawEnd(builder)
}
@@ -1047,13 +1013,6 @@ func (rcv *InfoRequestRaw) UnPackTo(t *InfoRequestRawT) {
rcv.Files(&x, j)
t.Files[j] = x.UnPack()
}
- globsLength := rcv.GlobsLength()
- t.Globs = make([]*GlobInfoRawT, globsLength)
- for j := 0; j < globsLength; j++ {
- x := GlobInfoRaw{}
- rcv.Globs(&x, j)
- t.Globs[j] = x.UnPack()
- }
}
func (rcv *InfoRequestRaw) UnPack() *InfoRequestRawT {
@@ -1140,28 +1099,8 @@ func (rcv *InfoRequestRaw) FilesLength() int {
return 0
}
-func (rcv *InfoRequestRaw) Globs(obj *GlobInfoRaw, j int) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- x := rcv._tab.Vector(o)
- x += flatbuffers.UOffsetT(j) * 4
- x = rcv._tab.Indirect(x)
- obj.Init(rcv._tab.Bytes, x)
- return true
- }
- return false
-}
-
-func (rcv *InfoRequestRaw) GlobsLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
- if o != 0 {
- return rcv._tab.VectorLen(o)
- }
- return 0
-}
-
func InfoRequestRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(4)
+ builder.StartObject(3)
}
func InfoRequestRawAddError(builder *flatbuffers.Builder, error flatbuffers.UOffsetT) {
builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(error), 0)
@@ -1178,12 +1117,6 @@ func InfoRequestRawAddFiles(builder *flatbuffers.Builder, files flatbuffers.UOff
func InfoRequestRawStartFilesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
}
-func InfoRequestRawAddGlobs(builder *flatbuffers.Builder, globs flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(globs), 0)
-}
-func InfoRequestRawStartGlobsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
- return builder.StartVector(4, numElems, 4)
-}
func InfoRequestRawEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
return builder.EndObject()
}
@@ -1929,8 +1862,9 @@ func CreateExecOptsRaw(builder *flatbuffers.Builder, envFlags ExecEnv, execFlags
type ExecRequestRawT struct {
Id int64 `json:"id"`
+ Type RequestType `json:"type"`
Avoid uint64 `json:"avoid"`
- ProgData []byte `json:"prog_data"`
+ Data []byte `json:"data"`
ExecOpts *ExecOptsRawT `json:"exec_opts"`
Flags RequestFlag `json:"flags"`
AllSignal []int32 `json:"all_signal"`
@@ -1940,9 +1874,9 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
if t == nil {
return 0
}
- progDataOffset := flatbuffers.UOffsetT(0)
- if t.ProgData != nil {
- progDataOffset = builder.CreateByteString(t.ProgData)
+ dataOffset := flatbuffers.UOffsetT(0)
+ if t.Data != nil {
+ dataOffset = builder.CreateByteString(t.Data)
}
allSignalOffset := flatbuffers.UOffsetT(0)
if t.AllSignal != nil {
@@ -1955,8 +1889,9 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
}
ExecRequestRawStart(builder)
ExecRequestRawAddId(builder, t.Id)
+ ExecRequestRawAddType(builder, t.Type)
ExecRequestRawAddAvoid(builder, t.Avoid)
- ExecRequestRawAddProgData(builder, progDataOffset)
+ ExecRequestRawAddData(builder, dataOffset)
execOptsOffset := t.ExecOpts.Pack(builder)
ExecRequestRawAddExecOpts(builder, execOptsOffset)
ExecRequestRawAddFlags(builder, t.Flags)
@@ -1966,8 +1901,9 @@ func (t *ExecRequestRawT) Pack(builder *flatbuffers.Builder) flatbuffers.UOffset
func (rcv *ExecRequestRaw) UnPackTo(t *ExecRequestRawT) {
t.Id = rcv.Id()
+ t.Type = rcv.Type()
t.Avoid = rcv.Avoid()
- t.ProgData = rcv.ProgDataBytes()
+ t.Data = rcv.DataBytes()
t.ExecOpts = rcv.ExecOpts(nil).UnPack()
t.Flags = rcv.Flags()
allSignalLength := rcv.AllSignalLength()
@@ -2025,20 +1961,32 @@ func (rcv *ExecRequestRaw) MutateId(n int64) bool {
return rcv._tab.MutateInt64Slot(4, n)
}
-func (rcv *ExecRequestRaw) Avoid() uint64 {
+func (rcv *ExecRequestRaw) Type() RequestType {
o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
if o != 0 {
+ return RequestType(rcv._tab.GetUint64(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *ExecRequestRaw) MutateType(n RequestType) bool {
+ return rcv._tab.MutateUint64Slot(6, uint64(n))
+}
+
+func (rcv *ExecRequestRaw) Avoid() uint64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
return rcv._tab.GetUint64(o + rcv._tab.Pos)
}
return 0
}
func (rcv *ExecRequestRaw) MutateAvoid(n uint64) bool {
- return rcv._tab.MutateUint64Slot(6, n)
+ return rcv._tab.MutateUint64Slot(8, n)
}
-func (rcv *ExecRequestRaw) ProgData(j int) byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+func (rcv *ExecRequestRaw) Data(j int) byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetByte(a + flatbuffers.UOffsetT(j*1))
@@ -2046,24 +1994,24 @@ func (rcv *ExecRequestRaw) ProgData(j int) byte {
return 0
}
-func (rcv *ExecRequestRaw) ProgDataLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+func (rcv *ExecRequestRaw) DataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.VectorLen(o)
}
return 0
}
-func (rcv *ExecRequestRaw) ProgDataBytes() []byte {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+func (rcv *ExecRequestRaw) DataBytes() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
return rcv._tab.ByteVector(o + rcv._tab.Pos)
}
return nil
}
-func (rcv *ExecRequestRaw) MutateProgData(j int, n byte) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+func (rcv *ExecRequestRaw) MutateData(j int, n byte) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.MutateByte(a+flatbuffers.UOffsetT(j*1), n)
@@ -2072,7 +2020,7 @@ func (rcv *ExecRequestRaw) MutateProgData(j int, n byte) bool {
}
func (rcv *ExecRequestRaw) ExecOpts(obj *ExecOptsRaw) *ExecOptsRaw {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
if o != 0 {
x := o + rcv._tab.Pos
if obj == nil {
@@ -2085,7 +2033,7 @@ func (rcv *ExecRequestRaw) ExecOpts(obj *ExecOptsRaw) *ExecOptsRaw {
}
func (rcv *ExecRequestRaw) Flags() RequestFlag {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
if o != 0 {
return RequestFlag(rcv._tab.GetUint64(o + rcv._tab.Pos))
}
@@ -2093,11 +2041,11 @@ func (rcv *ExecRequestRaw) Flags() RequestFlag {
}
func (rcv *ExecRequestRaw) MutateFlags(n RequestFlag) bool {
- return rcv._tab.MutateUint64Slot(12, uint64(n))
+ return rcv._tab.MutateUint64Slot(14, uint64(n))
}
func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4))
@@ -2106,7 +2054,7 @@ func (rcv *ExecRequestRaw) AllSignal(j int) int32 {
}
func (rcv *ExecRequestRaw) AllSignalLength() int {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
return rcv._tab.VectorLen(o)
}
@@ -2114,7 +2062,7 @@ func (rcv *ExecRequestRaw) AllSignalLength() int {
}
func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
- o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
if o != 0 {
a := rcv._tab.Vector(o)
return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n)
@@ -2123,28 +2071,31 @@ func (rcv *ExecRequestRaw) MutateAllSignal(j int, n int32) bool {
}
func ExecRequestRawStart(builder *flatbuffers.Builder) {
- builder.StartObject(6)
+ builder.StartObject(7)
}
func ExecRequestRawAddId(builder *flatbuffers.Builder, id int64) {
builder.PrependInt64Slot(0, id, 0)
}
+func ExecRequestRawAddType(builder *flatbuffers.Builder, type_ RequestType) {
+ builder.PrependUint64Slot(1, uint64(type_), 0)
+}
func ExecRequestRawAddAvoid(builder *flatbuffers.Builder, avoid uint64) {
- builder.PrependUint64Slot(1, avoid, 0)
+ builder.PrependUint64Slot(2, avoid, 0)
}
-func ExecRequestRawAddProgData(builder *flatbuffers.Builder, progData flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(progData), 0)
+func ExecRequestRawAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(data), 0)
}
-func ExecRequestRawStartProgDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+func ExecRequestRawStartDataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(1, numElems, 1)
}
func ExecRequestRawAddExecOpts(builder *flatbuffers.Builder, execOpts flatbuffers.UOffsetT) {
- builder.PrependStructSlot(3, flatbuffers.UOffsetT(execOpts), 0)
+ builder.PrependStructSlot(4, flatbuffers.UOffsetT(execOpts), 0)
}
func ExecRequestRawAddFlags(builder *flatbuffers.Builder, flags RequestFlag) {
- builder.PrependUint64Slot(4, uint64(flags), 0)
+ builder.PrependUint64Slot(5, uint64(flags), 0)
}
func ExecRequestRawAddAllSignal(builder *flatbuffers.Builder, allSignal flatbuffers.UOffsetT) {
- builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(allSignal), 0)
+ builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(allSignal), 0)
}
func ExecRequestRawStartAllSignalVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
return builder.StartVector(4, numElems, 4)
diff --git a/pkg/flatrpc/flatrpc.h b/pkg/flatrpc/flatrpc.h
index 94e1c15f6..12d905c12 100644
--- a/pkg/flatrpc/flatrpc.h
+++ b/pkg/flatrpc/flatrpc.h
@@ -486,18 +486,49 @@ struct ExecutorMessagesRawUnion {
bool VerifyExecutorMessagesRaw(flatbuffers::Verifier &verifier, const void *obj, ExecutorMessagesRaw type);
bool VerifyExecutorMessagesRawVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<ExecutorMessagesRaw> *types);
+enum class RequestType : uint64_t {
+ Program = 0,
+ Binary = 1ULL,
+ Glob = 2ULL,
+ MIN = Program,
+ MAX = Glob
+};
+
+inline const RequestType (&EnumValuesRequestType())[3] {
+ static const RequestType values[] = {
+ RequestType::Program,
+ RequestType::Binary,
+ RequestType::Glob
+ };
+ return values;
+}
+
+inline const char * const *EnumNamesRequestType() {
+ static const char * const names[4] = {
+ "Program",
+ "Binary",
+ "Glob",
+ nullptr
+ };
+ return names;
+}
+
+inline const char *EnumNameRequestType(RequestType e) {
+ if (flatbuffers::IsOutRange(e, RequestType::Program, RequestType::Glob)) return "";
+ const size_t index = static_cast<size_t>(e);
+ return EnumNamesRequestType()[index];
+}
+
enum class RequestFlag : uint64_t {
- IsBinary = 1ULL,
- ReturnOutput = 2ULL,
- ReturnError = 4ULL,
+ ReturnOutput = 1ULL,
+ ReturnError = 2ULL,
NONE = 0,
- ANY = 7ULL
+ ANY = 3ULL
};
FLATBUFFERS_DEFINE_BITMASK_OPERATORS(RequestFlag, uint64_t)
-inline const RequestFlag (&EnumValuesRequestFlag())[3] {
+inline const RequestFlag (&EnumValuesRequestFlag())[2] {
static const RequestFlag values[] = {
- RequestFlag::IsBinary,
RequestFlag::ReturnOutput,
RequestFlag::ReturnError
};
@@ -505,10 +536,8 @@ inline const RequestFlag (&EnumValuesRequestFlag())[3] {
}
inline const char * const *EnumNamesRequestFlag() {
- static const char * const names[5] = {
- "IsBinary",
+ static const char * const names[3] = {
"ReturnOutput",
- "",
"ReturnError",
nullptr
};
@@ -516,8 +545,8 @@ inline const char * const *EnumNamesRequestFlag() {
}
inline const char *EnumNameRequestFlag(RequestFlag e) {
- if (flatbuffers::IsOutRange(e, RequestFlag::IsBinary, RequestFlag::ReturnError)) return "";
- const size_t index = static_cast<size_t>(e) - static_cast<size_t>(RequestFlag::IsBinary);
+ if (flatbuffers::IsOutRange(e, RequestFlag::ReturnOutput, RequestFlag::ReturnError)) return "";
+ const size_t index = static_cast<size_t>(e) - static_cast<size_t>(RequestFlag::ReturnOutput);
return EnumNamesRequestFlag()[index];
}
@@ -936,7 +965,6 @@ struct ConnectReplyRawT : public flatbuffers::NativeTable {
std::vector<std::string> race_frames{};
rpc::Feature features = static_cast<rpc::Feature>(0);
std::vector<std::string> files{};
- std::vector<std::string> globs{};
};
struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
@@ -954,8 +982,7 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_LEAK_FRAMES = 20,
VT_RACE_FRAMES = 22,
VT_FEATURES = 24,
- VT_FILES = 26,
- VT_GLOBS = 28
+ VT_FILES = 26
};
bool debug() const {
return GetField<uint8_t>(VT_DEBUG, 0) != 0;
@@ -993,9 +1020,6 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *files() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_FILES);
}
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *globs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_GLOBS);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_DEBUG, 1) &&
@@ -1016,9 +1040,6 @@ struct ConnectReplyRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_FILES) &&
verifier.VerifyVector(files()) &&
verifier.VerifyVectorOfStrings(files()) &&
- VerifyOffset(verifier, VT_GLOBS) &&
- verifier.VerifyVector(globs()) &&
- verifier.VerifyVectorOfStrings(globs()) &&
verifier.EndTable();
}
ConnectReplyRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -1066,9 +1087,6 @@ struct ConnectReplyRawBuilder {
void add_files(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> files) {
fbb_.AddOffset(ConnectReplyRaw::VT_FILES, files);
}
- void add_globs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> globs) {
- fbb_.AddOffset(ConnectReplyRaw::VT_GLOBS, globs);
- }
explicit ConnectReplyRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1093,11 +1111,9 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> leak_frames = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> race_frames = 0,
rpc::Feature features = static_cast<rpc::Feature>(0),
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> files = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> globs = 0) {
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> files = 0) {
ConnectReplyRawBuilder builder_(_fbb);
builder_.add_features(features);
- builder_.add_globs(globs);
builder_.add_files(files);
builder_.add_race_frames(race_frames);
builder_.add_leak_frames(leak_frames);
@@ -1125,12 +1141,10 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
const std::vector<flatbuffers::Offset<flatbuffers::String>> *leak_frames = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *race_frames = nullptr,
rpc::Feature features = static_cast<rpc::Feature>(0),
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *files = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *globs = nullptr) {
+ const std::vector<flatbuffers::Offset<flatbuffers::String>> *files = nullptr) {
auto leak_frames__ = leak_frames ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*leak_frames) : 0;
auto race_frames__ = race_frames ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*race_frames) : 0;
auto files__ = files ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*files) : 0;
- auto globs__ = globs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*globs) : 0;
return rpc::CreateConnectReplyRaw(
_fbb,
debug,
@@ -1144,8 +1158,7 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRawDirect(
leak_frames__,
race_frames__,
features,
- files__,
- globs__);
+ files__);
}
flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::FlatBufferBuilder &_fbb, const ConnectReplyRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -1155,7 +1168,6 @@ struct InfoRequestRawT : public flatbuffers::NativeTable {
std::string error{};
std::vector<std::unique_ptr<rpc::FeatureInfoRawT>> features{};
std::vector<std::unique_ptr<rpc::FileInfoRawT>> files{};
- std::vector<std::unique_ptr<rpc::GlobInfoRawT>> globs{};
InfoRequestRawT() = default;
InfoRequestRawT(const InfoRequestRawT &o);
InfoRequestRawT(InfoRequestRawT&&) FLATBUFFERS_NOEXCEPT = default;
@@ -1168,8 +1180,7 @@ struct InfoRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ERROR = 4,
VT_FEATURES = 6,
- VT_FILES = 8,
- VT_GLOBS = 10
+ VT_FILES = 8
};
const flatbuffers::String *error() const {
return GetPointer<const flatbuffers::String *>(VT_ERROR);
@@ -1180,9 +1191,6 @@ struct InfoRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const flatbuffers::Vector<flatbuffers::Offset<rpc::FileInfoRaw>> *files() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::FileInfoRaw>> *>(VT_FILES);
}
- const flatbuffers::Vector<flatbuffers::Offset<rpc::GlobInfoRaw>> *globs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<rpc::GlobInfoRaw>> *>(VT_GLOBS);
- }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_ERROR) &&
@@ -1193,9 +1201,6 @@ struct InfoRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VerifyOffset(verifier, VT_FILES) &&
verifier.VerifyVector(files()) &&
verifier.VerifyVectorOfTables(files()) &&
- VerifyOffset(verifier, VT_GLOBS) &&
- verifier.VerifyVector(globs()) &&
- verifier.VerifyVectorOfTables(globs()) &&
verifier.EndTable();
}
InfoRequestRawT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -1216,9 +1221,6 @@ struct InfoRequestRawBuilder {
void add_files(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::FileInfoRaw>>> files) {
fbb_.AddOffset(InfoRequestRaw::VT_FILES, files);
}
- void add_globs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::GlobInfoRaw>>> globs) {
- fbb_.AddOffset(InfoRequestRaw::VT_GLOBS, globs);
- }
explicit InfoRequestRawBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -1234,10 +1236,8 @@ inline flatbuffers::Offset<InfoRequestRaw> CreateInfoRequestRaw(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> error = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::FeatureInfoRaw>>> features = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::FileInfoRaw>>> files = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::GlobInfoRaw>>> globs = 0) {
+ flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<rpc::FileInfoRaw>>> files = 0) {
InfoRequestRawBuilder builder_(_fbb);
- builder_.add_globs(globs);
builder_.add_files(files);
builder_.add_features(features);
builder_.add_error(error);
@@ -1248,18 +1248,15 @@ inline flatbuffers::Offset<InfoRequestRaw> CreateInfoRequestRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *error = nullptr,
const std::vector<flatbuffers::Offset<rpc::FeatureInfoRaw>> *features = nullptr,
- const std::vector<flatbuffers::Offset<rpc::FileInfoRaw>> *files = nullptr,
- const std::vector<flatbuffers::Offset<rpc::GlobInfoRaw>> *globs = nullptr) {
+ const std::vector<flatbuffers::Offset<rpc::FileInfoRaw>> *files = nullptr) {
auto error__ = error ? _fbb.CreateString(error) : 0;
auto features__ = features ? _fbb.CreateVector<flatbuffers::Offset<rpc::FeatureInfoRaw>>(*features) : 0;
auto files__ = files ? _fbb.CreateVector<flatbuffers::Offset<rpc::FileInfoRaw>>(*files) : 0;
- auto globs__ = globs ? _fbb.CreateVector<flatbuffers::Offset<rpc::GlobInfoRaw>>(*globs) : 0;
return rpc::CreateInfoRequestRaw(
_fbb,
error__,
features__,
- files__,
- globs__);
+ files__);
}
flatbuffers::Offset<InfoRequestRaw> CreateInfoRequestRaw(flatbuffers::FlatBufferBuilder &_fbb, const InfoRequestRawT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
@@ -1777,8 +1774,9 @@ flatbuffers::Offset<ExecutorMessageRaw> CreateExecutorMessageRaw(flatbuffers::Fl
struct ExecRequestRawT : public flatbuffers::NativeTable {
typedef ExecRequestRaw TableType;
int64_t id = 0;
+ rpc::RequestType type = rpc::RequestType::Program;
uint64_t avoid = 0;
- std::vector<uint8_t> prog_data{};
+ std::vector<uint8_t> data{};
std::unique_ptr<rpc::ExecOptsRaw> exec_opts{};
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0);
std::vector<int32_t> all_signal{};
@@ -1793,20 +1791,24 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef ExecRequestRawBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ID = 4,
- VT_AVOID = 6,
- VT_PROG_DATA = 8,
- VT_EXEC_OPTS = 10,
- VT_FLAGS = 12,
- VT_ALL_SIGNAL = 14
+ VT_TYPE = 6,
+ VT_AVOID = 8,
+ VT_DATA = 10,
+ VT_EXEC_OPTS = 12,
+ VT_FLAGS = 14,
+ VT_ALL_SIGNAL = 16
};
int64_t id() const {
return GetField<int64_t>(VT_ID, 0);
}
+ rpc::RequestType type() const {
+ return static_cast<rpc::RequestType>(GetField<uint64_t>(VT_TYPE, 0));
+ }
uint64_t avoid() const {
return GetField<uint64_t>(VT_AVOID, 0);
}
- const flatbuffers::Vector<uint8_t> *prog_data() const {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_PROG_DATA);
+ const flatbuffers::Vector<uint8_t> *data() const {
+ return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
}
const rpc::ExecOptsRaw *exec_opts() const {
return GetStruct<const rpc::ExecOptsRaw *>(VT_EXEC_OPTS);
@@ -1820,9 +1822,10 @@ struct ExecRequestRaw FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int64_t>(verifier, VT_ID, 8) &&
+ VerifyField<uint64_t>(verifier, VT_TYPE, 8) &&
VerifyField<uint64_t>(verifier, VT_AVOID, 8) &&
- VerifyOffset(verifier, VT_PROG_DATA) &&
- verifier.VerifyVector(prog_data()) &&
+ VerifyOffset(verifier, VT_DATA) &&
+ verifier.VerifyVector(data()) &&
VerifyField<rpc::ExecOptsRaw>(verifier, VT_EXEC_OPTS, 8) &&
VerifyField<uint64_t>(verifier, VT_FLAGS, 8) &&
VerifyOffset(verifier, VT_ALL_SIGNAL) &&
@@ -1841,11 +1844,14 @@ struct ExecRequestRawBuilder {
void add_id(int64_t id) {
fbb_.AddElement<int64_t>(ExecRequestRaw::VT_ID, id, 0);
}
+ void add_type(rpc::RequestType type) {
+ fbb_.AddElement<uint64_t>(ExecRequestRaw::VT_TYPE, static_cast<uint64_t>(type), 0);
+ }
void add_avoid(uint64_t avoid) {
fbb_.AddElement<uint64_t>(ExecRequestRaw::VT_AVOID, avoid, 0);
}
- void add_prog_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data) {
- fbb_.AddOffset(ExecRequestRaw::VT_PROG_DATA, prog_data);
+ void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+ fbb_.AddOffset(ExecRequestRaw::VT_DATA, data);
}
void add_exec_opts(const rpc::ExecOptsRaw *exec_opts) {
fbb_.AddStruct(ExecRequestRaw::VT_EXEC_OPTS, exec_opts);
@@ -1870,36 +1876,40 @@ struct ExecRequestRawBuilder {
inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(
flatbuffers::FlatBufferBuilder &_fbb,
int64_t id = 0,
+ rpc::RequestType type = rpc::RequestType::Program,
uint64_t avoid = 0,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> prog_data = 0,
+ flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
flatbuffers::Offset<flatbuffers::Vector<int32_t>> all_signal = 0) {
ExecRequestRawBuilder builder_(_fbb);
builder_.add_flags(flags);
builder_.add_avoid(avoid);
+ builder_.add_type(type);
builder_.add_id(id);
builder_.add_all_signal(all_signal);
builder_.add_exec_opts(exec_opts);
- builder_.add_prog_data(prog_data);
+ builder_.add_data(data);
return builder_.Finish();
}
inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRawDirect(
flatbuffers::FlatBufferBuilder &_fbb,
int64_t id = 0,
+ rpc::RequestType type = rpc::RequestType::Program,
uint64_t avoid = 0,
- const std::vector<uint8_t> *prog_data = nullptr,
+ const std::vector<uint8_t> *data = nullptr,
const rpc::ExecOptsRaw *exec_opts = nullptr,
rpc::RequestFlag flags = static_cast<rpc::RequestFlag>(0),
const std::vector<int32_t> *all_signal = nullptr) {
- auto prog_data__ = prog_data ? _fbb.CreateVector<uint8_t>(*prog_data) : 0;
+ auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
auto all_signal__ = all_signal ? _fbb.CreateVector<int32_t>(*all_signal) : 0;
return rpc::CreateExecRequestRaw(
_fbb,
id,
+ type,
avoid,
- prog_data__,
+ data__,
exec_opts,
flags,
all_signal__);
@@ -2942,7 +2952,6 @@ inline void ConnectReplyRaw::UnPackTo(ConnectReplyRawT *_o, const flatbuffers::r
{ auto _e = race_frames(); if (_e) { _o->race_frames.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->race_frames[_i] = _e->Get(_i)->str(); } } }
{ auto _e = features(); _o->features = _e; }
{ auto _e = files(); if (_e) { _o->files.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->files[_i] = _e->Get(_i)->str(); } } }
- { auto _e = globs(); if (_e) { _o->globs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->globs[_i] = _e->Get(_i)->str(); } } }
}
inline flatbuffers::Offset<ConnectReplyRaw> ConnectReplyRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConnectReplyRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -2965,7 +2974,6 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
auto _race_frames = _o->race_frames.size() ? _fbb.CreateVectorOfStrings(_o->race_frames) : 0;
auto _features = _o->features;
auto _files = _o->files.size() ? _fbb.CreateVectorOfStrings(_o->files) : 0;
- auto _globs = _o->globs.size() ? _fbb.CreateVectorOfStrings(_o->globs) : 0;
return rpc::CreateConnectReplyRaw(
_fbb,
_debug,
@@ -2979,8 +2987,7 @@ inline flatbuffers::Offset<ConnectReplyRaw> CreateConnectReplyRaw(flatbuffers::F
_leak_frames,
_race_frames,
_features,
- _files,
- _globs);
+ _files);
}
inline InfoRequestRawT::InfoRequestRawT(const InfoRequestRawT &o)
@@ -2989,15 +2996,12 @@ inline InfoRequestRawT::InfoRequestRawT(const InfoRequestRawT &o)
for (const auto &features_ : o.features) { features.emplace_back((features_) ? new rpc::FeatureInfoRawT(*features_) : nullptr); }
files.reserve(o.files.size());
for (const auto &files_ : o.files) { files.emplace_back((files_) ? new rpc::FileInfoRawT(*files_) : nullptr); }
- globs.reserve(o.globs.size());
- for (const auto &globs_ : o.globs) { globs.emplace_back((globs_) ? new rpc::GlobInfoRawT(*globs_) : nullptr); }
}
inline InfoRequestRawT &InfoRequestRawT::operator=(InfoRequestRawT o) FLATBUFFERS_NOEXCEPT {
std::swap(error, o.error);
std::swap(features, o.features);
std::swap(files, o.files);
- std::swap(globs, o.globs);
return *this;
}
@@ -3013,7 +3017,6 @@ inline void InfoRequestRaw::UnPackTo(InfoRequestRawT *_o, const flatbuffers::res
{ auto _e = error(); if (_e) _o->error = _e->str(); }
{ auto _e = features(); if (_e) { _o->features.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->features[_i] = std::unique_ptr<rpc::FeatureInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
{ auto _e = files(); if (_e) { _o->files.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->files[_i] = std::unique_ptr<rpc::FileInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
- { auto _e = globs(); if (_e) { _o->globs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->globs[_i] = std::unique_ptr<rpc::GlobInfoRawT>(_e->Get(_i)->UnPack(_resolver)); } } }
}
inline flatbuffers::Offset<InfoRequestRaw> InfoRequestRaw::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InfoRequestRawT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -3027,13 +3030,11 @@ inline flatbuffers::Offset<InfoRequestRaw> CreateInfoRequestRaw(flatbuffers::Fla
auto _error = _o->error.empty() ? 0 : _fbb.CreateString(_o->error);
auto _features = _o->features.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::FeatureInfoRaw>> (_o->features.size(), [](size_t i, _VectorArgs *__va) { return CreateFeatureInfoRaw(*__va->__fbb, __va->__o->features[i].get(), __va->__rehasher); }, &_va ) : 0;
auto _files = _o->files.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::FileInfoRaw>> (_o->files.size(), [](size_t i, _VectorArgs *__va) { return CreateFileInfoRaw(*__va->__fbb, __va->__o->files[i].get(), __va->__rehasher); }, &_va ) : 0;
- auto _globs = _o->globs.size() ? _fbb.CreateVector<flatbuffers::Offset<rpc::GlobInfoRaw>> (_o->globs.size(), [](size_t i, _VectorArgs *__va) { return CreateGlobInfoRaw(*__va->__fbb, __va->__o->globs[i].get(), __va->__rehasher); }, &_va ) : 0;
return rpc::CreateInfoRequestRaw(
_fbb,
_error,
_features,
- _files,
- _globs);
+ _files);
}
inline InfoReplyRawT *InfoReplyRaw::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
@@ -3218,8 +3219,9 @@ inline flatbuffers::Offset<ExecutorMessageRaw> CreateExecutorMessageRaw(flatbuff
inline ExecRequestRawT::ExecRequestRawT(const ExecRequestRawT &o)
: id(o.id),
+ type(o.type),
avoid(o.avoid),
- prog_data(o.prog_data),
+ data(o.data),
exec_opts((o.exec_opts) ? new rpc::ExecOptsRaw(*o.exec_opts) : nullptr),
flags(o.flags),
all_signal(o.all_signal) {
@@ -3227,8 +3229,9 @@ inline ExecRequestRawT::ExecRequestRawT(const ExecRequestRawT &o)
inline ExecRequestRawT &ExecRequestRawT::operator=(ExecRequestRawT o) FLATBUFFERS_NOEXCEPT {
std::swap(id, o.id);
+ std::swap(type, o.type);
std::swap(avoid, o.avoid);
- std::swap(prog_data, o.prog_data);
+ std::swap(data, o.data);
std::swap(exec_opts, o.exec_opts);
std::swap(flags, o.flags);
std::swap(all_signal, o.all_signal);
@@ -3245,8 +3248,9 @@ inline void ExecRequestRaw::UnPackTo(ExecRequestRawT *_o, const flatbuffers::res
(void)_o;
(void)_resolver;
{ auto _e = id(); _o->id = _e; }
+ { auto _e = type(); _o->type = _e; }
{ auto _e = avoid(); _o->avoid = _e; }
- { auto _e = prog_data(); if (_e) { _o->prog_data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->prog_data.begin()); } }
+ { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
{ auto _e = exec_opts(); if (_e) _o->exec_opts = std::unique_ptr<rpc::ExecOptsRaw>(new rpc::ExecOptsRaw(*_e)); }
{ auto _e = flags(); _o->flags = _e; }
{ auto _e = all_signal(); if (_e) { _o->all_signal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->all_signal[_i] = _e->Get(_i); } } }
@@ -3261,16 +3265,18 @@ inline flatbuffers::Offset<ExecRequestRaw> CreateExecRequestRaw(flatbuffers::Fla
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExecRequestRawT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _id = _o->id;
+ auto _type = _o->type;
auto _avoid = _o->avoid;
- auto _prog_data = _o->prog_data.size() ? _fbb.CreateVector(_o->prog_data) : 0;
+ auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
auto _exec_opts = _o->exec_opts ? _o->exec_opts.get() : nullptr;
auto _flags = _o->flags;
auto _all_signal = _o->all_signal.size() ? _fbb.CreateVector(_o->all_signal) : 0;
return rpc::CreateExecRequestRaw(
_fbb,
_id,
+ _type,
_avoid,
- _prog_data,
+ _data,
_exec_opts,
_flags,
_all_signal);
diff --git a/pkg/fuzzer/queue/queue.go b/pkg/fuzzer/queue/queue.go
index 1d25cbc8a..05d9cfbed 100644
--- a/pkg/fuzzer/queue/queue.go
+++ b/pkg/fuzzer/queue/queue.go
@@ -9,6 +9,7 @@ import (
"encoding/gob"
"fmt"
"math/rand"
+ "strings"
"sync"
"sync/atomic"
@@ -20,8 +21,15 @@ import (
)
type Request struct {
- Prog *prog.Prog
- ExecOpts flatrpc.ExecOpts
+ // Type of the request.
+ // RequestTypeProgram executes Prog, and is used by most requests (also the default zero value).
+ // RequestTypeBinary executes binary with file name stored in Data.
+ // RequestTypeGlob expands glob pattern stored in Data.
+ Type flatrpc.RequestType
+ ExecOpts flatrpc.ExecOpts
+ Prog *prog.Prog // for RequestTypeProgram
+ BinaryFile string // for RequestTypeBinary
+ GlobPattern string // for RequestTypeGlob
// If specified, the resulting signal for call SignalFilterCall
// will include subset of it even if it's not new.
@@ -36,9 +44,6 @@ type Request struct {
// This stat will be incremented on request completion.
Stat *stat.Val
- // Options needed by runtest.
- BinaryFile string // If set, it's executed instead of Prog.
-
// Important requests will be retried even from crashed VMs.
Important bool
@@ -123,20 +128,51 @@ func (r *Request) Validate() error {
if (collectComps) && (collectSignal || collectCover) {
return fmt.Errorf("hint collection is mutually exclusive with signal/coverage")
}
- sandboxes := flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSandboxSetuid |
- flatrpc.ExecEnvSandboxNamespace | flatrpc.ExecEnvSandboxAndroid
- if r.BinaryFile == "" && r.ExecOpts.EnvFlags&sandboxes == 0 {
- return fmt.Errorf("no sandboxes set")
+ switch r.Type {
+ case flatrpc.RequestTypeProgram:
+ if r.Prog == nil {
+ return fmt.Errorf("program is not set")
+ }
+ sandboxes := flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSandboxSetuid |
+ flatrpc.ExecEnvSandboxNamespace | flatrpc.ExecEnvSandboxAndroid
+ if r.ExecOpts.EnvFlags&sandboxes == 0 {
+ return fmt.Errorf("no sandboxes set")
+ }
+ case flatrpc.RequestTypeBinary:
+ if r.BinaryFile == "" {
+ return fmt.Errorf("binary file name is not set")
+ }
+ case flatrpc.RequestTypeGlob:
+ if r.GlobPattern == "" {
+ return fmt.Errorf("glob pattern is not set")
+ }
+ default:
+ return fmt.Errorf("unknown request type")
}
return nil
}
func (r *Request) hash() hash.Sig {
buf := new(bytes.Buffer)
- if err := gob.NewEncoder(buf).Encode(r.ExecOpts); err != nil {
+ enc := gob.NewEncoder(buf)
+ if err := enc.Encode(r.Type); err != nil {
+ panic(err)
+ }
+ if err := enc.Encode(r.ExecOpts); err != nil {
panic(err)
}
- return hash.Hash(r.Prog.Serialize(), buf.Bytes())
+ var data []byte
+ switch r.Type {
+ case flatrpc.RequestTypeProgram:
+ data = r.Prog.Serialize()
+ case flatrpc.RequestTypeBinary:
+ data = []byte(r.BinaryFile)
+ case flatrpc.RequestTypeGlob:
+ data = []byte(r.GlobPattern)
+ default:
+ panic("unknown request type")
+ }
+ return hash.Hash(data, buf.Bytes())
}
func (r *Request) initChannel() {
@@ -172,6 +208,15 @@ func (r *Result) Stop() bool {
}
}
+// GlobFiles returns the result of a RequestTypeGlob request.
+func (r *Result) GlobFiles() []string {
+ out := strings.Trim(string(r.Output), "\000")
+ if out == "" {
+ return nil
+ }
+ return strings.Split(out, "\000")
+}
+
type Status int
const (
diff --git a/pkg/fuzzer/queue/queue_test.go b/pkg/fuzzer/queue/queue_test.go
index 5b6a03ed0..d1909e50c 100644
--- a/pkg/fuzzer/queue/queue_test.go
+++ b/pkg/fuzzer/queue/queue_test.go
@@ -43,3 +43,10 @@ func TestPrioQueue(t *testing.T) {
assert.Equal(t, req4, pq.Next())
assert.Equal(t, req3, pq.Next())
}
+
+func TestGlobFiles(t *testing.T) {
+ r := &Result{}
+ assert.Equal(t, r.GlobFiles(), []string(nil))
+ r.Output = []byte{'a', 'b', 0, 'c', 0}
+ assert.Equal(t, r.GlobFiles(), []string{"ab", "c"})
+}
diff --git a/pkg/ifaceprobe/ifaceprobe.go b/pkg/ifaceprobe/ifaceprobe.go
index f3ab8ba4a..b2b3569df 100644
--- a/pkg/ifaceprobe/ifaceprobe.go
+++ b/pkg/ifaceprobe/ifaceprobe.go
@@ -12,7 +12,9 @@ import (
"path/filepath"
"slices"
"strings"
+ "sync"
+ "github.com/google/syzkaller/pkg/csource"
"github.com/google/syzkaller/pkg/flatrpc"
"github.com/google/syzkaller/pkg/fuzzer/queue"
"github.com/google/syzkaller/pkg/log"
@@ -38,71 +40,62 @@ type PCInfo struct {
File string
}
-// Globs returns a list of glob's that should be requested from the target machine.
-// Result of querying these globs should be later passed to Run in info.
-func Globs() []string {
- var globs []string
- for _, path := range []string{"/dev", "/sys", "/proc"} {
- // Our globs currently do not support recursion (#4906),
- // so we append N "/*" parts manully. Some of the paths can be very deep, e.g. try:
- // sudo find /sys -ls 2>/dev/null | sed "s#[^/]##g" | sort | uniq -c
- for i := 1; i < 15; i++ {
- globs = append(globs, path+strings.Repeat("/*", i))
- }
- }
- return globs
-}
-
-// Run finishes dynamic analysis and returns dynamic info.
+// Run does dynamic analysis and returns dynamic info.
// As it runs it will submit some test program requests to the exec queue.
-// Info is used to extract results of glob querying, see Globs function.
-func Run(ctx context.Context, cfg *mgrconfig.Config, exec queue.Executor, info *flatrpc.InfoRequest) (*Info, error) {
+func Run(ctx context.Context, cfg *mgrconfig.Config, features flatrpc.Feature, exec queue.Executor) (*Info, error) {
return (&prober{
- ctx: ctx,
- cfg: cfg,
- exec: exec,
- info: info,
+ ctx: ctx,
+ cfg: cfg,
+ features: features,
+ exec: exec,
+ done: make(chan *fileDesc, 100),
+ errc: make(chan error, 1),
}).run()
}
type prober struct {
- ctx context.Context
- cfg *mgrconfig.Config
- exec queue.Executor
- info *flatrpc.InfoRequest
+ ctx context.Context
+ cfg *mgrconfig.Config
+ features flatrpc.Feature
+ exec queue.Executor
+ wg sync.WaitGroup
+ done chan *fileDesc
+ errc chan error
+}
+
+type fileDesc struct {
+ file string
+ results []*queue.Result
}
func (pr *prober) run() (*Info, error) {
symb := symbolizer.NewSymbolizer(pr.cfg.SysTarget)
defer symb.Close()
- files := extractFiles(pr.info)
- var reqs [][]*queue.Request
- for _, file := range extractFiles(pr.info) {
- reqs1, err := pr.submitFile(file)
- if err != nil {
- return nil, err
- }
- reqs = append(reqs, reqs1)
+ for _, glob := range globList() {
+ pr.submitGlob(glob)
}
+ go func() {
+ pr.wg.Wait()
+ close(pr.done)
+ }()
+
info := &Info{}
dedup := make(map[uint64]bool)
kernelObj := filepath.Join(pr.cfg.KernelObj, pr.cfg.SysTarget.KernelObject)
sourceBase := filepath.Clean(pr.cfg.KernelSrc) + string(filepath.Separator)
- for i, file := range files {
+ i := 0
+ for desc := range pr.done {
+ i++
if i%500 == 0 {
- log.Logf(0, "processing file %v/%v", i, len(files))
+ log.Logf(0, "done file %v", i)
}
fi := FileInfo{
- Name: file,
+ Name: desc.file,
}
fileDedup := make(map[uint64]bool)
- for _, req := range reqs[i] {
- res := req.Wait(pr.ctx)
- if res.Status != queue.Success {
- return nil, fmt.Errorf("failed to execute prog: %w (%v)", res.Err, res.Status)
- }
+ for _, res := range desc.results {
cover := append(res.Info.Calls[0].Cover, res.Info.Calls[1].Cover...)
for _, pc := range cover {
if fileDedup[pc] {
@@ -133,13 +126,59 @@ func (pr *prober) run() (*Info, error) {
slices.Sort(fi.Cover)
info.Files = append(info.Files, fi)
}
+ slices.SortFunc(info.Files, func(a, b FileInfo) int {
+ return strings.Compare(a.Name, b.Name)
+ })
slices.SortFunc(info.PCs, func(a, b PCInfo) int {
return int(a.PC - b.PC)
})
- return info, nil
+ select {
+ case err := <-pr.errc:
+ return nil, err
+ default:
+ return info, nil
+ }
+}
+
+func (pr *prober) noteError(err error) {
+ select {
+ case pr.errc <- err:
+ default:
+ }
}
-func (pr *prober) submitFile(file string) ([]*queue.Request, error) {
+func (pr *prober) submitGlob(glob string) {
+ pr.wg.Add(1)
+ req := &queue.Request{
+ Type: flatrpc.RequestTypeGlob,
+ GlobPattern: glob,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: flatrpc.ExecEnvSandboxNone | csource.FeaturesToFlags(pr.features, nil),
+ },
+ Important: true,
+ }
+ req.OnDone(pr.onGlobDone)
+ pr.exec.Submit(req)
+}
+
+func (pr *prober) onGlobDone(req *queue.Request, res *queue.Result) bool {
+ defer pr.wg.Done()
+ if res.Status != queue.Success {
+ pr.noteError(fmt.Errorf("failed to execute glob: %w (%v)\n%s\n%s",
+ res.Err, res.Status, req.GlobPattern, res.Output))
+ }
+ files := res.GlobFiles()
+ log.Logf(0, "glob %v expanded to %v files", req.GlobPattern, len(files))
+ for _, file := range files {
+ if extractFileFilter(file) {
+ pr.submitFile(file)
+ }
+ }
+ return true
+}
+
+func (pr *prober) submitFile(file string) {
+ pr.wg.Add(1)
var fops = []struct {
mode string
call string
@@ -151,18 +190,22 @@ func (pr *prober) submitFile(file string) ([]*queue.Request, error) {
{mode: "O_RDONLY", call: "mmap(0x0, 0x1000, 0x1, 0x2, r0, 0)"},
{mode: "O_WRONLY", call: "mmap(0x0, 0x1000, 0x2, 0x2, r0, 0)"},
}
+ desc := &fileDesc{
+ file: file,
+ }
var reqs []*queue.Request
for _, desc := range fops {
text := fmt.Sprintf("r0 = openat(0x%x, &AUTO='%s', 0x%x, 0x0)\n%v",
pr.constVal("AT_FDCWD"), file, pr.constVal(desc.mode), desc.call)
p, err := pr.cfg.Target.Deserialize([]byte(text), prog.StrictUnsafe)
if err != nil {
- return nil, fmt.Errorf("failed to deserialize: %w\n%v", err, text)
+ panic(fmt.Sprintf("failed to deserialize: %v\n%v", err, text))
}
req := &queue.Request{
Prog: p,
ExecOpts: flatrpc.ExecOpts{
- EnvFlags: flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSignal,
+ EnvFlags: flatrpc.ExecEnvSandboxNone | flatrpc.ExecEnvSignal |
+ csource.FeaturesToFlags(pr.features, nil),
ExecFlags: flatrpc.ExecFlagCollectCover,
},
Important: true,
@@ -170,7 +213,19 @@ func (pr *prober) submitFile(file string) ([]*queue.Request, error) {
reqs = append(reqs, req)
pr.exec.Submit(req)
}
- return reqs, nil
+ go func() {
+ defer pr.wg.Done()
+ for _, req := range reqs {
+ res := req.Wait(pr.ctx)
+ if res.Status != queue.Success {
+ pr.noteError(fmt.Errorf("failed to execute prog: %w (%v)\n%s\n%s",
+ res.Err, res.Status, req.Prog.Serialize(), res.Output))
+ continue
+ }
+ desc.results = append(desc.results, res)
+ }
+ pr.done <- desc
+ }()
}
func (pr *prober) constVal(name string) uint64 {
@@ -181,23 +236,27 @@ func (pr *prober) constVal(name string) uint64 {
return val
}
-func extractFiles(info *flatrpc.InfoRequestRawT) []string {
- var files []string
- dedup := make(map[string]bool)
- for _, glob := range info.Globs {
- for _, file := range glob.Files {
- if dedup[file] || !extractFileFilter(file) {
- continue
- }
- dedup[file] = true
- files = append(files, file)
+// globList returns a list of globs we are interested in.
+func globList() []string {
+ var globs []string
+	// /selinux is mounted by the executor; we should probably mount it at the standard
+	// /sys/fs/selinux, but this is where it is now.
+	// Also query the test cwd; the executor creates some links in there.
+ for _, path := range []string{"/dev", "/sys", "/proc", "/selinux", "."} {
+ // Our globs currently do not support recursion (#4906),
+	// so we append N "/*" parts manually. Some of the paths can be very deep, e.g. try:
+ // sudo find /sys -ls 2>/dev/null | sed "s#[^/]##g" | sort | uniq -c
+ for i := 1; i < 15; i++ {
+ globs = append(globs, path+strings.Repeat("/*", i))
}
}
- return files
+ return globs
}
func extractFileFilter(file string) bool {
- if strings.HasPrefix(file, "/dev/") {
+ if strings.HasPrefix(file, "/dev/") ||
+ strings.HasPrefix(file, "/selinux/") ||
+ strings.HasPrefix(file, "./") {
return true
}
if proc := "/proc/"; strings.HasPrefix(file, proc) {
@@ -234,5 +293,5 @@ func extractFileFilter(file string) bool {
}
return true
}
- return false
+ panic(fmt.Sprintf("unhandled file %q", file))
}
diff --git a/pkg/rpcserver/local.go b/pkg/rpcserver/local.go
index c8052138a..5faa8334b 100644
--- a/pkg/rpcserver/local.go
+++ b/pkg/rpcserver/local.go
@@ -112,8 +112,7 @@ type local struct {
setupDone chan bool
}
-func (ctx *local) MachineChecked(info *flatrpc.InfoRequest, features flatrpc.Feature,
- syscalls map[*prog.Syscall]bool) queue.Source {
+func (ctx *local) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
<-ctx.setupDone
ctx.serv.TriagedCorpus()
return ctx.cfg.MachineChecked(features, syscalls)
diff --git a/pkg/rpcserver/mocks/Manager.go b/pkg/rpcserver/mocks/Manager.go
index 0c14c8c9f..810b5028f 100644
--- a/pkg/rpcserver/mocks/Manager.go
+++ b/pkg/rpcserver/mocks/Manager.go
@@ -72,17 +72,17 @@ func (_m *Manager) CoverageFilter(modules []*vminfo.KernelModule) []uint64 {
return r0
}
-// MachineChecked provides a mock function with given fields: info, features, syscalls
-func (_m *Manager) MachineChecked(info *flatrpc.InfoRequestRawT, features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
- ret := _m.Called(info, features, syscalls)
+// MachineChecked provides a mock function with given fields: features, syscalls
+func (_m *Manager) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
+ ret := _m.Called(features, syscalls)
if len(ret) == 0 {
panic("no return value specified for MachineChecked")
}
var r0 queue.Source
- if rf, ok := ret.Get(0).(func(*flatrpc.InfoRequestRawT, flatrpc.Feature, map[*prog.Syscall]bool) queue.Source); ok {
- r0 = rf(info, features, syscalls)
+ if rf, ok := ret.Get(0).(func(flatrpc.Feature, map[*prog.Syscall]bool) queue.Source); ok {
+ r0 = rf(features, syscalls)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(queue.Source)
diff --git a/pkg/rpcserver/rpcserver.go b/pkg/rpcserver/rpcserver.go
index c064e6938..9d259b733 100644
--- a/pkg/rpcserver/rpcserver.go
+++ b/pkg/rpcserver/rpcserver.go
@@ -48,26 +48,22 @@ type Config struct {
DebugTimeouts bool
Procs int
Slowdown int
- // Extra globs that will be requested during machine checking,
- // and will be passed to MachineChecked callback.
- CheckGlobs []string
- pcBase uint64
- localModules []*vminfo.KernelModule
+ pcBase uint64
+ localModules []*vminfo.KernelModule
}
type RemoteConfig struct {
*mgrconfig.Config
- Manager Manager
- Stats Stats
- CheckGlobs []string
- Debug bool
+ Manager Manager
+ Stats Stats
+ Debug bool
}
//go:generate ../../tools/mockery.sh --name Manager --output ./mocks
type Manager interface {
MaxSignal() signal.Signal
BugFrames() (leaks []string, races []string)
- MachineChecked(info *flatrpc.InfoRequest, features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
+ MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source
CoverageFilter(modules []*vminfo.KernelModule) []uint64
}
@@ -181,7 +177,6 @@ func New(cfg *RemoteConfig) (Server, error) {
Slowdown: cfg.Timeouts.Slowdown,
pcBase: pcBase,
localModules: cfg.LocalModules,
- CheckGlobs: cfg.CheckGlobs,
}, cfg.Manager), nil
}
@@ -280,7 +275,6 @@ func (serv *server) handleRunnerConn(runner *Runner, conn *flatrpc.Conn) error {
opts.Features = serv.setupFeatures
} else {
opts.Files = append(opts.Files, serv.checker.CheckFiles()...)
- opts.Globs = append(serv.target.RequiredGlobs(), serv.cfg.CheckGlobs...)
opts.Features = serv.cfg.Features
}
@@ -321,11 +315,6 @@ func (serv *server) handleMachineInfo(infoReq *flatrpc.InfoRequestRawT) (handsha
serv.StatModules.Add(len(modules))
serv.canonicalModules = cover.NewCanonicalizer(modules, serv.cfg.Cover)
serv.coverFilter = serv.mgr.CoverageFilter(modules)
- globs := make(map[string][]string)
- for _, glob := range infoReq.Globs {
- globs[glob.Name] = glob.Files
- }
- serv.target.UpdateGlobs(globs)
// Flatbuffers don't do deep copy of byte slices,
// so clone manually since we pass it a goroutine.
for _, file := range infoReq.Files {
@@ -395,7 +384,7 @@ func (serv *server) runCheck(info *flatrpc.InfoRequest) error {
}
enabledFeatures := features.Enabled()
serv.setupFeatures = features.NeedSetup()
- newSource := serv.mgr.MachineChecked(info, enabledFeatures, enabledCalls)
+ newSource := serv.mgr.MachineChecked(enabledFeatures, enabledCalls)
serv.baseSource.Store(newSource)
serv.checkDone.Store(true)
return nil
diff --git a/pkg/rpcserver/runner.go b/pkg/rpcserver/runner.go
index 29e79bad5..9ac1a6866 100644
--- a/pkg/rpcserver/runner.go
+++ b/pkg/rpcserver/runner.go
@@ -69,7 +69,6 @@ type handshakeConfig struct {
LeakFrames []string
RaceFrames []string
Files []string
- Globs []string
Features flatrpc.Feature
// Callback() is called in the middle of the handshake process.
@@ -102,7 +101,6 @@ func (runner *Runner) Handshake(conn *flatrpc.Conn, cfg *handshakeConfig) error
LeakFrames: cfg.LeakFrames,
RaceFrames: cfg.RaceFrames,
Files: cfg.Files,
- Globs: cfg.Globs,
Features: cfg.Features,
}
if err := flatrpc.Send(conn, connectReply); err != nil {
@@ -292,7 +290,8 @@ func (runner *Runner) sendRequest(req *queue.Request) error {
opts.EnvFlags |= flatrpc.ExecEnvDebug
}
var data []byte
- if req.BinaryFile == "" {
+ switch req.Type {
+ case flatrpc.RequestTypeProgram:
progData, err := req.Prog.SerializeForExec()
if err != nil {
// It's bad if we systematically fail to serialize programs,
@@ -303,8 +302,7 @@ func (runner *Runner) sendRequest(req *queue.Request) error {
return nil
}
data = progData
- } else {
- flags |= flatrpc.RequestFlagIsBinary
+ case flatrpc.RequestTypeBinary:
fileData, err := os.ReadFile(req.BinaryFile)
if err != nil {
req.Done(&queue.Result{
@@ -314,6 +312,11 @@ func (runner *Runner) sendRequest(req *queue.Request) error {
return nil
}
data = fileData
+ case flatrpc.RequestTypeGlob:
+ data = append([]byte(req.GlobPattern), 0)
+ flags |= flatrpc.RequestFlagReturnOutput
+ default:
+ panic("unhandled request type")
}
var avoid uint64
for _, id := range req.Avoid {
@@ -326,8 +329,9 @@ func (runner *Runner) sendRequest(req *queue.Request) error {
Type: flatrpc.HostMessagesRawExecRequest,
Value: &flatrpc.ExecRequest{
Id: id,
+ Type: req.Type,
Avoid: avoid,
- ProgData: data,
+ Data: data,
Flags: flags,
ExecOpts: &opts,
AllSignal: allSignal,
@@ -361,7 +365,18 @@ func (runner *Runner) handleExecutingMessage(msg *flatrpc.ExecutingMessage) erro
} else {
runner.stats.statExecRetries.Add(1)
}
- runner.lastExec.Note(int(msg.Id), proc, req.Prog.Serialize(), osutil.MonotonicNano())
+ var data []byte
+ switch req.Type {
+ case flatrpc.RequestTypeProgram:
+ data = req.Prog.Serialize()
+ case flatrpc.RequestTypeBinary:
+ data = []byte(fmt.Sprintf("executing binary %v\n", req.BinaryFile))
+ case flatrpc.RequestTypeGlob:
+ data = []byte(fmt.Sprintf("expanding glob: %v\n", req.GlobPattern))
+ default:
+ panic(fmt.Sprintf("unhandled request type %v", req.Type))
+ }
+ runner.lastExec.Note(int(msg.Id), proc, data, osutil.MonotonicNano())
select {
case runner.injectExec <- true:
default:
@@ -385,7 +400,7 @@ func (runner *Runner) handleExecResult(msg *flatrpc.ExecResult) error {
}
delete(runner.requests, msg.Id)
delete(runner.executing, msg.Id)
- if msg.Info != nil {
+ if req.Type == flatrpc.RequestTypeProgram && msg.Info != nil {
for len(msg.Info.Calls) < len(req.Prog.Calls) {
msg.Info.Calls = append(msg.Info.Calls, &flatrpc.CallInfo{
Error: 999,
diff --git a/pkg/runtest/run.go b/pkg/runtest/run.go
index ca3ed3b4b..d9f0aa25a 100644
--- a/pkg/runtest/run.go
+++ b/pkg/runtest/run.go
@@ -113,7 +113,7 @@ func (ctx *Context) Run(waitCtx context.Context) error {
if !verbose || ctx.Verbose {
ctx.log("%-38v: %v", req.name, result)
}
- if req.Request != nil && req.Request.BinaryFile != "" {
+ if req.Request != nil && req.Type == flatrpc.RequestTypeBinary && req.BinaryFile != "" {
os.Remove(req.BinaryFile)
}
}
@@ -400,6 +400,7 @@ func (ctx *Context) createTest(req *runRequest) {
req.Request.Done(&queue.Result{})
return
}
+ req.Type = flatrpc.RequestTypeBinary
req.BinaryFile = bin
ctx.submit(req)
}()
@@ -493,7 +494,7 @@ func checkResult(req *runRequest) error {
return fmt.Errorf("non-successful result status (%v)", req.result.Status)
}
infos := []*flatrpc.ProgInfo{req.result.Info}
- isC := req.BinaryFile != ""
+ isC := req.Type == flatrpc.RequestTypeBinary
if isC {
var err error
if infos, err = parseBinOutput(req); err != nil {
diff --git a/pkg/vminfo/syscalls.go b/pkg/vminfo/syscalls.go
index 21ae6edd1..9793cfee8 100644
--- a/pkg/vminfo/syscalls.go
+++ b/pkg/vminfo/syscalls.go
@@ -57,7 +57,8 @@ func newCheckContext(ctx context.Context, cfg *Config, impl checker, executor qu
}
}
-func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
+func (ctx *checkContext) do(fileInfos []*flatrpc.FileInfo, featureInfos []*flatrpc.FeatureInfo) (
+ map[*prog.Syscall]bool, map[*prog.Syscall]string, Features, error) {
sysTarget := targets.Get(ctx.cfg.Target.OS, ctx.cfg.Target.Arch)
ctx.fs = createVirtualFilesystem(fileInfos)
for _, id := range ctx.cfg.Syscalls {
@@ -91,10 +92,37 @@ func (ctx *checkContext) start(fileInfos []*flatrpc.FileInfo) {
}()
}
ctx.startFeaturesCheck()
-}
-func (ctx *checkContext) wait(featureInfos []*flatrpc.FeatureInfo) (
- map[*prog.Syscall]bool, map[*prog.Syscall]string, Features, error) {
+ var globReqs []*queue.Request
+ for _, glob := range ctx.target.RequiredGlobs() {
+ req := &queue.Request{
+ Type: flatrpc.RequestTypeGlob,
+ GlobPattern: glob,
+ ExecOpts: flatrpc.ExecOpts{
+ EnvFlags: ctx.cfg.Sandbox,
+ SandboxArg: ctx.cfg.SandboxArg,
+ },
+ Important: true,
+ }
+ ctx.executor.Submit(req)
+ globReqs = append(globReqs, req)
+ }
+
+ // Up to this point we submit all requests (start submitting goroutines),
+ // so that all requests execute in parallel. After this point we wait
+ // for request completion and handle results.
+
+ globs := make(map[string][]string)
+ for _, req := range globReqs {
+ res := req.Wait(ctx.ctx)
+ if res.Status != queue.Success {
+ return nil, nil, nil, fmt.Errorf("failed to execute glob: %w (%v)\n%s\n%s",
+ res.Err, res.Status, req.GlobPattern, res.Output)
+ }
+ globs[req.GlobPattern] = res.GlobFiles()
+ }
+ ctx.target.UpdateGlobs(globs)
+
enabled := make(map[*prog.Syscall]bool)
disabled := make(map[*prog.Syscall]string)
for i := 0; i < ctx.pendingSyscalls; i++ {
diff --git a/pkg/vminfo/vminfo.go b/pkg/vminfo/vminfo.go
index d2a728585..dee1924b5 100644
--- a/pkg/vminfo/vminfo.go
+++ b/pkg/vminfo/vminfo.go
@@ -103,8 +103,7 @@ func (checker *Checker) Run(files []*flatrpc.FileInfo, featureInfos []*flatrpc.F
map[*prog.Syscall]bool, map[*prog.Syscall]string, Features, error) {
ctx := checker.checkContext
checker.checkContext = nil
- ctx.start(files)
- return ctx.wait(featureInfos)
+ return ctx.do(files, featureInfos)
}
// Implementation of the queue.Source interface.
diff --git a/pkg/vminfo/vminfo_test.go b/pkg/vminfo/vminfo_test.go
index 398cf93e2..4bdccc09e 100644
--- a/pkg/vminfo/vminfo_test.go
+++ b/pkg/vminfo/vminfo_test.go
@@ -104,18 +104,23 @@ func createSuccessfulResults(source queue.Source, stop chan struct{}) {
// Currently we have 641 (when we failed to properly dedup syscall tests, it was 4349).
panic("too many test programs")
}
- info := &flatrpc.ProgInfo{}
- for range req.Prog.Calls {
- info.Calls = append(info.Calls, &flatrpc.CallInfo{
- Cover: []uint64{1},
- Signal: []uint64{1},
- Comps: []*flatrpc.Comparison{{Op1: 1, Op2: 2}},
- })
- }
- req.Done(&queue.Result{
+ res := &queue.Result{
Status: queue.Success,
- Info: info,
- })
+ }
+ switch req.Type {
+ case flatrpc.RequestTypeProgram:
+ res.Info = &flatrpc.ProgInfo{}
+ for range req.Prog.Calls {
+ res.Info.Calls = append(res.Info.Calls, &flatrpc.CallInfo{
+ Cover: []uint64{1},
+ Signal: []uint64{1},
+ Comps: []*flatrpc.Comparison{{Op1: 1, Op2: 2}},
+ })
+ }
+ case flatrpc.RequestTypeGlob:
+ res.Output = []byte("/some/file\n")
+ }
+ req.Done(res)
}
}
diff --git a/syz-manager/manager.go b/syz-manager/manager.go
index 98c7b4dbd..40c1da6e5 100644
--- a/syz-manager/manager.go
+++ b/syz-manager/manager.go
@@ -302,9 +302,6 @@ func RunManager(mode *Mode, cfg *mgrconfig.Config) {
Stats: mgr.servStats,
Debug: *flagDebug,
}
- if mode == ModeIfaceProbe {
- rpcCfg.CheckGlobs = ifaceprobe.Globs()
- }
mgr.serv, err = rpcserver.New(rpcCfg)
if err != nil {
log.Fatalf("failed to create rpc server: %v", err)
@@ -1085,8 +1082,7 @@ func (mgr *Manager) BugFrames() (leaks, races []string) {
return
}
-func (mgr *Manager) MachineChecked(info *flatrpc.InfoRequest, features flatrpc.Feature,
- enabledSyscalls map[*prog.Syscall]bool) queue.Source {
+func (mgr *Manager) MachineChecked(features flatrpc.Feature, enabledSyscalls map[*prog.Syscall]bool) queue.Source {
if len(enabledSyscalls) == 0 {
log.Fatalf("all system calls are disabled")
}
@@ -1197,7 +1193,7 @@ func (mgr *Manager) MachineChecked(info *flatrpc.InfoRequest, features flatrpc.F
} else if mgr.mode == ModeIfaceProbe {
exec := queue.Plain()
go func() {
- res, err := ifaceprobe.Run(vm.ShutdownCtx(), mgr.cfg, exec, info)
+ res, err := ifaceprobe.Run(vm.ShutdownCtx(), mgr.cfg, features, exec)
if err != nil {
log.Fatalf("interface probing failed: %v", err)
}
diff --git a/tools/syz-diff/diff.go b/tools/syz-diff/diff.go
index a7609fb2a..3707867ae 100644
--- a/tools/syz-diff/diff.go
+++ b/tools/syz-diff/diff.go
@@ -310,8 +310,7 @@ func (kc *kernelContext) BugFrames() (leaks, races []string) {
return nil, nil
}
-func (kc *kernelContext) MachineChecked(_ *flatrpc.InfoRequestRawT, features flatrpc.Feature,
- syscalls map[*prog.Syscall]bool) queue.Source {
+func (kc *kernelContext) MachineChecked(features flatrpc.Feature, syscalls map[*prog.Syscall]bool) queue.Source {
if len(syscalls) == 0 {
log.Fatalf("all system calls are disabled")
}
diff --git a/tools/syz-execprog/execprog.go b/tools/syz-execprog/execprog.go
index 9d7f082ed..4ab808ad0 100644
--- a/tools/syz-execprog/execprog.go
+++ b/tools/syz-execprog/execprog.go
@@ -53,6 +53,7 @@ var (
flagDebug = flag.Bool("debug", false, "debug output from executor")
flagSlowdown = flag.Int("slowdown", 1, "execution slowdown caused by emulation/instrumentation")
flagUnsafe = flag.Bool("unsafe", false, "use unsafe program deserialization mode")
+ flagGlob = flag.String("glob", "", "run glob expansion request")
// The in the stress mode resembles simple unguided fuzzer.
// This mode can be used as an intermediate step when porting syzkaller to a new OS,
@@ -138,7 +139,7 @@ func main() {
}
progs := loadPrograms(target, flag.Args())
- if !*flagStress && len(progs) == 0 {
+ if *flagGlob == "" && !*flagStress && len(progs) == 0 {
flag.Usage()
os.Exit(1)
}
@@ -147,6 +148,7 @@ func main() {
target: target,
done: done,
progs: progs,
+ globs: strings.Split(*flagGlob, ":"),
rs: rand.NewSource(time.Now().UnixNano()),
coverFile: *flagCoverFile,
output: *flagOutput,
@@ -191,6 +193,7 @@ type Context struct {
target *prog.Target
done func()
progs []*prog.Prog
+ globs []string
defaultOpts flatrpc.ExecOpts
choiceTable *prog.ChoiceTable
logMu sync.Mutex
@@ -217,6 +220,18 @@ func (ctx *Context) machineChecked(features flatrpc.Feature, syscalls map[*prog.
}
func (ctx *Context) Next() *queue.Request {
+ if *flagGlob != "" {
+ idx := int(ctx.resultIndex.Add(1) - 1)
+ if idx >= len(ctx.globs) {
+ return nil
+ }
+ req := &queue.Request{
+ Type: flatrpc.RequestTypeGlob,
+ GlobPattern: ctx.globs[idx],
+ }
+ req.OnDone(ctx.doneGlob)
+ return req
+ }
var p *prog.Prog
if ctx.stress {
p = ctx.createStressProg()
@@ -246,6 +261,25 @@ func (ctx *Context) Next() *queue.Request {
return req
}
+func (ctx *Context) doneGlob(req *queue.Request, res *queue.Result) bool {
+ if res.Status == queue.Success {
+ files := res.GlobFiles()
+ ctx.logMu.Lock()
+ fmt.Printf("glob %q expanded to %v files\n", req.GlobPattern, len(files))
+ for _, file := range files {
+ fmt.Printf("\t%q\n", file)
+ }
+ ctx.logMu.Unlock()
+ } else {
+ fmt.Printf("request failed: %v (%v)\n%s\n", res.Status, res.Err, res.Output)
+ }
+ completed := int(ctx.completed.Add(1))
+ if completed >= len(ctx.globs) {
+ ctx.done()
+ }
+ return true
+}
+
func (ctx *Context) Done(req *queue.Request, res *queue.Result) bool {
if res.Info != nil {
ctx.printCallResults(res.Info)