Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

i#6712 record bounds: Add record filter sanity checks #6749

Merged
merged 6 commits
Apr 2, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 17 additions & 4 deletions clients/drcachesim/reader/reader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,13 @@
namespace dynamorio {
namespace drmemtrace {

// Unlike assert(), this check fires even in release (NDEBUG) builds: it calls
// abort() on failure. Used for fatal invariant violations (e.g., corrupt or
// mismatched instruction encodings) where continuing would silently produce
// wrong analysis results. The do/while(0) wrapper makes the macro behave as a
// single statement in all contexts (e.g., an unbraced if/else).
#define assert_release_too(cond) \
    do { \
        if (!(cond)) \
            abort(); \
    } while (0)

// Work around clang-format bug: no newline after return type for single-char operator.
// clang-format off
const memref_t &
Expand Down Expand Up @@ -204,9 +211,14 @@ reader_t::process_input_entry()
// Look for encoding bits that belong to this instr.
derekbruening marked this conversation as resolved.
Show resolved Hide resolved
if (last_encoding_.size > 0) {
if (last_encoding_.size != cur_ref_.instr.size) {
ERRMSG("Encoding size %zu != instr size %zu for PC 0x%zx\n",
last_encoding_.size, cur_ref_.instr.size, cur_ref_.instr.addr);
assert(false);
ERRMSG(
"Encoding size %zu != instr size %zu for PC 0x%zx at ord %" PRIu64
" instr %" PRIu64 " last_timestamp=0x%" PRIx64 "\n",
last_encoding_.size, cur_ref_.instr.size, cur_ref_.instr.addr,
get_record_ordinal(), get_instruction_ordinal(),
get_last_timestamp());
// Encoding errors indicate serious problems so we always abort.
assert_release_too(false);
}
memcpy(cur_ref_.instr.encoding, last_encoding_.bits, last_encoding_.size);
cur_ref_.instr.encoding_is_new = true;
Expand All @@ -222,7 +234,8 @@ reader_t::process_input_entry()
// in this mode.
!core_sharded_) {
ERRMSG("Missing encoding for 0x%zx\n", cur_ref_.instr.addr);
assert(false);
// Encoding errors indicate serious problems so we always abort.
assert_release_too(false);
}
}
last_encoding_.size = 0;
Expand Down
20 changes: 18 additions & 2 deletions clients/drcachesim/tools/filter/record_filter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -603,8 +603,18 @@ record_filter_t::process_chunk_encodings(per_shard_t *per_shard, trace_entry_t &
VPRINT(this, 3,
"output new-chunk encoding chunk=%" PRIu64 " ref=%" PRIu64 "\n",
per_shard->chunk_ordinal, per_shard->cur_refs);
if (!write_trace_entries(per_shard,
per_shard->per_input->pc2encoding[entry.addr])) {
// Sanity check that the encoding size is correct.
const auto &enc = per_shard->per_input->pc2encoding[entry.addr];
size_t enc_sz = 0;
// Since all but the last entry are fixed-size we could avoid a loop
// but the loop is easier to read and we have just 1 or 2 iters.
for (const auto &record : enc)
enc_sz += record.size;
derekbruening marked this conversation as resolved.
Show resolved Hide resolved
if (enc_sz != entry.size) {
return "New-chunk encoding size " + std::to_string(enc_sz) +
" != instr size " + std::to_string(entry.size);
}
if (!write_trace_entries(per_shard, enc)) {
return "Failed to write";
}
// Avoid emitting the encoding twice.
Expand Down Expand Up @@ -708,6 +718,12 @@ record_filter_t::parallel_shard_memref(void *shard_data, const trace_entry_t &in
// It would be nice to assert that this pointer is not in use in other shards
// but that is too expensive.
per_shard->per_input = it->second.get();
// Not supposed to see a switch that splits an encoding from its instr.
// That would cause recording an incorrect encoding into pc2encoding.
if (!per_shard->last_encoding.empty()) {
per_shard->error = "Input switch immediately after encoding not supported";
return false;
}
}
if (per_shard->enabled && stop_timestamp_ != 0 &&
per_shard->shard_stream->get_last_timestamp() >= stop_timestamp_) {
Expand Down
Loading