Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitattributes
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
testdata/perf_map/* filter=lfs diff=lfs merge=lfs -text
*.gif binary filter=lfs diff=lfs merge=lfs -text
src/executor/wall_time/perf/snapshots/codspeed_runner__executor__wall_time__perf__debug_info__tests__ruff_debug_info.snap filter=lfs diff=lfs merge=lfs -text
src/executor/wall_time/perf/snapshots/codspeed_runner__executor__wall_time__perf__perf_map__tests__ruff_symbols.snap filter=lfs diff=lfs merge=lfs -text
src/executor/wall_time/perf/snapshots/codspeed_runner__executor__wall_time__perf__module_symbols__tests__ruff_symbols.snap filter=lfs diff=lfs merge=lfs -text
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ repos:
- id: check-yaml
- id: check-toml
- id: check-added-large-files
args: ["--maxkb=1000"]
- repo: https://github.com/doublify/pre-commit-rust
rev: v1.0
hooks:
Expand Down
18 changes: 6 additions & 12 deletions AGENTS.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,21 +17,16 @@ cargo build
# Build in release mode
cargo build --release

# Run tests (prefer nextest if available)
cargo nextest run # preferred if installed
cargo test # fallback if nextest is not available
# Run tests
cargo test

# Run specific test
cargo nextest run <test_name> # with nextest
cargo test <test_name> # with cargo test
cargo test <test_name>

# Run tests with output
cargo nextest run --nocapture # with nextest
cargo test -- --nocapture # with cargo test
cargo test -- --nocapture
```

**Note**: Always check if `cargo nextest` is available first (with `cargo nextest --version` or `which cargo-nextest`). If available, use it instead of `cargo test` as it provides faster and more reliable test execution.

### Running the Application

```bash
Expand Down Expand Up @@ -99,7 +94,7 @@ The core functionality for running benchmarks:

The project uses:

- `cargo nextest` (preferred) or standard Rust `cargo test`
- `cargo test`
- `insta` for snapshot testing
- `rstest` for parameterized tests
- `temp-env` for environment variable testing
Expand All @@ -108,5 +103,4 @@ Test files include snapshots in `snapshots/` directories for various run environ

**Important**:

- Always prefer `cargo nextest run` over `cargo test` when running tests, as it provides better performance and reliability.
- Some walltime executor tests require `sudo` access and will fail in non-interactive environments (e.g., `test_walltime_executor::*`). These failures are expected if sudo is not available.
- Some tests require `sudo` access. They are skipped by default unless the `GITHUB_ACTIONS` env var is set.
4 changes: 4 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -96,3 +96,7 @@ This ensures only stable runner releases are marked as "latest" in GitHub.
## Known issue

- If one of the crates is currently in beta version, for example the runner is in beta version 4.4.2-beta.1, any alpha release will fail for any crate, saying that only minor, major or patch releases are supported.

## Testing

- Some tests require `sudo` access. They are skipped by default unless the `GITHUB_ACTIONS` env var is set.
1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@ default-run = "codspeed"
name = "codspeed"
path = "src/main.rs"


[dependencies]
anyhow = { workspace = true }
clap = { workspace = true, features = ["env", "color"] }
Expand Down
6 changes: 6 additions & 0 deletions build.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
// Build script: re-run the build (and thus rebuild the test target) whenever
// GITHUB_ACTIONS changes, so the full test suite can be exercised locally by
// exporting GITHUB_ACTIONS=1. Needed because `test_with` gates are resolved
// at compile time, not at test run time.
fn main() {
    const TRACKED_ENV_VAR: &str = "GITHUB_ACTIONS";
    println!("cargo::rerun-if-env-changed={TRACKED_ENV_VAR}");
}
5 changes: 5 additions & 0 deletions crates/memtrack/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@ use std::{env, path::PathBuf};

#[cfg(feature = "ebpf")]
fn build_ebpf() {
// Force a rebuild of the test target to be able to run the full test suite locally just by
// setting GITHUB_ACTIONS=1 in the environment.
// This is because `test_with` is evaluated at build time
println!("cargo::rerun-if-env-changed=GITHUB_ACTIONS");

use libbpf_cargo::SkeletonBuilder;

println!("cargo:rerun-if-changed=src/ebpf/c");
Expand Down
48 changes: 4 additions & 44 deletions crates/runner-shared/src/unwind_data.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,8 @@ use std::{hash::DefaultHasher, ops::Range};

pub const UNWIND_FILE_EXT: &str = "unwind_data";

pub type UnwindData = UnwindDataV2;

impl UnwindDataV3 {
pub type UnwindData = UnwindDataV3;
impl UnwindData {
pub fn parse(reader: &[u8]) -> anyhow::Result<Self> {
let compat: UnwindDataCompat = bincode::deserialize(reader)?;

Expand Down Expand Up @@ -93,45 +92,6 @@ impl UnwindDataV2 {
}
}
}

/// Will be removed once the backend has been deployed and we can merge the changes in the runner
pub fn save_to<P: AsRef<std::path::Path>>(&self, folder: P, pid: i32) -> anyhow::Result<()> {
let unwind_data_path = folder.as_ref().join(format!(
"{}_{:x}_{:x}_{}.{UNWIND_FILE_EXT}",
pid,
self.avma_range.start,
self.avma_range.end,
self.timestamp.unwrap_or_default()
));
self.to_file(unwind_data_path)?;

Ok(())
}

pub fn to_file<P: AsRef<std::path::Path>>(&self, path: P) -> anyhow::Result<()> {
if let Ok(true) = std::fs::exists(path.as_ref()) {
// This happens in CI for the root `systemd-run` process which execs into bash which
// also execs into bash, each process reloading common libraries like `ld-linux.so`.
// We detect this when we harvest unwind_data by parsing the perf data (exec-harness).
// Until we properly handle the process tree and deduplicate unwind data, just debug
// log here
// Any relevant occurence should have other symptoms reported by users.
log::debug!(
"{} already exists, file will be truncated",
path.as_ref().display()
);
log::debug!("{} {:x?}", self.path, self.avma_range);
}

let compat = UnwindDataCompat::V2(self.clone());
let file = std::fs::File::create(path.as_ref())?;
const BUFFER_SIZE: usize = 256 * 1024 /* 256 KB */;

let writer = BufWriter::with_capacity(BUFFER_SIZE, file);
bincode::serialize_into(writer, &compat)?;

Ok(())
}
}

impl From<UnwindDataV1> for UnwindDataV2 {
Expand Down Expand Up @@ -175,7 +135,7 @@ impl From<UnwindDataV2> for UnwindDataV3 {
}
}

impl Debug for UnwindData {
impl Debug for UnwindDataV2 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let eh_frame_hdr_hash = {
let mut hasher = DefaultHasher::new();
Expand Down Expand Up @@ -322,7 +282,7 @@ mod tests {
#[test]
fn test_parse_v3_as_v3() {
// Parse V3 binary artifact as V3 using UnwindData::parse
let parsed_v3 = UnwindDataV3::parse(V3_BINARY).expect("Failed to parse V3 data as V3");
let parsed_v3 = UnwindData::parse(V3_BINARY).expect("Failed to parse V3 data as V3");

// Should match expected V3 data
let expected_v3 = create_sample_v3();
Expand Down
14 changes: 13 additions & 1 deletion src/executor/shared/fifo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,15 @@ impl GenericFifo {
pub struct FifoBenchmarkData {
/// Name and version of the integration
pub integration: Option<(String, String)>,
pub bench_pids: HashSet<pid_t>,
}

impl FifoBenchmarkData {
    /// Whether the reported integration is the `exec-harness`.
    ///
    /// Returns `false` when no integration was reported at all.
    pub fn is_exec_harness(&self) -> bool {
        matches!(&self.integration, Some((name, _)) if name == "exec-harness")
    }
}

pub struct RunnerFifo {
Expand Down Expand Up @@ -254,7 +263,10 @@ impl RunnerFifo {
);
let marker_result =
ExecutionTimestamps::new(&bench_order_by_timestamp, &markers);
let fifo_data = FifoBenchmarkData { integration };
let fifo_data = FifoBenchmarkData {
integration,
bench_pids,
};
return Ok((marker_result, fifo_data, exit_status));
}
Err(e) => return Err(anyhow::Error::from(e)),
Expand Down
28 changes: 15 additions & 13 deletions src/executor/tests.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,6 @@
use super::Config;
use crate::executor::ExecutionContext;
use crate::executor::Executor;
use crate::executor::memory::executor::MemoryExecutor;
use crate::executor::valgrind::executor::ValgrindExecutor;
use crate::executor::wall_time::executor::WallTimeExecutor;
use crate::runner_mode::RunnerMode;
use crate::system::SystemInfo;
use rstest_reuse::{self, *};
Expand Down Expand Up @@ -109,16 +106,6 @@ const ENV_TESTS: [(&str, &str); 8] = [
#[case(TESTS[5])]
fn test_cases(#[case] cmd: &str) {}

// Exec-harness currently does not support the inline multi command scripts
#[template]
#[rstest::rstest]
#[case(TESTS[0])]
#[case(TESTS[1])]
#[case(TESTS[2])]
fn exec_harness_test_cases() -> Vec<&'static str> {
EXEC_HARNESS_COMMANDS.to_vec()
}

#[template]
#[rstest::rstest]
#[case(ENV_TESTS[0])]
Expand Down Expand Up @@ -182,6 +169,7 @@ async fn acquire_bpf_instrumentation_lock() -> SemaphorePermit<'static> {

mod valgrind {
use super::*;
use crate::executor::valgrind::executor::ValgrindExecutor;

async fn get_valgrind_executor() -> (SemaphorePermit<'static>, &'static ValgrindExecutor) {
static VALGRIND_EXECUTOR: OnceCell<ValgrindExecutor> = OnceCell::const_new();
Expand Down Expand Up @@ -240,8 +228,10 @@ mod valgrind {
}
}

#[test_with::env(GITHUB_ACTIONS)]
mod walltime {
use super::*;
use crate::executor::wall_time::executor::WallTimeExecutor;

async fn get_walltime_executor() -> (SemaphorePermit<'static>, WallTimeExecutor) {
static WALLTIME_INIT: OnceCell<()> = OnceCell::const_new();
Expand Down Expand Up @@ -358,6 +348,16 @@ fi
})
.await;
}
//
// Exec-harness currently does not support the inline multi command scripts
#[template]
#[rstest::rstest]
#[case(TESTS[0])]
#[case(TESTS[1])]
#[case(TESTS[2])]
fn exec_harness_test_cases() -> Vec<&'static str> {
EXEC_HARNESS_COMMANDS.to_vec()
}

// Ensure that the walltime executor works with the exec-harness
#[apply(exec_harness_test_cases)]
Expand Down Expand Up @@ -391,8 +391,10 @@ fi
}
}

#[test_with::env(GITHUB_ACTIONS)]
mod memory {
use super::*;
use crate::executor::memory::executor::MemoryExecutor;

async fn get_memory_executor() -> (
SemaphorePermit<'static>,
Expand Down
Loading