From 3bc5334440507711c176495afb51146c922d033d Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Wed, 25 Mar 2026 15:10:51 +0100 Subject: [PATCH 1/6] even more thread separation --- csrc/binding.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/csrc/binding.cpp b/csrc/binding.cpp index bda14ad..2661a78 100644 --- a/csrc/binding.cpp +++ b/csrc/binding.cpp @@ -7,6 +7,7 @@ #include #include +#include <thread> #include "manager.h" int supervisor_main(int sock_fd); @@ -22,8 +23,14 @@ void do_bench(int result_fd, int input_fd, const std::string& kernel_qualname, c signature.allocate(32, rng); auto config = read_benchmark_parameters(input_fd, signature.data()); auto mgr = make_benchmark_manager(result_fd, std::move(signature), config.Seed, discard, nvtx, landlock, mseal, supervisor_sock_fd); - auto [args, expected] = mgr->setup_benchmark(nb::cast<nb::callable>(test_generator), test_kwargs, config.Repeats); - mgr->do_bench_py(kernel_qualname, args, expected, reinterpret_cast<cudaStream_t>(stream)); + nb::gil_scoped_release release; + std::thread run_thread ([&]() + { + nb::gil_scoped_acquire acquire; + auto [args, expected] = mgr->setup_benchmark(nb::cast<nb::callable>(test_generator), test_kwargs, config.Repeats); + mgr->do_bench_py(kernel_qualname, args, expected, reinterpret_cast<cudaStream_t>(stream)); + }); + run_thread.join(); } From baa2fc168bdcc49fe770e37f650e26be4d0740cc Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Wed, 25 Mar 2026 22:49:00 +0100 Subject: [PATCH 2/6] move report generation out of main benchmark function (and thus out of main benchmark thread) --- csrc/binding.cpp | 2 ++ csrc/manager.cpp | 19 +++++++++++-------- csrc/manager.h | 7 ++++++- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/csrc/binding.cpp b/csrc/binding.cpp index 2661a78..ce14f00 100644 --- a/csrc/binding.cpp +++ b/csrc/binding.cpp @@ -31,6 +31,8 @@ void do_bench(int result_fd, int input_fd, const std::string& kernel_qualname, c mgr->do_bench_py(kernel_qualname, args, expected,
reinterpret_cast(stream)); }); run_thread.join(); + mgr->send_report(); + mgr->clean_up(); } diff --git a/csrc/manager.cpp b/csrc/manager.cpp index 01da925..4d01903 100644 --- a/csrc/manager.cpp +++ b/csrc/manager.cpp @@ -477,19 +477,19 @@ void BenchmarkManager::do_bench_py( mErrorCountShift = noise.at(offset); // dry run -- measure overhead of events - float median_event_time = measure_event_overhead(DRY_EVENTS, stream); + mMedianEventTime = measure_event_overhead(DRY_EVENTS, stream); // create a randomized order for running the tests - std::vector test_order(actual_calls); - std::iota(test_order.begin(), test_order.end(), 1); - std::shuffle(test_order.begin(), test_order.end(), rng); + mTestOrder.resize(actual_calls); + std::iota(mTestOrder.begin(), mTestOrder.end(), 1); + std::shuffle(mTestOrder.begin(), mTestOrder.end(), rng); std::uniform_int_distribution check_seed_generator(0, 0xffffffff); nvtx_push("benchmark"); // now do the real runs for (int i = 0; i < actual_calls; i++) { - int test_id = test_order.at(i); + int test_id = mTestOrder.at(i); // page-in real inputs. 
If the user kernel runs on the wrong stream, it's likely it won't see the correct inputs // unfortunately, we need to do this before clearing the cache, so there is a window of opportunity // *but* we deliberately modify a small subset of the inputs, which only get corrected immediately before @@ -522,26 +522,29 @@ void BenchmarkManager::do_bench_py( validate_result(mExpectedOutputs.at(test_id), mOutputBuffers.at(test_id), check_seed_generator(rng), stream); } nvtx_pop(); +} +void BenchmarkManager::send_report() { cudaEventSynchronize(mEndEvents.back()); unsigned error_count; CUDA_CHECK(cudaMemcpy(&error_count, mDeviceErrorCounter, sizeof(unsigned), cudaMemcpyDeviceToHost)); // subtract the nuisance shift that we applied to the counter error_count -= mErrorCountShift; - std::string message = build_result_message(test_order, error_count, median_event_time); + std::string message = build_result_message(mTestOrder, error_count, mMedianEventTime); message = encrypt_message(mSignature.data(), 32, message); fwrite(message.data(), 1, message.size(), mOutputPipe); fflush(mOutputPipe); +} - // cleanup events +void BenchmarkManager::clean_up() { for (auto& event : mStartEvents) CUDA_CHECK(cudaEventDestroy(event)); for (auto& event : mEndEvents) CUDA_CHECK(cudaEventDestroy(event)); mStartEvents.clear(); mEndEvents.clear(); } -std::string BenchmarkManager::build_result_message(const std::vector& test_order, unsigned error_count, float median_event_time) const { +std::string BenchmarkManager::build_result_message(const std::pmr::vector& test_order, unsigned error_count, float median_event_time) const { std::ostringstream oss; oss << "event-overhead\t" << median_event_time * 1000 << " µs\n"; diff --git a/csrc/manager.h b/csrc/manager.h index 7722c74..fd72ee6 100644 --- a/csrc/manager.h +++ b/csrc/manager.h @@ -50,6 +50,8 @@ class BenchmarkManager { public: std::pair, std::vector> setup_benchmark(const nb::callable& generate_test_case, const nb::dict& kwargs, int repeats); void 
do_bench_py(const std::string& kernel_qualname, const std::vector& args, const std::vector& expected, cudaStream_t stream); + void send_report(); + void clean_up(); private: friend BenchmarkManagerPtr make_benchmark_manager(int result_fd, ObfuscatedHexDigest signature, std::uint64_t seed, bool discard, bool nvtx, bool landlock, bool mseal, int supervisor_socket); friend BenchmarkManagerDeleter; @@ -113,10 +115,13 @@ class BenchmarkManager { std::pmr::vector mExpectedOutputs; std::pmr::vector mShadowArguments; std::pmr::vector mOutputBuffers; + std::pmr::vector mTestOrder; FILE* mOutputPipe = nullptr; ObfuscatedHexDigest mSignature; + float mMedianEventTime = -1.f; + static ShadowArgumentList make_shadow_args(const nb::tuple& args, cudaStream_t stream, std::pmr::memory_resource* resource); @@ -133,7 +138,7 @@ class BenchmarkManager { int run_warmup(nb::callable& kernel, const nb::tuple& args, cudaStream_t stream); nb::callable get_kernel(const std::string& qualname, const nb::tuple& call_args); - [[nodiscard]] std::string build_result_message(const std::vector& test_order, unsigned error_count, float median_event_time) const; + [[nodiscard]] std::string build_result_message(const std::pmr::vector& test_order, unsigned error_count, float median_event_time) const; // debug only: Any sort of test exploit that targets specific values of this class is going to be brittle, From 684812351e7256d8a5bb73d6ba81a0c79b00b37a Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Wed, 25 Mar 2026 23:00:46 +0100 Subject: [PATCH 3/6] refactor --- csrc/manager.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/csrc/manager.cpp b/csrc/manager.cpp index 4d01903..519b9e5 100644 --- a/csrc/manager.cpp +++ b/csrc/manager.cpp @@ -446,14 +446,8 @@ void BenchmarkManager::do_bench_py( setup_test_cases(args, expected, stream); install_protections(); - // at this point, we call user code as we import the kernel (executing arbitrary top-level code) - 
// after this, we cannot trust python anymore - nb::callable kernel = get_kernel(kernel_qualname, args.at(0)); - - // now, run a few more times for warmup; in total aim for 1 second of warmup runs - int actual_calls = run_warmup(kernel, args.at(0), stream); - constexpr int DRY_EVENTS = 100; - const int num_events = std::max(actual_calls, DRY_EVENTS); + constexpr std::size_t DRY_EVENTS = 100; + const std::size_t num_events = std::max(mShadowArguments.size(), DRY_EVENTS); mStartEvents.resize(num_events); mEndEvents.resize(num_events); for (int i = 0; i < num_events; i++) { @@ -461,6 +455,16 @@ void BenchmarkManager::do_bench_py( CUDA_CHECK(cudaEventCreate(&mEndEvents.at(i))); } + // dry run -- measure overhead of events + mMedianEventTime = measure_event_overhead(DRY_EVENTS, stream); + + // at this point, we call user code as we import the kernel (executing arbitrary top-level code) + // after this, we cannot trust python anymore + nb::callable kernel = get_kernel(kernel_qualname, args.at(0)); + + // now, run a few more times for warmup; in total aim for 1 second of warmup runs + int actual_calls = run_warmup(kernel, args.at(0), stream); + // pick a random spot for the unsigned // initialize the whole area with random junk; the error counter // will be shifted by the initial value, so just writing zero @@ -476,9 +480,6 @@ void BenchmarkManager::do_bench_py( mDeviceErrorCounter = mDeviceErrorBase + offset; mErrorCountShift = noise.at(offset); - // dry run -- measure overhead of events - mMedianEventTime = measure_event_overhead(DRY_EVENTS, stream); - // create a randomized order for running the tests mTestOrder.resize(actual_calls); std::iota(mTestOrder.begin(), mTestOrder.end(), 1); @@ -489,7 +490,7 @@ void BenchmarkManager::do_bench_py( nvtx_push("benchmark"); // now do the real runs for (int i = 0; i < actual_calls; i++) { - int test_id = mTestOrder.at(i); + const int test_id = mTestOrder.at(i); // page-in real inputs. 
If the user kernel runs on the wrong stream, it's likely it won't see the correct inputs // unfortunately, we need to do this before clearing the cache, so there is a window of opportunity // *but* we deliberately modify a small subset of the inputs, which only get corrected immediately before From 9c5df2a4336bf366a8f915eae58d0debf4d37e93 Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Thu, 26 Mar 2026 00:06:20 +0100 Subject: [PATCH 4/6] also move warmup into protected thread --- csrc/manager.cpp | 74 +++++++++++++++++++++++------------------------- csrc/manager.h | 3 +- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/csrc/manager.cpp b/csrc/manager.cpp index 519b9e5..5a7d19d 100644 --- a/csrc/manager.cpp +++ b/csrc/manager.cpp @@ -336,39 +336,6 @@ void BenchmarkManager::install_protections() { install_seccomp_filter(); } -int BenchmarkManager::run_warmup(nb::callable& kernel, const nb::tuple& args, cudaStream_t stream) { - std::chrono::high_resolution_clock::time_point cpu_start = std::chrono::high_resolution_clock::now(); - int warmup_run_count = 0; - double time_estimate; - nvtx_push("timing"); - while (true) { - // note: we are assuming here that calling the kernel multiple times for the same input is a safe operation - // this is only potentially problematic for in-place kernels; - CUDA_CHECK(cudaDeviceSynchronize()); - clear_cache(stream); - kernel(*args); - CUDA_CHECK(cudaDeviceSynchronize()); - std::chrono::high_resolution_clock::time_point cpu_end = std::chrono::high_resolution_clock::now(); - std::chrono::duration elapsed_seconds = cpu_end - cpu_start; - ++warmup_run_count; - if (elapsed_seconds.count() > mWarmupSeconds) { - time_estimate = elapsed_seconds.count() / warmup_run_count; - break; - } - } - nvtx_pop(); - - // note: this is a very conservative estimate. Timing above was measured with syncs between every kernel. 
- int calls = mOutputBuffers.size() - 1; - const int actual_calls = std::clamp(static_cast(std::ceil(mBenchmarkSeconds / time_estimate)), 1, calls); - - if (actual_calls < 3) { - throw std::runtime_error("The initial speed test indicated that running times are too slow to generate meaningful benchmark numbers: " + std::to_string(time_estimate)); - } - - return actual_calls; -} - static inline std::uintptr_t page_mask() { std::uintptr_t page_size = getpagesize(); return ~(page_size - 1u); @@ -381,12 +348,16 @@ void protect_range(void* ptr, size_t size, int prot) { throw std::system_error(errno, std::system_category(), "mprotect"); } -nb::callable BenchmarkManager::get_kernel(const std::string& qualname, const nb::tuple& call_args) { +nb::callable BenchmarkManager::initial_kernel_setup(double& time_estimate, const std::string& qualname, const nb::tuple& call_args, cudaStream_t stream) { nb::gil_scoped_release release; const std::uintptr_t lo = reinterpret_cast(this->mArena); const std::uintptr_t hi = lo + BenchmarkManagerArenaSize; nb::callable kernel; + double warmup_seconds = mWarmupSeconds; + void* cc_memory = mDeviceDummyMemory; + std::size_t l2_clear_size = mL2CacheSize; + bool discard_cache = mDiscardCache; std::exception_ptr thread_exception; int sock = mSupervisorSock; bool install_notify = mSeal || supports_seccomp_notify(); @@ -398,7 +369,8 @@ nb::callable BenchmarkManager::get_kernel(const std::string& qualname, const nb: // TODO make stack inaccessible (may be impossible) or read-only during the call // call the python kernel generation function from a different thread. 
- std::thread make_kernel_thread([&kernel, sock, lo, hi, qualname, &call_args, &thread_exception, install_notify]() { + std::thread make_kernel_thread([&kernel, sock, lo, hi, qualname, &call_args, &thread_exception, + install_notify, &time_estimate, warmup_seconds, cc_memory, l2_clear_size, discard_cache, stream]() { try { if (sock >= 0) { try { @@ -417,6 +389,24 @@ nb::callable BenchmarkManager::get_kernel(const std::string& qualname, const nb: CUDA_CHECK(cudaDeviceSynchronize()); kernel(*call_args); CUDA_CHECK(cudaDeviceSynchronize()); + + // warmup + CUDA_CHECK(cudaDeviceSynchronize()); + auto cpu_start = std::chrono::high_resolution_clock::now(); + int warmup_run_count = 0; + + while (true) { + ::clear_cache(cc_memory, 2 * l2_clear_size, discard_cache, stream); + kernel(*call_args); + CUDA_CHECK(cudaDeviceSynchronize()); + + auto elapsed = std::chrono::high_resolution_clock::now() - cpu_start; + ++warmup_run_count; + if (std::chrono::duration(elapsed).count() > warmup_seconds) { + time_estimate = std::chrono::duration(elapsed).count() / warmup_run_count; + break; + } + } } catch (...) 
{ thread_exception = std::current_exception(); } @@ -458,12 +448,20 @@ void BenchmarkManager::do_bench_py( // dry run -- measure overhead of events mMedianEventTime = measure_event_overhead(DRY_EVENTS, stream); + double time_estimate = 0.0; // at this point, we call user code as we import the kernel (executing arbitrary top-level code) // after this, we cannot trust python anymore - nb::callable kernel = get_kernel(kernel_qualname, args.at(0)); + nb::callable kernel = initial_kernel_setup(time_estimate, kernel_qualname, args.at(0), stream); - // now, run a few more times for warmup; in total aim for 1 second of warmup runs - int actual_calls = run_warmup(kernel, args.at(0), stream); + int calls = mOutputBuffers.size() - 1; + const int actual_calls = std::clamp( + static_cast(std::ceil(mBenchmarkSeconds / time_estimate)), 1, calls); + + if (actual_calls < 3) { + throw std::runtime_error( + "The initial speed test indicated that running times are too slow to generate " + "meaningful benchmark numbers: " + std::to_string(time_estimate)); + } // pick a random spot for the unsigned // initialize the whole area with random junk; the error counter diff --git a/csrc/manager.h b/csrc/manager.h index fd72ee6..c592d93 100644 --- a/csrc/manager.h +++ b/csrc/manager.h @@ -135,8 +135,7 @@ class BenchmarkManager { void setup_test_cases(const std::vector& args, const std::vector& expected, cudaStream_t stream); void install_protections(); - int run_warmup(nb::callable& kernel, const nb::tuple& args, cudaStream_t stream); - nb::callable get_kernel(const std::string& qualname, const nb::tuple& call_args); + nb::callable initial_kernel_setup(double& time_estimate, const std::string& qualname, const nb::tuple& call_args, cudaStream_t stream); [[nodiscard]] std::string build_result_message(const std::pmr::vector& test_order, unsigned error_count, float median_event_time) const; From 044007cebb8501b305177aab40add224cb482c8a Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Thu, 26 
Mar 2026 00:24:02 +0100 Subject: [PATCH 5/6] refactor --- csrc/manager.cpp | 127 ++++++++++++++++++++++++----------------------- 1 file changed, 66 insertions(+), 61 deletions(-) diff --git a/csrc/manager.cpp b/csrc/manager.cpp index 5a7d19d..fd88829 100644 --- a/csrc/manager.cpp +++ b/csrc/manager.cpp @@ -348,81 +348,86 @@ void protect_range(void* ptr, size_t size, int prot) { throw std::system_error(errno, std::system_category(), "mprotect"); } -nb::callable BenchmarkManager::initial_kernel_setup(double& time_estimate, const std::string& qualname, const nb::tuple& call_args, cudaStream_t stream) { - nb::gil_scoped_release release; - const std::uintptr_t lo = reinterpret_cast(this->mArena); +static void setup_seccomp(int sock, bool install_notify, std::uintptr_t lo, std::uintptr_t hi) { + if (sock < 0) + return; + try { + if (install_notify) + seccomp_install_memory_notify(sock, lo, hi); + } catch (...) { + close(sock); + throw; + } + close(sock); +} + +static double run_warmup_loop(nb::callable& kernel, const nb::tuple& args, cudaStream_t stream, + void* cc_memory, std::size_t l2_clear_size, bool discard_cache, + double warmup_seconds) { + CUDA_CHECK(cudaDeviceSynchronize()); + auto cpu_start = std::chrono::high_resolution_clock::now(); + int run_count = 0; + + while (true) { + ::clear_cache(cc_memory, 2 * l2_clear_size, discard_cache, stream); + kernel(*args); + CUDA_CHECK(cudaDeviceSynchronize()); + + ++run_count; + double elapsed = std::chrono::duration( + std::chrono::high_resolution_clock::now() - cpu_start).count(); + if (elapsed > warmup_seconds) + return elapsed / run_count; + } +} + +nb::callable BenchmarkManager::initial_kernel_setup(double& time_estimate, const std::string& qualname, + const nb::tuple& call_args, cudaStream_t stream) { + const std::uintptr_t lo = reinterpret_cast(mArena); const std::uintptr_t hi = lo + BenchmarkManagerArenaSize; + // snapshot all member state needed in the thread before protecting the arena + const int sock = 
mSupervisorSock; + const bool install_notify = mSeal || supports_seccomp_notify(); + const double warmup_seconds = mWarmupSeconds; + void* const cc_memory = mDeviceDummyMemory; + const std::size_t l2_clear_size = mL2CacheSize; + const bool discard_cache = mDiscardCache; + nb::callable kernel; - double warmup_seconds = mWarmupSeconds; - void* cc_memory = mDeviceDummyMemory; - std::size_t l2_clear_size = mL2CacheSize; - bool discard_cache = mDiscardCache; std::exception_ptr thread_exception; - int sock = mSupervisorSock; - bool install_notify = mSeal || supports_seccomp_notify(); nvtx_push("trigger-compile"); - - // make the BenchmarkManager inaccessible protect_range(reinterpret_cast(lo), hi - lo, PROT_NONE); - // TODO make stack inaccessible (may be impossible) or read-only during the call - // call the python kernel generation function from a different thread. - - std::thread make_kernel_thread([&kernel, sock, lo, hi, qualname, &call_args, &thread_exception, - install_notify, &time_estimate, warmup_seconds, cc_memory, l2_clear_size, discard_cache, stream]() { - try { - if (sock >= 0) { - try { - if (install_notify) - seccomp_install_memory_notify(sock, lo, hi); - } catch (...) 
{ - close(sock); - throw; - } - close(sock); - } - nb::gil_scoped_acquire guard; - kernel = kernel_from_qualname(qualname); - - // ok, first run for compilations etc - CUDA_CHECK(cudaDeviceSynchronize()); - kernel(*call_args); - CUDA_CHECK(cudaDeviceSynchronize()); - - // warmup - CUDA_CHECK(cudaDeviceSynchronize()); - auto cpu_start = std::chrono::high_resolution_clock::now(); - int warmup_run_count = 0; - - while (true) { - ::clear_cache(cc_memory, 2 * l2_clear_size, discard_cache, stream); - kernel(*call_args); - CUDA_CHECK(cudaDeviceSynchronize()); - - auto elapsed = std::chrono::high_resolution_clock::now() - cpu_start; - ++warmup_run_count; - if (std::chrono::duration(elapsed).count() > warmup_seconds) { - time_estimate = std::chrono::duration(elapsed).count() / warmup_run_count; - break; - } + + { + nb::gil_scoped_release release; + std::thread worker([&] { + try { + setup_seccomp(sock, install_notify, lo, hi); + + nb::gil_scoped_acquire guard; + + kernel = kernel_from_qualname(qualname); + CUDA_CHECK(cudaDeviceSynchronize()); + kernel(*call_args); // trigger JIT compile + + time_estimate = run_warmup_loop(kernel, call_args, stream, + cc_memory, l2_clear_size, discard_cache, + warmup_seconds); + } catch (...) { + thread_exception = std::current_exception(); } - } catch (...) { - thread_exception = std::current_exception(); - } - }); + }); + worker.join(); + } - make_kernel_thread.join(); - // make it accessible again. This is in the original thread, so the tightened seccomp - // policy does not apply here. 
protect_range(reinterpret_cast(lo), hi - lo, PROT_READ | PROT_WRITE); - // closed now, so set to -1 mSupervisorSock = -1; nvtx_pop(); - if (thread_exception) { + if (thread_exception) std::rethrow_exception(thread_exception); - } return kernel; } From c38f2261b2df5279d645c065258f5f3011692023 Mon Sep 17 00:00:00 2001 From: Erik Schultheis Date: Sat, 28 Mar 2026 00:02:05 +0100 Subject: [PATCH 6/6] fixes --- csrc/binding.cpp | 29 ++++++++++++++++++++++------- csrc/manager.cpp | 8 ++++++-- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/csrc/binding.cpp b/csrc/binding.cpp index ce14f00..d093127 100644 --- a/csrc/binding.cpp +++ b/csrc/binding.cpp @@ -9,6 +9,7 @@ #include #include #include "manager.h" +#include "utils.h" int supervisor_main(int sock_fd); @@ -23,14 +24,28 @@ void do_bench(int result_fd, int input_fd, const std::string& kernel_qualname, c signature.allocate(32, rng); auto config = read_benchmark_parameters(input_fd, signature.data()); auto mgr = make_benchmark_manager(result_fd, std::move(signature), config.Seed, discard, nvtx, landlock, mseal, supervisor_sock_fd); - nb::gil_scoped_release release; - std::thread run_thread ([&]() + { - nb::gil_scoped_acquire acquire; - auto [args, expected] = mgr->setup_benchmark(nb::cast(test_generator), test_kwargs, config.Repeats); - mgr->do_bench_py(kernel_qualname, args, expected, reinterpret_cast(stream)); - }); - run_thread.join(); + nb::gil_scoped_release release; + std::exception_ptr thread_exception; + int device; + CUDA_CHECK(cudaGetDevice(&device)); + std::thread run_thread ([&]() + { + try { + CUDA_CHECK(cudaSetDevice(device)); + nb::gil_scoped_acquire acquire; + auto [args, expected] = mgr->setup_benchmark(nb::cast(test_generator), test_kwargs, config.Repeats); + mgr->do_bench_py(kernel_qualname, args, expected, reinterpret_cast(stream)); + } catch (...) 
{ + thread_exception = std::current_exception(); + } + }); + run_thread.join(); + if (thread_exception) + std::rethrow_exception(thread_exception); + } + mgr->send_report(); mgr->clean_up(); } diff --git a/csrc/manager.cpp b/csrc/manager.cpp index fd88829..962d3e3 100644 --- a/csrc/manager.cpp +++ b/csrc/manager.cpp @@ -181,7 +181,8 @@ BenchmarkManager::BenchmarkManager(std::byte* arena, std::size_t arena_size, mEndEvents(&mResource), mExpectedOutputs(&mResource), mShadowArguments(&mResource), - mOutputBuffers(&mResource) + mOutputBuffers(&mResource), + mTestOrder(&mResource) { int device; CUDA_CHECK(cudaGetDevice(&device)); @@ -393,6 +394,8 @@ nb::callable BenchmarkManager::initial_kernel_setup(double& time_estimate, const void* const cc_memory = mDeviceDummyMemory; const std::size_t l2_clear_size = mL2CacheSize; const bool discard_cache = mDiscardCache; + int device; + CUDA_CHECK(cudaGetDevice(&device)); nb::callable kernel; std::exception_ptr thread_exception; @@ -404,6 +407,7 @@ nb::callable BenchmarkManager::initial_kernel_setup(double& time_estimate, const nb::gil_scoped_release release; std::thread worker([&] { try { + CUDA_CHECK(cudaSetDevice(device)); setup_seccomp(sock, install_notify, lo, hi); nb::gil_scoped_acquire guard; @@ -529,7 +533,7 @@ void BenchmarkManager::do_bench_py( } void BenchmarkManager::send_report() { - cudaEventSynchronize(mEndEvents.back()); + CUDA_CHECK(cudaEventSynchronize(mEndEvents.at(mTestOrder.size() - 1))); unsigned error_count; CUDA_CHECK(cudaMemcpy(&error_count, mDeviceErrorCounter, sizeof(unsigned), cudaMemcpyDeviceToHost)); // subtract the nuisance shift that we applied to the counter