diff --git a/tests/e2e/async_delegation_test.cpp b/tests/e2e/async_delegation_test.cpp index 104ba770..64044bcc 100644 --- a/tests/e2e/async_delegation_test.cpp +++ b/tests/e2e/async_delegation_test.cpp @@ -77,7 +77,7 @@ TEST(E2E_PhaseA, AsyncDelegationWithWorkStealing) { std::cout << "✓ All agents registered and configured" << std::endl; // ACT: Send commands to different task agents - std::map> commands; // msg_id -> (agent, expected) + std::map> commands; // msg_id -> (agent, expected) std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> dis(1, 50); @@ -228,7 +228,7 @@ TEST(E2E_PhaseA, WorkStealingLoadBalancing) { std::this_thread::sleep_for(std::chrono::milliseconds(300)); // Count how many agents received messages - int agents_with_messages = 0; + int32_t agents_with_messages = 0; for (auto& agent : task_agents) { int32_t count = 0; while (agent->getMessage().has_value()) { diff --git a/tests/e2e/basic_delegation_test.cpp b/tests/e2e/basic_delegation_test.cpp index 5317b870..5a102cdd 100644 --- a/tests/e2e/basic_delegation_test.cpp +++ b/tests/e2e/basic_delegation_test.cpp @@ -51,7 +51,7 @@ TEST(E2E_Phase1, ChiefArchitectDelegatesToTaskAgent) { int32_t num1 = dis(gen); int32_t num2 = dis(gen); - int expected_result = num1 + num2; + int32_t expected_result = num1 + num2; std::cout << "\nTest: Adding " << num1 << " + " << num2 << " = " << expected_result << std::endl; @@ -112,7 +112,9 @@ TEST(E2E_Phase1, ChiefArchitectSendsMultipleCommands) { task_agent->setMessageBus(bus.get()); // Test cases: (num1, num2, expected_result) - std::vector> test_cases = {{5, 3, 8}, {15, 27, 42}, {100, 200, 300}}; + std::vector> test_cases = {{5, 3, 8}, + {15, 27, 42}, + {100, 200, 300}}; // ACT & ASSERT: Send each command for (const auto& [num1, num2, expected] : test_cases) { diff --git a/tests/e2e/chaos_engineering_test.cpp b/tests/e2e/chaos_engineering_test.cpp index 194c86c0..1f97b323 100644 --- a/tests/e2e/chaos_engineering_test.cpp +++ 
b/tests/e2e/chaos_engineering_test.cpp @@ -297,7 +297,7 @@ TEST_F(Phase5ProbabilisticFailureTest, AgentsFailBasedOnInjectorRate) { } // Count failed agents - int failed_agents = 0; + int32_t failed_agents = 0; for (auto& agent : agents) { if (agent->isFailed()) { failed_agents++; @@ -471,7 +471,7 @@ TEST_F(Phase5NetworkPartitionTest, MessagesDroppedAcrossPartition) { EXPECT_EQ(network.getPartitionDroppedMessages(), 0); // Send message across partition (should be dropped) - int executed = 0; + int32_t executed = 0; network.send(0, 2, [&executed]() { executed++; }); EXPECT_EQ(network.getPartitionDroppedMessages(), 1); @@ -565,10 +565,10 @@ TEST_F(Phase5NetworkPartitionTest, SplitBrainWorkDistribution) { SimulatedNetwork network(config); // Simulate 4 nodes with work - std::atomic node0_work{0}; - std::atomic node1_work{0}; - std::atomic node2_work{0}; - std::atomic node3_work{0}; + std::atomic node0_work{0}; + std::atomic node1_work{0}; + std::atomic node2_work{0}; + std::atomic node3_work{0}; // Create partition: [0, 1] vs [2, 3] network.createPartition({0, 1}, {2, 3}); @@ -715,7 +715,7 @@ TEST_F(Phase5MessageLossTest, MessageLossWithSimulatedNetwork) { SimulatedNetwork network(config); // Send 100 messages - std::atomic delivered{0}; + std::atomic delivered{0}; for (int32_t i = 0; i < 100; ++i) { network.send(0, 1, [&delivered]() { delivered++; }); } @@ -821,8 +821,8 @@ TEST_F(Phase5MessageLossTest, MessageLossWithManualRetries) { RetryPolicy policy(retry_config); // Try sending 10 messages with retry logic - std::atomic delivered{0}; - std::atomic total_attempts{0}; + std::atomic delivered{0}; + std::atomic total_attempts{0}; for (int32_t i = 0; i < 10; ++i) { std::string msg_id = "msg" + std::to_string(i); @@ -884,7 +884,7 @@ TEST_F(Phase5MessageLossTest, CombinedPartitionAndLoss) { network.createPartition({0, 1}, {2, 3}); // Send messages in various scenarios - std::atomic delivered{0}; + std::atomic delivered{0}; // Within partition: should work (with some 
loss) // Increased to 50 messages for statistical reliability diff --git a/tests/e2e/component_coordination_test.cpp b/tests/e2e/component_coordination_test.cpp index 53efe4b7..f5819e96 100644 --- a/tests/e2e/component_coordination_test.cpp +++ b/tests/e2e/component_coordination_test.cpp @@ -56,7 +56,7 @@ TEST(E2E_Phase3, ComponentLeadCoordinatesMultipleModules) { // Create 6 TaskAgents (3 per module) std::vector> task_agents; - for (int i = 1; i <= 6; ++i) { + for (int32_t i = 1; i <= 6; ++i) { auto agent = std::make_shared("task_" + std::to_string(i)); task_agents.push_back(agent); } @@ -130,7 +130,7 @@ TEST(E2E_Phase3, ComponentLeadCoordinatesMultipleModules) { std::cout << "4. ModuleLeads → TaskAgents (6 total)..." << std::endl; // All 6 TaskAgents process their tasks - int tasks_processed = 0; + int32_t tasks_processed = 0; for (auto& agent : task_agents) { auto task_msg = agent->getMessage(); if (task_msg.has_value()) { diff --git a/tests/e2e/distributed_grpc_test.cpp b/tests/e2e/distributed_grpc_test.cpp index cb15f3f1..230e6cea 100644 --- a/tests/e2e/distributed_grpc_test.cpp +++ b/tests/e2e/distributed_grpc_test.cpp @@ -54,7 +54,7 @@ class YamlSpecBuilder { return *this; } - YamlSpecBuilder& setTargetLevel(int level) { + YamlSpecBuilder& setTargetLevel(int32_t level) { spec_.routing.target_level = level; return *this; } @@ -377,7 +377,7 @@ TEST_F(DistributedGrpcTest, HeartbeatMonitoring) { EXPECT_EQ(alive_agents[0].agent_id, "agent-alive"); // Cleanup dead agents - int removed = registry_->cleanupDeadAgents(); + int32_t removed = registry_->cleanupDeadAgents(); EXPECT_EQ(removed, 1); EXPECT_EQ(registry_->getAgentCount(), 1); } @@ -922,7 +922,7 @@ TEST_F(DistributedGrpcTest, TaskCleanupOldTasks) { std::this_thread::sleep_for(100ms); // Cleanup tasks older than 50ms (should remove old tasks) - int cleaned = coordinator_->cleanupOldTasks(50); + int32_t cleaned = coordinator_->cleanupOldTasks(50); // Note: This test depends on coordinator implementation // If 
cleanup only removes completed/failed tasks, it should work diff --git a/tests/e2e/distributed_hierarchy_test.cpp b/tests/e2e/distributed_hierarchy_test.cpp index e82611d9..3f3ee55f 100644 --- a/tests/e2e/distributed_hierarchy_test.cpp +++ b/tests/e2e/distributed_hierarchy_test.cpp @@ -56,10 +56,10 @@ TEST_F(DistributedHierarchyTest, FourLayerHierarchyAcrossNodes) { cluster.registerAgent("task_agent_3", 3); // Counters for each layer - std::atomic chief_executions{0}; - std::atomic component_executions{0}; - std::atomic module_executions{0}; - std::atomic task_executions{0}; + std::atomic chief_executions{0}; + std::atomic component_executions{0}; + std::atomic module_executions{0}; + std::atomic task_executions{0}; // Simulate hierarchical delegation using network send // Chief sends to ComponentLead via network @@ -120,7 +120,7 @@ TEST_F(DistributedHierarchyTest, MultipleCommandsDistributed) { cluster.registerAgent("module_lead_2", 2); cluster.registerAgent("task_agent", 3); - std::atomic total_task_executions{0}; + std::atomic total_task_executions{0}; // Send 10 commands through the hierarchy using network for (int32_t cmd = 0; cmd < 10; ++cmd) { @@ -167,7 +167,7 @@ TEST_F(DistributedHierarchyTest, LoadBalancingAcrossNodes) { cluster.registerAgent("task_" + std::to_string(i), 3); } - std::atomic completed_tasks{0}; + std::atomic completed_tasks{0}; // Submit concentrated workload to node 3 for (int32_t i = 0; i < 100; ++i) { @@ -219,7 +219,7 @@ TEST_F(DistributedHierarchyTest, NetworkLatencyImpact) { low_latency_cluster.registerAgent("sender", 0); low_latency_cluster.registerAgent("receiver", 1); - std::atomic low_latency_count{0}; + std::atomic low_latency_count{0}; auto start_low = std::chrono::steady_clock::now(); @@ -252,7 +252,7 @@ TEST_F(DistributedHierarchyTest, NetworkLatencyImpact) { high_latency_cluster.registerAgent("sender", 0); high_latency_cluster.registerAgent("receiver", 1); - std::atomic high_latency_count{0}; + std::atomic 
high_latency_count{0}; auto start_high = std::chrono::steady_clock::now(); @@ -297,9 +297,9 @@ TEST_F(DistributedHierarchyTest, AgentMigrationBetweenNodes) { // Initially on node 0 cluster.registerAgent("mobile_agent", 0); - std::atomic executions_node0{0}; - std::atomic executions_node1{0}; - std::atomic executions_node2{0}; + std::atomic executions_node0{0}; + std::atomic executions_node1{0}; + std::atomic executions_node2{0}; // Execute on node 0 for (int32_t i = 0; i < 10; ++i) { diff --git a/tests/e2e/full_async_hierarchy_test.cpp b/tests/e2e/full_async_hierarchy_test.cpp index e2728549..62293f42 100644 --- a/tests/e2e/full_async_hierarchy_test.cpp +++ b/tests/e2e/full_async_hierarchy_test.cpp @@ -67,7 +67,7 @@ TEST(E2E_PhaseB, FullAsync4LayerHierarchy) { // Level 3: Task Agents (6 total: 3 per module) std::vector> task_agents; - for (int i = 1; i <= 6; ++i) { + for (int32_t i = 1; i <= 6; ++i) { auto task = std::make_shared("task" + std::to_string(i)); task->setMessageBus(&bus); task->setScheduler(&scheduler); @@ -103,7 +103,7 @@ TEST(E2E_PhaseB, FullAsync4LayerHierarchy) { EXPECT_GE(mod2_trace.size(), 4); // Verify all task agents executed their commands - int total_commands = 0; + int32_t total_commands = 0; for (const auto& task : task_agents) { total_commands += task->getCommandHistory().size(); } @@ -158,7 +158,7 @@ TEST(E2E_PhaseB, Async4LayerConcurrentExecution) { // Create slow task agents (sleep 0.05s per task) std::vector> task_agents; - for (int i = 1; i <= 6; ++i) { + for (int32_t i = 1; i <= 6; ++i) { auto task = std::make_shared("task" + std::to_string(i)); task->setMessageBus(&bus); task->setScheduler(&scheduler); @@ -189,7 +189,7 @@ TEST(E2E_PhaseB, Async4LayerConcurrentExecution) { EXPECT_LT(elapsed, 600); // Should complete well under sequential time // Verify all tasks completed - int total_commands = 0; + int32_t total_commands = 0; for (const auto& task : task_agents) { total_commands += task->getCommandHistory().size(); } diff --git 
a/tests/e2e/module_coordination_test.cpp b/tests/e2e/module_coordination_test.cpp index 726b94ee..6afdb269 100644 --- a/tests/e2e/module_coordination_test.cpp +++ b/tests/e2e/module_coordination_test.cpp @@ -48,7 +48,7 @@ TEST(E2E_Phase2, ModuleLeadSynthesizesTaskResults) { // Create 3 TaskAgents for parallel execution std::vector> task_agents; - for (int i = 1; i <= 3; ++i) { + for (int32_t i = 1; i <= 3; ++i) { auto agent = std::make_shared("task_" + std::to_string(i)); task_agents.push_back(agent); } @@ -102,7 +102,7 @@ TEST(E2E_Phase2, ModuleLeadSynthesizesTaskResults) { std::cout << "3. ModuleLead delegates to 3 TaskAgents..." << std::endl; // Each TaskAgent processes their assigned task - int tasks_processed = 0; + int32_t tasks_processed = 0; for (auto& agent : task_agents) { auto task_msg = agent->getMessage(); if (task_msg.has_value()) { @@ -118,7 +118,7 @@ TEST(E2E_Phase2, ModuleLeadSynthesizesTaskResults) { // ModuleLead receives results from all TaskAgents std::cout << "4. ModuleLead receives results from TaskAgents..." 
<< std::endl; - int results_received = 0; + int32_t results_received = 0; for (int32_t i = 0; i < 3; ++i) { auto result_msg = module_lead->getMessage(); if (result_msg.has_value()) { @@ -189,7 +189,7 @@ TEST(E2E_Phase2, ModuleLeadHandlesVariableTaskCount) { auto module_lead = std::make_shared("module_math"); std::vector> task_agents; - for (int i = 1; i <= 3; ++i) { + for (int32_t i = 1; i <= 3; ++i) { auto agent = std::make_shared("task_" + std::to_string(i)); task_agents.push_back(agent); } @@ -224,7 +224,7 @@ TEST(E2E_Phase2, ModuleLeadHandlesVariableTaskCount) { module_lead->processMessage(*module_msg).get(); // Process tasks (should be 2) - int tasks_processed = 0; + int32_t tasks_processed = 0; for (auto& agent : task_agents) { auto task_msg = agent->getMessage(); if (task_msg.has_value()) { diff --git a/tests/fixtures/grpc_test_fixture.hpp b/tests/fixtures/grpc_test_fixture.hpp index f70200b1..0d3f8d42 100644 --- a/tests/fixtures/grpc_test_fixture.hpp +++ b/tests/fixtures/grpc_test_fixture.hpp @@ -81,8 +81,8 @@ class GrpcTestFixture : public ::testing::Test { std::unique_ptr registry_server_; // Server ports (ephemeral, assigned by OS) - int coordinator_port_ = 0; - int registry_port_ = 0; + int32_t coordinator_port_ = 0; + int32_t registry_port_ = 0; }; } // namespace test diff --git a/tests/integration/test_nats_integration.cpp b/tests/integration/test_nats_integration.cpp index 0f961ad0..6438cc3c 100644 --- a/tests/integration/test_nats_integration.cpp +++ b/tests/integration/test_nats_integration.cpp @@ -191,8 +191,8 @@ TEST_F(NatsIntegrationTest, PipelineShutdownDrainsCleanly) { bus_->registerAgent(agent->getAgentId(), agent); // Queue several work messages before the shutdown signal. 
- constexpr int kWorkMessages = 5; - for (int i = 0; i < kWorkMessages; ++i) { + constexpr int32_t kWorkMessages = 5; + for (int32_t i = 0; i < kWorkMessages; ++i) { auto work = KeystoneMessage::create("bridge", "drain_agent", ActionType::EXECUTE, @@ -207,7 +207,7 @@ TEST_F(NatsIntegrationTest, PipelineShutdownDrainsCleanly) { EXPECT_TRUE(bus_->routeMessage(shutdown_msg)); // Drain: consume all kWorkMessages + 1 shutdown. - int drained = 0; + int32_t drained = 0; bool saw_shutdown = false; bool drained_all = waitFor( [&]() { @@ -254,7 +254,7 @@ TEST_F(NatsIntegrationTest, PipelinePriorityMessagesDeliveredInOrder) { bool got_first = waitFor([&]() { return agent->getMessage().has_value(); }); ASSERT_TRUE(got_first) << "No messages delivered to priority_agent"; - int delivered = 1; + int32_t delivered = 1; while (agent->getMessage().has_value()) { ++delivered; } @@ -346,7 +346,7 @@ TEST_F(NatsServerTest, NatsConnectionSucceeds) { // Use nc (netcat) to test TCP connectivity; fall back to bash /dev/tcp. std::string check_cmd = "bash -c 'echo > /dev/tcp/" + host + "/" + port + "' 2>/dev/null"; - int rc = std::system(check_cmd.c_str()); // NOLINT(cert-env33-c) + int32_t rc = std::system(check_cmd.c_str()); // NOLINT(cert-env33-c) EXPECT_EQ(rc, 0) << "Could not connect to NATS server at " << url << ". Is the server running? 
(docker-compose -f docker-compose.test.yml up)"; } @@ -411,8 +411,8 @@ TEST_F(NatsServerTest, NatsShutdownDrainsSubscription) { agent->setMessageBus(bus_.get()); bus_->registerAgent(agent->getAgentId(), agent); - constexpr int kPending = 3; - for (int i = 0; i < kPending; ++i) { + constexpr int32_t kPending = 3; + for (int32_t i = 0; i < kPending; ++i) { auto work = KeystoneMessage::create("nats.bridge:hi.tasks.execute", "hi.myrmidon.tasks.0", ActionType::EXECUTE, @@ -428,7 +428,7 @@ TEST_F(NatsServerTest, NatsShutdownDrainsSubscription) { "drain-session"); EXPECT_TRUE(bus_->routeMessage(shutdown)); - int count = 0; + int32_t count = 0; bool saw_shutdown = false; bool ok = waitFor( [&]() { diff --git a/tests/integration/test_registry_integration.cpp b/tests/integration/test_registry_integration.cpp index a0282660..157b9d68 100644 --- a/tests/integration/test_registry_integration.cpp +++ b/tests/integration/test_registry_integration.cpp @@ -431,7 +431,7 @@ TEST_F(RegistryIntegrationTest, ThreadSafeRegistryOperations) { std::vector threads; // 5 threads: register 20 agents each (100 total) - std::atomic registered{0}; + std::atomic registered{0}; for (int32_t t = 0; t < 5; ++t) { threads.emplace_back([this, t, &all_agents, ®istered]() { for (int32_t i = 0; i < 20; ++i) { diff --git a/tests/load/hmas_load_test.cpp b/tests/load/hmas_load_test.cpp index c112a42b..0ff6d5b8 100644 --- a/tests/load/hmas_load_test.cpp +++ b/tests/load/hmas_load_test.cpp @@ -44,11 +44,11 @@ using namespace std::chrono_literals; */ struct LoadTestConfig { std::string scenario; - int duration_seconds{600}; - int message_rate{100}; // messages per second - int num_component_leads{2}; - int num_module_leads{4}; - int num_task_agents{16}; + int32_t duration_seconds{600}; + int32_t message_rate{100}; // messages per second + int32_t num_component_leads{2}; + int32_t num_module_leads{4}; + int32_t num_task_agents{16}; double high_priority_pct{0.2}; double normal_priority_pct{0.7}; double 
low_priority_pct{0.1}; @@ -146,7 +146,7 @@ class MessageGenerator { */ class MetricsCollector { public: - explicit MetricsCollector(int sample_interval_ms = 1000) + explicit MetricsCollector(int32_t sample_interval_ms = 1000) : sample_interval_(sample_interval_ms), running_(false) {} void start() { @@ -208,7 +208,7 @@ class MetricsCollector { } } - int sample_interval_; + int32_t sample_interval_; std::atomic running_; std::thread collector_thread_; std::vector samples_; @@ -348,7 +348,7 @@ class LoadTestHarness { } private: - int getTotalAgents() const { + int32_t getTotalAgents() const { return 1 + config_.num_component_leads + config_.num_module_leads + config_.num_task_agents; } @@ -461,7 +461,7 @@ class LoadTestHarness { LoadTestConfig parseArgs(int argc, char** argv) { LoadTestConfig config; - for (int i = 1; i < argc; ++i) { + for (int32_t i = 1; i < argc; ++i) { std::string arg = argv[i]; if (arg.find("--scenario=") == 0) { diff --git a/tests/unit/test_agent_id_interning.cpp b/tests/unit/test_agent_id_interning.cpp index 68ccc65c..4183ad62 100644 --- a/tests/unit/test_agent_id_interning.cpp +++ b/tests/unit/test_agent_id_interning.cpp @@ -101,7 +101,7 @@ TEST(AgentIdInterningTest, ThreadSafety) { AgentIdInterning interning; constexpr int32_t num_threads = 10; constexpr int32_t iterations_per_thread = 100; - std::atomic successes{0}; + std::atomic successes{0}; std::vector threads; threads.reserve(num_threads); @@ -215,7 +215,7 @@ TEST(AgentIdInterningTest, ConcurrentReads) { constexpr int32_t num_readers = 20; constexpr int32_t reads_per_thread = 1000; - std::atomic successes{0}; + std::atomic successes{0}; std::vector readers; readers.reserve(num_readers); diff --git a/tests/unit/test_coordination_state.cpp b/tests/unit/test_coordination_state.cpp index 5ed7d03d..2378b687 100644 --- a/tests/unit/test_coordination_state.cpp +++ b/tests/unit/test_coordination_state.cpp @@ -122,7 +122,7 @@ TEST_F(CoordinationStateTest, StatePersistence) { * @brief Test 7: 
Thread-safe state transitions */ TEST_F(CoordinationStateTest, ThreadSafeStateTransitions) { - constexpr int NUM_THREADS = 10; + constexpr int32_t NUM_THREADS = 10; std::vector threads; // 10 threads concurrently transition states @@ -247,7 +247,7 @@ TEST_F(CoordinationStateTest, GetResultsBeforeRecording) { * @brief Test 15: Thread-safe result recording */ TEST_F(CoordinationStateTest, RecordResultThreadSafety) { - constexpr int NUM_THREADS = 10; + constexpr int32_t NUM_THREADS = 10; state_.initializeCoordination(NUM_THREADS); std::vector threads; @@ -269,7 +269,7 @@ TEST_F(CoordinationStateTest, RecordResultThreadSafety) { * @brief Test 16: Custom result type (integers) */ TEST_F(CoordinationStateTest, ResultTypeCustom) { - CoordinationState int_state; + CoordinationState int_state; int_state.initializeCoordination(3); int_state.recordResult(42); @@ -498,7 +498,7 @@ TEST_F(CoordinationStateTest, MultipleWorkflowCycles) { * @brief Test 28: Concurrent workflows (stress test) */ TEST_F(CoordinationStateTest, ConcurrentWorkflows) { - constexpr int NUM_THREADS = 5; + constexpr int32_t NUM_THREADS = 5; std::vector threads; for (int32_t i = 0; i < NUM_THREADS; ++i) { @@ -705,7 +705,7 @@ TEST_F(CoordinationStateTest, InitializeClearsPreviousFailures) { } TEST_F(CoordinationStateTest, RecordFailureThreadSafety) { - constexpr int NUM_THREADS = 10; + constexpr int32_t NUM_THREADS = 10; state_.initializeCoordination(NUM_THREADS); std::vector threads; diff --git a/tests/unit/test_cpu_affinity.cpp b/tests/unit/test_cpu_affinity.cpp index b1b0ce36..b937575c 100644 --- a/tests/unit/test_cpu_affinity.cpp +++ b/tests/unit/test_cpu_affinity.cpp @@ -15,7 +15,7 @@ TEST(CPUAffinityTest, EnableCPUAffinity) { scheduler.start(); // Submit some work to ensure workers are running - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 100; ++i) { scheduler.submit([&counter]() { counter.fetch_add(1, std::memory_order_relaxed); @@ -38,7 +38,7 @@ TEST(CPUAffinityTest, 
DisabledByDefault) { WorkStealingScheduler scheduler(2); // affinity disabled by default scheduler.start(); - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 50; ++i) { scheduler.submit([&counter]() { counter.fetch_add(1, std::memory_order_relaxed); }); } @@ -57,7 +57,7 @@ TEST(CPUAffinityTest, MoreWorkersThanCores) { WorkStealingScheduler scheduler(num_workers, true); scheduler.start(); - std::atomic counter{0}; + std::atomic counter{0}; for (size_t i = 0; i < 100; ++i) { scheduler.submit([&counter]() { counter.fetch_add(1, std::memory_order_relaxed); }); } diff --git a/tests/unit/test_health_check_server.cpp b/tests/unit/test_health_check_server.cpp index 400b82ed..5c17cd3c 100644 --- a/tests/unit/test_health_check_server.cpp +++ b/tests/unit/test_health_check_server.cpp @@ -87,7 +87,7 @@ class HealthCheckServerTest : public ::testing::Test { /** * @brief Extract HTTP status code from response */ - int getStatusCode(const std::string& response) { + int32_t getStatusCode(const std::string& response) { if (response.empty()) return 0; @@ -118,7 +118,7 @@ class HealthCheckServerTest : public ::testing::Test { return response.substr(body_start + 4); } - int port_; + int32_t port_; std::unique_ptr server_; }; @@ -389,7 +389,7 @@ TEST_F(HealthCheckServerTest, ConcurrentRequests) { // Send 10 concurrent requests std::vector threads; - std::atomic success_count{0}; + std::atomic success_count{0}; for (int32_t i = 0; i < 10; ++i) { threads.emplace_back([this, &success_count]() { diff --git a/tests/unit/test_heartbeat_monitor.cpp b/tests/unit/test_heartbeat_monitor.cpp index 67b77193..6c635ce5 100644 --- a/tests/unit/test_heartbeat_monitor.cpp +++ b/tests/unit/test_heartbeat_monitor.cpp @@ -44,7 +44,7 @@ TEST_F(HeartbeatMonitorTest, DetectFailure) { std::this_thread::sleep_for(std::chrono::milliseconds(350)); // Check agents should detect failure - int failures = monitor.checkAgents(); + int32_t failures = monitor.checkAgents(); EXPECT_EQ(failures, 
1); EXPECT_FALSE(monitor.isAlive("agent1")); EXPECT_EQ(monitor.getTotalFailures(), 1); diff --git a/tests/unit/test_message_bus_async.cpp b/tests/unit/test_message_bus_async.cpp index 6da43251..7483fdbc 100644 --- a/tests/unit/test_message_bus_async.cpp +++ b/tests/unit/test_message_bus_async.cpp @@ -21,7 +21,7 @@ using namespace keystone::concurrency; class CountingAgent : public AsyncAgent { public: explicit CountingAgent(const std::string& agent_id) - : AsyncAgent(agent_id), count_(std::make_shared>(0)) {} + : AsyncAgent(agent_id), count_(std::make_shared>(0)) {} Task processMessage(const KeystoneMessage& msg) override { count_->fetch_add(1); @@ -35,10 +35,10 @@ class CountingAgent : public AsyncAgent { co_return resp; } - int getCount() const { return count_->load(); } + int32_t getCount() const { return count_->load(); } private: - std::shared_ptr> count_; + std::shared_ptr> count_; }; // Test: MessageBus without scheduler (synchronous routing) @@ -145,7 +145,7 @@ TEST(MessageBusAsyncTest, MultipleAgentsAsyncRouting) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); // Check both agents received messages - int count2 = 0, count3 = 0; + int32_t count2 = 0, count3 = 0; while (agent2->getMessage()) count2++; while (agent3->getMessage()) @@ -212,7 +212,7 @@ TEST(MessageBusAsyncTest, HighLoadAsyncRouting) { agent2->setMessageBus(&bus); // Send many messages - const int num_messages = 1000; + const int32_t num_messages = 1000; for (int32_t i = 0; i < num_messages; ++i) { auto msg = KeystoneMessage::create("agent1", "agent2", "msg" + std::to_string(i)); bus.routeMessage(msg); @@ -222,7 +222,7 @@ TEST(MessageBusAsyncTest, HighLoadAsyncRouting) { std::this_thread::sleep_for(std::chrono::milliseconds(500)); // Count received messages - int received = 0; + int32_t received = 0; while (agent2->getMessage()) { received++; } @@ -259,7 +259,7 @@ TEST(MessageBusAsyncTest, SchedulerShutdownGraceful) { scheduler.shutdown(); // All messages should have been 
delivered before shutdown completed - int received = 0; + int32_t received = 0; while (agent2->getMessage()) { received++; } @@ -280,13 +280,13 @@ TEST(MessageBusAsyncTest, ConcurrentLifecycleStressTest) { bus.setScheduler(&scheduler); std::atomic stop{false}; - std::atomic agents_created{0}; - std::atomic agents_destroyed{0}; - std::atomic messages_sent{0}; - std::atomic registration_errors{0}; + std::atomic agents_created{0}; + std::atomic agents_destroyed{0}; + std::atomic messages_sent{0}; + std::atomic registration_errors{0}; // Helper to generate unique agent IDs - auto get_agent_id = [](int counter) { + auto get_agent_id = [](int32_t counter) { return "stress_agent_" + std::to_string(counter); }; @@ -487,7 +487,7 @@ TEST(MessageBusAsyncTest, MessageToDeletedAgentGraceful) { bus.unregisterAgent("agent2"); // Try to send more messages (should fail gracefully) - for (int i = 10; i < 20; ++i) { + for (int32_t i = 10; i < 20; ++i) { auto msg = KeystoneMessage::create("agent1", "agent2", "msg_" + std::to_string(i)); bool routed = bus.routeMessage(msg); // May or may not route depending on timing diff --git a/tests/unit/test_message_pool.cpp b/tests/unit/test_message_pool.cpp index 546f331f..962ef485 100644 --- a/tests/unit/test_message_pool.cpp +++ b/tests/unit/test_message_pool.cpp @@ -158,8 +158,8 @@ TEST_F(MessagePoolTest, StatisticsTracking) { TEST_F(MessagePoolTest, ThreadLocalIsolation) { // Each thread should have its own pool - std::atomic thread1_pool_size{0}; - std::atomic thread2_pool_size{0}; + std::atomic thread1_pool_size{0}; + std::atomic thread2_pool_size{0}; auto worker1 = [&]() { MessagePool::clear(); @@ -289,7 +289,7 @@ TEST_F(MessagePoolTest, ResetStats) { TEST_F(MessagePoolTest, HighLoadScenario) { // Simulate high-load scenario with many acquires and releases - const int iterations = 10000; + const int32_t iterations = 10000; for (int32_t i = 0; i < iterations; ++i) { auto msg = MessagePool::acquire(); diff --git a/tests/unit/test_metrics.cpp 
b/tests/unit/test_metrics.cpp index e152d29f..c3f6a0f4 100644 --- a/tests/unit/test_metrics.cpp +++ b/tests/unit/test_metrics.cpp @@ -209,8 +209,8 @@ TEST_F(MetricsTest, ReportGeneration) { TEST_F(MetricsTest, ThreadSafety) { auto& metrics = Metrics::getInstance(); - const int num_threads = 10; - const int msgs_per_thread = 100; + const int32_t num_threads = 10; + const int32_t msgs_per_thread = 100; std::vector threads; diff --git a/tests/unit/test_nats_connection.cpp b/tests/unit/test_nats_connection.cpp index 84c58fdb..53be91fb 100644 --- a/tests/unit/test_nats_connection.cpp +++ b/tests/unit/test_nats_connection.cpp @@ -189,7 +189,7 @@ class NatsCallbackTest : public ::testing::Test { }; TEST_F(NatsCallbackTest, ErrorCallbackFiredOnError) { - std::atomic call_count{0}; + std::atomic call_count{0}; conn_.setErrorCallback([&](const std::string& /*err*/) { ++call_count; }); conn_.fireError(); @@ -198,7 +198,7 @@ TEST_F(NatsCallbackTest, ErrorCallbackFiredOnError) { } TEST_F(NatsCallbackTest, DisconnectedCallbackFiredOnDisconnect) { - std::atomic call_count{0}; + std::atomic call_count{0}; conn_.setDisconnectedCallback([&]() { ++call_count; }); conn_.fireDisconnected(); @@ -207,7 +207,7 @@ TEST_F(NatsCallbackTest, DisconnectedCallbackFiredOnDisconnect) { } TEST_F(NatsCallbackTest, ReconnectedCallbackFiredOnReconnect) { - std::atomic call_count{0}; + std::atomic call_count{0}; conn_.setReconnectedCallback([&]() { ++call_count; }); conn_.fireReconnected(); @@ -216,7 +216,7 @@ TEST_F(NatsCallbackTest, ReconnectedCallbackFiredOnReconnect) { } TEST_F(NatsCallbackTest, ClosedCallbackFiredOnClose) { - std::atomic call_count{0}; + std::atomic call_count{0}; conn_.setClosedCallback([&]() { ++call_count; }); conn_.fireClosed(); @@ -311,8 +311,8 @@ class NatsCallbackOverrideTest : public ::testing::Test { }; TEST_F(NatsCallbackOverrideTest, ReplacedCallbackIsInvokedInsteadOfOriginal) { - std::atomic first_count{0}; - std::atomic second_count{0}; + std::atomic first_count{0}; 
+ std::atomic second_count{0}; conn_.setReconnectedCallback([&]() { ++first_count; }); conn_.setReconnectedCallback([&]() { ++second_count; }); diff --git a/tests/unit/test_nats_status.cpp b/tests/unit/test_nats_status.cpp index f02492cc..a5edde72 100644 --- a/tests/unit/test_nats_status.cpp +++ b/tests/unit/test_nats_status.cpp @@ -77,15 +77,15 @@ TEST(NatsStatusTrackerTest, MultipleSetConnectedUpdatesTimestamp) { TEST(NatsStatusTrackerTest, ConcurrentStateUpdatesAreSafe) { NatsStatusTracker tracker; std::atomic start{false}; - constexpr int kThreads = 8; - constexpr int kIters = 200; + constexpr int32_t kThreads = 8; + constexpr int32_t kIters = 200; std::vector threads; threads.reserve(kThreads); - for (int i = 0; i < kThreads; ++i) { + for (int32_t i = 0; i < kThreads; ++i) { threads.emplace_back([&tracker, &start, i]() { while (!start.load()) {} - for (int j = 0; j < kIters; ++j) { + for (int32_t j = 0; j < kIters; ++j) { switch ((i + j) % 3) { case 0: tracker.setConnected(); diff --git a/tests/unit/test_profiling.cpp b/tests/unit/test_profiling.cpp index 48fc9d28..b66859fd 100644 --- a/tests/unit/test_profiling.cpp +++ b/tests/unit/test_profiling.cpp @@ -69,7 +69,7 @@ TEST_F(ProfilingTest, PercentileCalculation) { // Create known distribution with longer sleeps (10x) to minimize overhead impact // Using 10-1000µs range so profiling overhead (~1000µs) becomes negligible percentage - for (int i = 1; i <= 100; ++i) { + for (int32_t i = 1; i <= 100; ++i) { auto session = ProfilingSession::start("percentiles"); std::this_thread::sleep_for(std::chrono::microseconds(i * 10)); session.end(); diff --git a/tests/unit/test_scheduler_backoff.cpp b/tests/unit/test_scheduler_backoff.cpp index c95d3e29..bd46a166 100644 --- a/tests/unit/test_scheduler_backoff.cpp +++ b/tests/unit/test_scheduler_backoff.cpp @@ -173,7 +173,7 @@ TEST_F(SchedulerBackoffTest, WakeUpNotification) { // Let workers enter SLEEP phase std::this_thread::sleep_for(50ms); - auto work_executed = 
std::make_shared<std::atomic<int>>(0); + auto work_executed = std::make_shared<std::atomic<int32_t>>(0); // Submit multiple work items rapidly // All workers should wake up immediately via notify_all() @@ -197,7 +197,7 @@ TEST_F(SchedulerBackoffTest, MultipleWorkersBackoff) { std::this_thread::sleep_for(100ms); // Verify scheduler is still running and responsive - auto counter = std::make_shared<std::atomic<int>>(0); + auto counter = std::make_shared<std::atomic<int32_t>>(0); scheduler.submit([counter]() { counter->fetch_add(1); }); std::this_thread::sleep_for(50ms); @@ -211,7 +211,7 @@ TEST_F(SchedulerBackoffTest, BackoffDoesNotLoseWork) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared<std::atomic<int>>(0); + auto counter = std::make_shared<std::atomic<int32_t>>(0); // Submit 1000 tasks rapidly for (int32_t i = 0; i < 1000; ++i) { @@ -236,7 +236,7 @@ TEST_F(SchedulerBackoffTest, LatencyUnderLoad) { scheduler.start(); auto total_latency_us = std::make_shared<std::atomic<int64_t>>(0); - auto task_count = std::make_shared<std::atomic<int>>(0); + auto task_count = std::make_shared<std::atomic<int32_t>>(0); // Submit continuous work to keep workers in SPIN phase for (int32_t i = 0; i < 100; ++i) { @@ -257,7 +257,7 @@ TEST_F(SchedulerBackoffTest, LatencyUnderLoad) { // Wait for completion std::this_thread::sleep_for(200ms); - int count = task_count->load(); + int32_t count = task_count->load(); EXPECT_EQ(count, 100); // Average latency should be low (<= 150μs) @@ -279,7 +279,7 @@ TEST_F(SchedulerBackoffTest, BackoffPhaseProgression) { // We can't directly observe phase transitions, but we can verify // that the scheduler remains responsive at different time points - auto counter = std::make_shared<std::atomic<int>>(0); + auto counter = std::make_shared<std::atomic<int32_t>>(0); // After 1ms - likely in SPIN/YIELD phase std::this_thread::sleep_for(1ms); diff --git a/tests/unit/test_simulated_cluster.cpp b/tests/unit/test_simulated_cluster.cpp index 98968539..d4bef68b 100644 --- a/tests/unit/test_simulated_cluster.cpp +++ b/tests/unit/test_simulated_cluster.cpp @@ -86,8 +86,8 @@ TEST_F(SimulatedClusterTest, SubmitToRegisteredAgent) {
SimulatedCluster cluster(config); cluster.start(); - std::atomic node0_counter{0}; - std::atomic node1_counter{0}; + std::atomic node0_counter{0}; + std::atomic node1_counter{0}; cluster.registerAgent("agent_A", 0); cluster.registerAgent("agent_B", 1); @@ -111,7 +111,7 @@ TEST_F(SimulatedClusterTest, SubmitToUnregisteredAgent) { SimulatedCluster cluster(config); cluster.start(); - std::atomic total_counter{0}; + std::atomic total_counter{0}; // Submit to unregistered agents - should round-robin for (int32_t i = 0; i < 10; ++i) { @@ -130,7 +130,7 @@ TEST_F(SimulatedClusterTest, SubmitDirectlyToNode) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter0{0}, counter1{0}, counter2{0}; + std::atomic counter0{0}, counter1{0}, counter2{0}; cluster.submitToNode(0, [&]() { counter0++; }); cluster.submitToNode(1, [&]() { counter1++; }); @@ -171,7 +171,7 @@ TEST_F(SimulatedClusterTest, ProcessNetworkMessages) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter{0}; + std::atomic counter{0}; // Manually send work via network cluster.getNetwork()->send(0, 1, [&]() { counter++; }); @@ -311,7 +311,7 @@ TEST_F(SimulatedClusterTest, MultiNodeWorkDistribution) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counters[4] = {0, 0, 0, 0}; + std::atomic counters[4] = {0, 0, 0, 0}; // Submit 10 tasks to each node for (int32_t node = 0; node < 4; ++node) { diff --git a/tests/unit/test_simulated_network.cpp b/tests/unit/test_simulated_network.cpp index 54770a6d..dc6cbb9f 100644 --- a/tests/unit/test_simulated_network.cpp +++ b/tests/unit/test_simulated_network.cpp @@ -73,7 +73,7 @@ TEST_F(SimulatedNetworkTest, MultipleMessages) { SimulatedNetwork::Config config{.min_latency = 10us, .max_latency = 20us}; SimulatedNetwork network(config); - std::atomic counter{0}; + std::atomic counter{0}; // Send 10 messages for (int32_t i = 0; i < 10; ++i) { @@ -87,7 +87,7 @@ TEST_F(SimulatedNetworkTest, MultipleMessages) { 
std::this_thread::sleep_for(50us); // Receive all messages - int received = 0; + int32_t received = 0; while (auto work = network.receive(1)) { (*work)(); received++; @@ -103,7 +103,7 @@ TEST_F(SimulatedNetworkTest, DifferentDestinations) { SimulatedNetwork::Config config{.min_latency = 10us, .max_latency = 20us}; SimulatedNetwork network(config); - std::atomic counter0{0}, counter1{0}, counter2{0}; + std::atomic counter0{0}, counter1{0}, counter2{0}; // Send to different nodes network.send(0, 0, [&]() { counter0++; }); @@ -176,7 +176,7 @@ TEST_F(SimulatedNetworkTest, NoPacketLoss) { std::this_thread::sleep_for(50us); // All messages should be receivable - int received = 0; + int32_t received = 0; while (auto work = network.receive(1)) { received++; } diff --git a/tests/unit/test_simulated_numa_node.cpp b/tests/unit/test_simulated_numa_node.cpp index 54b33d93..d4b57361 100644 --- a/tests/unit/test_simulated_numa_node.cpp +++ b/tests/unit/test_simulated_numa_node.cpp @@ -38,7 +38,7 @@ TEST_F(SimulatedNUMANodeTest, SubmitWork) { SimulatedNUMANode node(0, 4); node.start(); - std::atomic counter{0}; + std::atomic counter{0}; // Submit multiple work items for (int32_t i = 0; i < 10; ++i) { @@ -56,7 +56,7 @@ TEST_F(SimulatedNUMANodeTest, SubmitToSpecificWorker) { SimulatedNUMANode node(0, 4); node.start(); - std::atomic counter{0}; + std::atomic counter{0}; // Submit to worker 0 for (int32_t i = 0; i < 5; ++i) { @@ -187,7 +187,7 @@ TEST_F(SimulatedNUMANodeTest, MultipleNodes) { node1.start(); node2.start(); - std::atomic counter0{0}, counter1{0}, counter2{0}; + std::atomic counter0{0}, counter1{0}, counter2{0}; node0.submit([&]() { counter0++; }); node1.submit([&]() { counter1++; }); @@ -209,7 +209,7 @@ TEST_F(SimulatedNUMANodeTest, SuccessfulWorkStealing) { node.start(); // Submit work that will remain in queue - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 10; ++i) { node.submitToWorker(0, [&]() { counter++; }); } @@ -267,7 +267,7 @@ 
TEST_F(SimulatedNUMANodeTest, CrossNodeWorkStealing) { node1.start(); // Submit work to node1 - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 5; ++i) { node1.submitToWorker(0, [&]() { counter++; }); } @@ -299,7 +299,7 @@ TEST_F(SimulatedNUMANodeTest, MultipleSuccessfulSteals) { node.start(); // Submit many work items that are slow to execute - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 20; ++i) { node.submit([&]() { counter++; diff --git a/tests/unit/test_simulation_corner_cases.cpp b/tests/unit/test_simulation_corner_cases.cpp index 7079fb12..6e07f080 100644 --- a/tests/unit/test_simulation_corner_cases.cpp +++ b/tests/unit/test_simulation_corner_cases.cpp @@ -36,7 +36,7 @@ TEST_F(SimulationCornerCaseTest, SingleNodeCluster) { EXPECT_NE(cluster.getNode(0), nullptr); EXPECT_EQ(cluster.getNode(1), nullptr); - std::atomic counter{0}; + std::atomic counter{0}; cluster.submitToNode(0, [&]() { counter++; }); std::this_thread::sleep_for(100ms); @@ -50,7 +50,7 @@ TEST_F(SimulationCornerCaseTest, SingleWorkerPerNode) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 10; ++i) { cluster.submitToNode(i % 2, [&]() { counter++; }); } @@ -146,7 +146,7 @@ TEST_F(SimulationCornerCaseTest, UnregisteredAgentSubmit) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter{0}; + std::atomic counter{0}; // Submit to unregistered agent should use round-robin for (int32_t i = 0; i < 10; ++i) { @@ -193,8 +193,8 @@ TEST_F(SimulationCornerCaseTest, MessageFlood) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter{0}; - const int MESSAGE_COUNT = 10000; + std::atomic counter{0}; + const int32_t MESSAGE_COUNT = 10000; // Flood with messages for (int32_t i = 0; i < MESSAGE_COUNT; ++i) { @@ -214,7 +214,7 @@ TEST_F(SimulationCornerCaseTest, NetworkMessageFlood) { SimulatedNetwork::Config config{.min_latency = 1us, 
.max_latency = 10us}; SimulatedNetwork network(config); - const int FLOOD_SIZE = 1000; + const int32_t FLOOD_SIZE = 1000; for (int32_t i = 0; i < FLOOD_SIZE; ++i) { network.send(0, 1, []() {}); @@ -225,7 +225,7 @@ TEST_F(SimulationCornerCaseTest, NetworkMessageFlood) { // Consume all messages std::this_thread::sleep_for(50ms); - int received = 0; + int32_t received = 0; while (auto work = network.receive(1)) { received++; } @@ -240,7 +240,7 @@ TEST_F(SimulationCornerCaseTest, HighQueueDepth) { cluster.start(); // Submit many blocking tasks - std::atomic completed{0}; + std::atomic completed{0}; for (int32_t i = 0; i < 1000; ++i) { cluster.submitToNode(0, [&]() { std::this_thread::sleep_for(1ms); @@ -268,9 +268,9 @@ TEST_F(SimulationCornerCaseTest, ParallelSubmitFromMultipleThreads) { SimulatedCluster cluster(config); cluster.start(); - std::atomic counter{0}; - const int THREADS = 4; - const int SUBMITS_PER_THREAD = 100; + std::atomic counter{0}; + const int32_t THREADS = 4; + const int32_t SUBMITS_PER_THREAD = 100; std::vector threads; for (int32_t t = 0; t < THREADS; ++t) { @@ -297,8 +297,8 @@ TEST_F(SimulationCornerCaseTest, ShutdownDuringActiveWork) { SimulatedCluster cluster(config); cluster.start(); - std::atomic started{0}; - std::atomic completed{0}; + std::atomic started{0}; + std::atomic completed{0}; // Submit long-running tasks for (int32_t i = 0; i < 100; ++i) { @@ -323,8 +323,8 @@ TEST_F(SimulationCornerCaseTest, ConcurrentAgentRegistration) { SimulatedCluster::Config config{.num_nodes = 4, .network_config = {}}; SimulatedCluster cluster(config); - const int THREADS = 4; - const int AGENTS_PER_THREAD = 20; + const int32_t THREADS = 4; + const int32_t AGENTS_PER_THREAD = 20; std::vector threads; for (int32_t t = 0; t < THREADS; ++t) { @@ -390,7 +390,7 @@ TEST_F(SimulationCornerCaseTest, PartialPacketLoss) { }; SimulatedNetwork network(config); - const int MESSAGE_COUNT = 1000; + const int32_t MESSAGE_COUNT = 1000; for (int32_t i = 0; i < MESSAGE_COUNT; 
++i) { network.send(0, 1, []() {}); diff --git a/tests/unit/test_task.cpp b/tests/unit/test_task.cpp index 39ac5984..69ee0055 100644 --- a/tests/unit/test_task.cpp +++ b/tests/unit/test_task.cpp @@ -18,7 +18,7 @@ using namespace keystone::concurrency; // Test: Simple Task creation and get() TEST(TaskTest, SimpleIntTask) { - auto task = []() -> Task { + auto task = []() -> Task { co_return 42; }(); @@ -60,7 +60,7 @@ TEST(TaskTest, StringTask) { // Test: Exception propagation TEST(TaskTest, ExceptionPropagation) { - auto task = []() -> Task { + auto task = []() -> Task { throw std::runtime_error("Test exception"); co_return 42; // Never reached }(); @@ -70,11 +70,11 @@ TEST(TaskTest, ExceptionPropagation) { // Test: Task move constructor TEST(TaskTest, MoveConstructor) { - auto task1 = []() -> Task { + auto task1 = []() -> Task { co_return 100; }(); - Task task2 = std::move(task1); + Task task2 = std::move(task1); int32_t result = task2.get(); EXPECT_EQ(result, 100); @@ -82,11 +82,11 @@ TEST(TaskTest, MoveConstructor) { // Test: Task move assignment TEST(TaskTest, MoveAssignment) { - auto task1 = []() -> Task { + auto task1 = []() -> Task { co_return 200; }(); - auto task2 = []() -> Task { + auto task2 = []() -> Task { co_return 300; }(); @@ -98,7 +98,7 @@ TEST(TaskTest, MoveAssignment) { // Test: Manual resume TEST(TaskTest, ManualResume) { - auto task = []() -> Task { + auto task = []() -> Task { co_return 42; }(); @@ -113,12 +113,12 @@ TEST(TaskTest, ManualResume) { // Test: Chaining coroutines with co_await TEST(TaskTest, CoroutineChaining) { - auto inner = []() -> Task { + auto inner = []() -> Task { co_return 10; }; // Keep outer lambda alive until get() completes to avoid stack-use-after-scope - auto outerLambda = [&]() -> Task { + auto outerLambda = [&]() -> Task { int32_t value = co_await inner(); co_return value * 2; }; @@ -129,12 +129,12 @@ TEST(TaskTest, CoroutineChaining) { // Test: Multiple co_await in sequence TEST(TaskTest, MultipleCoAwait) { - auto 
getValue = [](int x) -> Task { + auto getValue = [](int32_t x) -> Task { co_return x; }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto sumLambda = [&]() -> Task { + auto sumLambda = [&]() -> Task { int32_t a = co_await getValue(10); int32_t b = co_await getValue(20); int32_t c = co_await getValue(30); @@ -147,13 +147,13 @@ TEST(TaskTest, MultipleCoAwait) { // Test: Exception in chained coroutine TEST(TaskTest, ExceptionInChainedCoroutine) { - auto throwingTask = []() -> Task { + auto throwingTask = []() -> Task { throw std::runtime_error("Inner exception"); co_return 0; }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto outerLambda = [&]() -> Task { + auto outerLambda = [&]() -> Task { int32_t value = co_await throwingTask(); co_return value; }; @@ -164,7 +164,7 @@ TEST(TaskTest, ExceptionInChainedCoroutine) { // Test: Task chaining TEST(TaskTest, VoidTaskChaining) { - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); auto increment = [counter]() -> Task { counter->fetch_add(1); @@ -189,7 +189,7 @@ TEST(TaskTest, VoidTaskChaining) { // Test: await_ready returns correct value TEST(TaskTest, AwaitReady) { - auto task = []() -> Task { + auto task = []() -> Task { co_return 42; }(); @@ -203,7 +203,7 @@ TEST(TaskTest, AwaitReady) { // Test: Complex computation with multiple steps TEST(TaskTest, ComplexComputation) { - auto fibonacci = [](int n) -> Task { + auto fibonacci = [](int32_t n) -> Task { if (n <= 1) { co_return n; } @@ -212,7 +212,7 @@ TEST(TaskTest, ComplexComputation) { }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto computeLambda = [&]() -> Task { + auto computeLambda = [&]() -> Task { int32_t a = co_await fibonacci(5); int32_t b = co_await fibonacci(10); co_return a + b; @@ -227,7 +227,7 @@ TEST(TaskTest, ComplexComputation) { TEST(TaskTest, EarlyDestruction) { // This test verifies that destroying a Task before completion is 
safe { - auto task = []() -> Task { + auto task = []() -> Task { co_return 42; }(); @@ -240,12 +240,12 @@ TEST(TaskTest, EarlyDestruction) { // Test: Multiple get() calls return same result TEST(TaskTest, MultipleGetCalls) { - auto task = []() -> Task { + auto task = []() -> Task { co_return 42; }(); - int result1 = task.get(); - int result2 = task.get(); + int32_t result1 = task.get(); + int32_t result2 = task.get(); EXPECT_EQ(result1, 42); EXPECT_EQ(result2, 42); @@ -260,7 +260,7 @@ TEST(TaskTest, ExceptionMessagePreservation) { const std::string error_msg = "Detailed error message with context"; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto taskLambda = [&error_msg]() -> Task { + auto taskLambda = [&error_msg]() -> Task { throw std::runtime_error(error_msg); co_return 0; }; @@ -287,14 +287,14 @@ TEST(TaskTest, VoidTaskExceptionHandling) { // Test: Different exception types TEST(TaskTest, DifferentExceptionTypes) { // std::invalid_argument - auto task1 = []() -> Task { + auto task1 = []() -> Task { throw std::invalid_argument("Invalid argument"); co_return 0; }(); EXPECT_THROW({ task1.get(); }, std::invalid_argument); // std::out_of_range - auto task2 = []() -> Task { + auto task2 = []() -> Task { throw std::out_of_range("Out of range"); co_return 0; }(); @@ -306,7 +306,7 @@ TEST(TaskTest, DifferentExceptionTypes) { const char* what() const noexcept override { return "Custom exception"; } }; - auto task3 = []() -> Task { + auto task3 = []() -> Task { throw CustomException(); co_return 0; }(); @@ -335,18 +335,18 @@ TEST(TaskTest, DifferentExceptionTypes) { // Test: Exception in co_await chain preserves stack TEST(TaskTest, ExceptionInCoAwaitChain) { - auto innerTask = []() -> Task { + auto innerTask = []() -> Task { throw std::runtime_error("Inner task failed"); co_return 10; }; // Keep lambdas alive until get() completes to avoid stack-use-after-scope - auto middleTask = [&]() -> Task { + auto middleTask = [&]() -> Task { 
int32_t val = co_await innerTask(); co_return val * 2; // Never reached }; - auto outerLambda = [&]() -> Task { + auto outerLambda = [&]() -> Task { int32_t val = co_await middleTask(); co_return val * 3; // Never reached }; @@ -362,7 +362,7 @@ TEST(TaskTest, ExceptionInCoAwaitChain) { // Test: Multiple exceptions - only first is captured TEST(TaskTest, MultipleExceptionsFirstCaptured) { - auto task = []() -> Task { + auto task = []() -> Task { try { throw std::runtime_error("First exception"); } catch (...) { @@ -378,9 +378,9 @@ TEST(TaskTest, MultipleExceptionsFirstCaptured) { // Test: Exception with move-only types TEST(TaskTest, ExceptionWithMoveOnlyType) { - auto task = []() -> Task> { + auto task = []() -> Task> { throw std::runtime_error("Move-only exception test"); - co_return std::make_unique(42); + co_return std::make_unique(42); }(); EXPECT_THROW({ task.get(); }, std::runtime_error); @@ -388,7 +388,7 @@ TEST(TaskTest, ExceptionWithMoveOnlyType) { // Test: Verify exception stored in promise TEST(TaskTest, ExceptionStoredInPromise) { - auto task = []() -> Task { + auto task = []() -> Task { throw std::runtime_error("Stored in promise"); co_return 0; }(); @@ -408,15 +408,15 @@ TEST(TaskTest, ExceptionStoredInPromise) { // Test: Verify symmetric transfer chains coroutines properly TEST(TaskTest, SymmetricTransferChaining) { - std::vector execution_order; + std::vector execution_order; - auto task1 = [&]() -> Task { + auto task1 = [&]() -> Task { execution_order.push_back(1); co_return 10; }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto task2Lambda = [&]() -> Task { + auto task2Lambda = [&]() -> Task { execution_order.push_back(2); int32_t val = co_await task1(); execution_order.push_back(3); @@ -427,7 +427,7 @@ TEST(TaskTest, SymmetricTransferChaining) { EXPECT_EQ(task2.get(), 20); // Execution order: task2 starts (2), awaits task1 (1), resumes task2 (3) - std::vector expected = {2, 1, 3}; + std::vector expected = {2, 1, 
3}; EXPECT_EQ(execution_order, expected); } @@ -451,17 +451,17 @@ TEST(TaskTest, SymmetricTransferChaining) { // Test: Multiple levels of coroutine chaining TEST(TaskTest, DeepCoroutineChaining) { - auto level3 = []() -> Task { + auto level3 = []() -> Task { co_return 1; }; // Keep lambdas alive until get() completes to avoid stack-use-after-scope - auto level2 = [&]() -> Task { + auto level2 = [&]() -> Task { int32_t val = co_await level3(); co_return val + 10; }; - auto level1Lambda = [&]() -> Task { + auto level1Lambda = [&]() -> Task { int32_t val = co_await level2(); co_return val + 100; }; @@ -472,7 +472,7 @@ TEST(TaskTest, DeepCoroutineChaining) { // Test: Symmetric transfer with Task TEST(TaskTest, SymmetricTransferVoidTask) { - std::vector execution_order; + std::vector execution_order; auto voidTask = [&]() -> Task { execution_order.push_back(1); @@ -480,7 +480,7 @@ TEST(TaskTest, SymmetricTransferVoidTask) { }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto wrapperLambda = [&]() -> Task { + auto wrapperLambda = [&]() -> Task { execution_order.push_back(2); co_await voidTask(); execution_order.push_back(3); @@ -490,18 +490,18 @@ TEST(TaskTest, SymmetricTransferVoidTask) { EXPECT_EQ(wrapper.get(), 42); - std::vector expected = {2, 1, 3}; + std::vector expected = {2, 1, 3}; EXPECT_EQ(execution_order, expected); } // Test: Verify no stack overflow with many chained coroutines TEST(TaskTest, NoStackOverflowWithManyChainedCoroutines) { // Create a chain of 1000 coroutines - const int chain_length = 1000; - std::atomic counter{0}; + const int32_t chain_length = 1000; + std::atomic counter{0}; - std::function(int)> chainedTask; - chainedTask = [&](int depth) -> Task { + std::function(int32_t)> chainedTask; + chainedTask = [&](int32_t depth) -> Task { counter.fetch_add(1); if (depth <= 0) { co_return 0; @@ -519,13 +519,13 @@ TEST(TaskTest, NoStackOverflowWithManyChainedCoroutines) { // Test: Exception propagation through 
symmetric transfer TEST(TaskTest, ExceptionPropagationThroughSymmetricTransfer) { - auto throwingTask = []() -> Task { + auto throwingTask = []() -> Task { throw std::runtime_error("Inner exception"); co_return 0; }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto catchingLambda = [&]() -> Task { + auto catchingLambda = [&]() -> Task { int32_t val = co_await throwingTask(); co_return val; // Never reached }; @@ -536,25 +536,25 @@ TEST(TaskTest, ExceptionPropagationThroughSymmetricTransfer) { // Test: Multiple co_awaits with symmetric transfer TEST(TaskTest, MultipleCoAwaitsWithSymmetricTransfer) { - std::vector execution_order; + std::vector execution_order; - auto task1 = [&]() -> Task { + auto task1 = [&]() -> Task { execution_order.push_back(1); co_return 10; }; - auto task2 = [&]() -> Task { + auto task2 = [&]() -> Task { execution_order.push_back(2); co_return 20; }; - auto task3 = [&]() -> Task { + auto task3 = [&]() -> Task { execution_order.push_back(3); co_return 30; }; // Keep lambda alive until get() completes to avoid stack-use-after-scope - auto aggregatorLambda = [&]() -> Task { + auto aggregatorLambda = [&]() -> Task { execution_order.push_back(0); int32_t a = co_await task1(); int32_t b = co_await task2(); @@ -566,6 +566,6 @@ TEST(TaskTest, MultipleCoAwaitsWithSymmetricTransfer) { EXPECT_EQ(aggregator.get(), 60); // Execution order: aggregator starts, then task1, task2, task3 - std::vector expected = {0, 1, 2, 3}; + std::vector expected = {0, 1, 2, 3}; EXPECT_EQ(execution_order, expected); } diff --git a/tests/unit/test_task_phase_utils.cpp b/tests/unit/test_task_phase_utils.cpp index c7718bcd..f65d1eb6 100644 --- a/tests/unit/test_task_phase_utils.cpp +++ b/tests/unit/test_task_phase_utils.cpp @@ -93,7 +93,7 @@ TEST_F(CoordinatorTerminalStateTest, GetTaskProgressIsCompleteForAllTerminalStat TEST_F(CoordinatorTerminalStateTest, CleanupRemovesAllTerminalStates) { // All 5 terminal tasks were set at SetUp; pass 
age_threshold_ms=0 to force cleanup - int removed = coordinator_->cleanupOldTasks(0); + int32_t removed = coordinator_->cleanupOldTasks(0); EXPECT_EQ(removed, 5); for (const auto& task_id : diff --git a/tests/unit/test_thread_pool.cpp b/tests/unit/test_thread_pool.cpp index ad96a804..7c9cad10 100644 --- a/tests/unit/test_thread_pool.cpp +++ b/tests/unit/test_thread_pool.cpp @@ -26,7 +26,7 @@ TEST(ThreadPoolTest, DISABLED_CreateAndDestroy) { // Test: Submit and execute function TEST(ThreadPoolTest, SubmitFunction) { ThreadPool pool(2); - std::atomic counter{0}; + std::atomic counter{0}; pool.submit([&]() { counter.fetch_add(1); }); @@ -39,7 +39,7 @@ TEST(ThreadPoolTest, SubmitFunction) { // Test: Submit multiple functions TEST(ThreadPoolTest, SubmitMultipleFunctions) { ThreadPool pool(4); - std::atomic counter{0}; + std::atomic counter{0}; for (int32_t i = 0; i < 10; ++i) { pool.submit([&]() { counter.fetch_add(1); }); @@ -77,9 +77,9 @@ TEST(ThreadPoolTest, SubmitCoroutineHandle) { // Test: Parallel execution TEST(ThreadPoolTest, ParallelExecution) { ThreadPool pool(4); - std::atomic counter{0}; - std::atomic max_concurrent{0}; - std::atomic current_concurrent{0}; + std::atomic counter{0}; + std::atomic max_concurrent{0}; + std::atomic current_concurrent{0}; auto work = [&]() { int32_t concurrent = current_concurrent.fetch_add(1) + 1; @@ -115,7 +115,7 @@ TEST(ThreadPoolTest, ParallelExecution) { // Test: Graceful shutdown TEST(ThreadPoolTest, GracefulShutdown) { ThreadPool pool(2); - std::atomic counter{0}; + std::atomic counter{0}; // Submit some work for (int32_t i = 0; i < 5; ++i) { @@ -135,7 +135,7 @@ TEST(ThreadPoolTest, GracefulShutdown) { // Test: No new work accepted after shutdown TEST(ThreadPoolTest, NoWorkAfterShutdown) { ThreadPool pool(2); - std::atomic counter{0}; + std::atomic counter{0}; pool.shutdown(); @@ -162,7 +162,7 @@ TEST(ThreadPoolTest, DISABLED_HardwareConcurrency) { // Test: Exception handling in worker TEST(ThreadPoolTest, 
ExceptionHandling) { ThreadPool pool(2); - std::atomic counter{0}; + std::atomic counter{0}; // Submit task that throws pool.submit([]() { throw std::runtime_error("Test exception"); }); @@ -179,7 +179,7 @@ TEST(ThreadPoolTest, ExceptionHandling) { // Test: Thread safety with concurrent submissions TEST(ThreadPoolTest, ConcurrentSubmissions) { ThreadPool pool(4); - std::atomic counter{0}; + std::atomic counter{0}; // Launch multiple threads that submit work std::vector submitters; @@ -204,7 +204,7 @@ TEST(ThreadPoolTest, ConcurrentSubmissions) { // Test: Destructor calls shutdown TEST(ThreadPoolTest, DestructorShutdown) { - std::atomic counter{0}; + std::atomic counter{0}; { ThreadPool pool(2); diff --git a/tests/unit/test_work_stealing_queue.cpp b/tests/unit/test_work_stealing_queue.cpp index 90440612..cce87937 100644 --- a/tests/unit/test_work_stealing_queue.cpp +++ b/tests/unit/test_work_stealing_queue.cpp @@ -49,7 +49,7 @@ TEST(WorkStealingQueueTest, PopEmpty) { // Test: Steal from queue TEST(WorkStealingQueueTest, Steal) { WorkStealingQueue queue; - std::atomic counter{0}; + std::atomic counter{0}; queue.push(WorkItem::makeFunction([&]() { counter.fetch_add(1); })); @@ -71,7 +71,7 @@ TEST(WorkStealingQueueTest, StealEmpty) { // Test: Multiple push and pop TEST(WorkStealingQueueTest, MultiplePushPop) { WorkStealingQueue queue; - std::atomic counter{0}; + std::atomic counter{0}; // Push 10 items for (int32_t i = 0; i < 10; ++i) { @@ -95,7 +95,7 @@ TEST(WorkStealingQueueTest, MultiplePushPop) { // Test: Concurrent push from multiple threads TEST(WorkStealingQueueTest, ConcurrentPush) { WorkStealingQueue queue; - std::atomic push_count{0}; + std::atomic push_count{0}; constexpr int32_t num_threads = 4; constexpr int32_t items_per_thread = 25; @@ -118,7 +118,7 @@ TEST(WorkStealingQueueTest, ConcurrentPush) { // Test: Work stealing from multiple threads TEST(WorkStealingQueueTest, WorkStealingMultipleThreads) { WorkStealingQueue queue; - std::atomic 
executed_count{0}; + std::atomic executed_count{0}; constexpr int32_t total_items = 100; // Push work items diff --git a/tests/unit/test_work_stealing_scheduler.cpp b/tests/unit/test_work_stealing_scheduler.cpp index 69bd8b3e..2cd74b22 100644 --- a/tests/unit/test_work_stealing_scheduler.cpp +++ b/tests/unit/test_work_stealing_scheduler.cpp @@ -33,7 +33,7 @@ TEST(WorkStealingSchedulerTest, SubmitFunction) { WorkStealingScheduler scheduler(2); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit 10 work items for (int32_t i = 0; i < 10; ++i) { @@ -80,7 +80,7 @@ TEST(WorkStealingSchedulerTest, RoundRobinDistribution) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit many work items (should be distributed round-robin) for (int32_t i = 0; i < 100; ++i) { @@ -100,7 +100,7 @@ TEST(WorkStealingSchedulerTest, SubmitToSpecificWorker) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit all work to worker 2 for (int32_t i = 0; i < 10; ++i) { @@ -120,7 +120,7 @@ TEST(WorkStealingSchedulerTest, WorkStealing) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit all work to worker 0 (other workers will steal) for (int32_t i = 0; i < 100; ++i) { @@ -143,7 +143,7 @@ TEST(WorkStealingSchedulerTest, ShutdownWithPendingWork) { WorkStealingScheduler scheduler(2); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit work items for (int32_t i = 0; i < 20; ++i) { @@ -184,7 +184,7 @@ TEST(WorkStealingSchedulerTest, MultipleShutdownCalls) { // Test: Destructor shuts down automatically TEST(WorkStealingSchedulerTest, DestructorShutdown) { - auto counter = std::make_shared>(0); + auto counter = 
std::make_shared>(0); { WorkStealingScheduler scheduler(2); @@ -232,9 +232,9 @@ TEST(WorkStealingSchedulerTest, ParallelExecution) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared>(0); - auto max_concurrent = std::make_shared>(0); - auto current_executing = std::make_shared>(0); + auto counter = std::make_shared>(0); + auto max_concurrent = std::make_shared>(0); + auto current_executing = std::make_shared>(0); // Submit work that tracks concurrency for (int32_t i = 0; i < 20; ++i) { @@ -269,7 +269,7 @@ TEST(WorkStealingSchedulerTest, SubmitToInvalidIndex) { WorkStealingScheduler scheduler(2); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit to invalid index (should log error but not crash) scheduler.submitTo(999, [counter]() { counter->fetch_add(1); }); @@ -287,7 +287,7 @@ TEST(WorkStealingSchedulerTest, HeavyLoad) { WorkStealingScheduler scheduler(4); scheduler.start(); - auto counter = std::make_shared>(0); + auto counter = std::make_shared>(0); // Submit 1000 work items for (int32_t i = 0; i < 1000; ++i) {