From d5fdc00951f3b96553ef1017bc6685800a463f50 Mon Sep 17 00:00:00 2001 From: Caio Lima Date: Mon, 3 Nov 2025 13:36:07 -0300 Subject: [PATCH 1/5] deps: V8: backport fe81545e6d14 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Original commit message: [api] Adding total allocated bytes in HeapStatistics This change exposes total allocated bytes in the v8::HeapStatistics API by introducing a new total_allocated_bytes() method that tracks all heap allocations since an Isolate's creation. The implementation adds: - uint64_t total_allocated_bytes_ field to HeapStatistics. - An atomic total allocation counter is stored in the Heap class. - The counter is incremented whenever ResetLab is called. This approach can overestimate the total allocation for cases where the LAB is not fully used, but the leftover compared to the LAB itself is quite small, so it seems tolerable. Design doc reference: https://docs.google.com/document/d/1O4JPsoaxTQsX_7T5Fz4rsGeHMiM16jUrvDuq9FrtbNM Change-Id: Ic531698aaeb1578f943b7fdd346b9159ffd9b6c9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6996467 Reviewed-by: Dominik Inführ Reviewed-by: Michael Lippautz Commit-Queue: Dmitry Bezhetskov Cr-Commit-Position: refs/heads/main@{#103296} Refs: https://github.com/v8/v8/commit/fe81545e6d14397cabb39ba3a5163eedf7624bb1 Co-authored-by: Caio Lima PR-URL: https://github.com/nodejs/node/pull/60429 Reviewed-By: Joyee Cheung --- deps/v8/include/v8-statistics.h | 8 ++ deps/v8/src/api/api.cc | 4 +- deps/v8/src/heap/heap-allocator.cc | 30 ++++-- deps/v8/src/heap/heap.cc | 4 + deps/v8/src/heap/heap.h | 7 ++ deps/v8/src/heap/main-allocator.cc | 6 ++ deps/v8/test/cctest/test-api.cc | 146 +++++++++++++++++++++++++++++ 7 files changed, 198 insertions(+), 7 deletions(-) diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h index 82b78f5ec65729..2dc10e0fbfd304 100644 --- a/deps/v8/include/v8-statistics.h +++ b/deps/v8/include/v8-statistics.h @@ -154,6 +154,13 @@ class V8_EXPORT HeapStatistics { size_t number_of_native_contexts() { return number_of_native_contexts_; } size_t number_of_detached_contexts() { return number_of_detached_contexts_; } + /** + * Returns the total number of bytes allocated since the Isolate was created. + * This includes all heap objects allocated in any space (new, old, code, + * etc.). + */ + uint64_t total_allocated_bytes() { return total_allocated_bytes_; } + /** * Returns a 0/1 boolean, which signifies whether the V8 overwrite heap * garbage with a bit pattern. 
@@ -175,6 +182,7 @@ class V8_EXPORT HeapStatistics { size_t number_of_detached_contexts_; size_t total_global_handles_size_; size_t used_global_handles_size_; + uint64_t total_allocated_bytes_; friend class V8; friend class Isolate; diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index ba759168aa92f5..ad4f166ee7e498 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -6560,7 +6560,8 @@ HeapStatistics::HeapStatistics() peak_malloced_memory_(0), does_zap_garbage_(false), number_of_native_contexts_(0), - number_of_detached_contexts_(0) {} + number_of_detached_contexts_(0), + total_allocated_bytes_(0) {} HeapSpaceStatistics::HeapSpaceStatistics() : space_name_(nullptr), @@ -10353,6 +10354,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts(); heap_statistics->number_of_detached_contexts_ = heap->NumberOfDetachedContexts(); + heap_statistics->total_allocated_bytes_ = heap->GetTotalAllocatedBytes(); heap_statistics->does_zap_garbage_ = i::heap::ShouldZapGarbage(); #if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc index 6f5946fc2374c3..88491df2ce2388 100644 --- a/deps/v8/src/heap/heap-allocator.cc +++ b/deps/v8/src/heap/heap-allocator.cc @@ -65,24 +65,42 @@ AllocationResult HeapAllocator::AllocateRawLargeInternal( int size_in_bytes, AllocationType allocation, AllocationOrigin origin, AllocationAlignment alignment) { DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation)); + AllocationResult allocation_result; switch (allocation) { case AllocationType::kYoung: - return new_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = + new_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + break; case AllocationType::kOld: - return lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = + lo_space()->AllocateRaw(local_heap_, size_in_bytes); + break; case AllocationType::kCode: - return code_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = + code_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + break; case AllocationType::kSharedOld: - return shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = + shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + break; case AllocationType::kTrusted: - return trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = + trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + break; case AllocationType::kSharedTrusted: - return shared_trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes); + allocation_result = shared_trusted_lo_space()->AllocateRaw( + local_heap_, size_in_bytes); + break; case AllocationType::kMap: case AllocationType::kReadOnly: case AllocationType::kSharedMap: UNREACHABLE(); } + if (!allocation_result.IsFailure()) { + int allocated_size = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes); + heap_->AddTotalAllocatedBytes(allocated_size); + } + return allocation_result; } namespace { diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc index f4e94e406e2690..392edeec5e84a6 100644 --- a/deps/v8/src/heap/heap.cc +++ b/deps/v8/src/heap/heap.cc @@ -7446,6 +7446,10 @@ int Heap::NextStackTraceId() { return last_id; } +uint64_t Heap::GetTotalAllocatedBytes() { + return total_allocated_bytes_.load(std::memory_order_relaxed); +} + EmbedderStackStateScope::EmbedderStackStateScope( Heap* heap, EmbedderStackStateOrigin 
origin, StackState stack_state) : heap_(heap), diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h index f2c5a25010d2a8..27894b9f3dab32 100644 --- a/deps/v8/src/heap/heap.h +++ b/deps/v8/src/heap/heap.h @@ -1635,6 +1635,11 @@ class Heap final { bool ShouldUseBackgroundThreads() const; bool ShouldUseIncrementalMarking() const; + void AddTotalAllocatedBytes(size_t size) { + total_allocated_bytes_.fetch_add(size, std::memory_order_relaxed); + } + uint64_t GetTotalAllocatedBytes(); + HeapAllocator* allocator() { return heap_allocator_; } const HeapAllocator* allocator() const { return heap_allocator_; } @@ -2409,6 +2414,8 @@ class Heap final { // actually finished. bool is_full_gc_during_loading_ = false; + std::atomic total_allocated_bytes_ = 0; + // Classes in "heap" can be friends. friend class ActivateMemoryReducerTask; friend class AlwaysAllocateScope; diff --git a/deps/v8/src/heap/main-allocator.cc b/deps/v8/src/heap/main-allocator.cc index 375cc521989352..d4040f183f60cd 100644 --- a/deps/v8/src/heap/main-allocator.cc +++ b/deps/v8/src/heap/main-allocator.cc @@ -298,6 +298,12 @@ void MainAllocator::ResetLab(Address start, Address end, Address extended_end) { MemoryChunkMetadata::UpdateHighWaterMark(top()); } + // This is going to overestimate a bit of the total allocated bytes, since the + // LAB was not used yet. However the leftover compared to the LAB itself is + // quite small, so it seems tolerable. + if (local_heap_) { + local_heap_->heap()->AddTotalAllocatedBytes(end - start); + } allocation_info().Reset(start, end); if (SupportsPendingAllocation()) { diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 876de6efb525ab..c9258ad0221b70 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -17492,6 +17492,152 @@ TEST(GetHeapSpaceStatistics) { CHECK_EQ(total_physical_size, heap_statistics.total_physical_size()); } +UNINITIALIZED_TEST(GetHeapTotalAllocatedBytes) { + // This test is incompatible with concurrent allocation, which may occur + // while collecting the statistics and break the final `CHECK_EQ`s. 
+ if (i::v8_flags.stress_concurrent_allocation) return; + + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + + const uint32_t number_of_elements = 1; + const uint32_t allocation_size = i::FixedArray::SizeFor(number_of_elements); + const uint32_t trusted_allocation_size = + i::TrustedFixedArray::SizeFor(number_of_elements); + const uint32_t lo_number_of_elements = 256 * 1024; + const uint32_t lo_allocation_size = + i::FixedArray::SizeFor(lo_number_of_elements); + const uint32_t trusted_lo_allocation_size = + i::TrustedFixedArray::SizeFor(lo_number_of_elements); + const uint32_t expected_allocation_size = + allocation_size * 2 + lo_allocation_size * 2 + trusted_allocation_size + + trusted_lo_allocation_size; + + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + LocalContext env(isolate); + i::Isolate* i_isolate = reinterpret_cast(isolate); + + v8::HeapStatistics heap_stats_before; + isolate->GetHeapStatistics(&heap_stats_before); + size_t initial_allocated = heap_stats_before.total_allocated_bytes(); + + i::MaybeHandle young_alloc = + i_isolate->factory()->TryNewFixedArray(number_of_elements, + i::AllocationType::kYoung); + USE(young_alloc); + i::MaybeHandle old_alloc = + i_isolate->factory()->TryNewFixedArray(number_of_elements, + i::AllocationType::kOld); + USE(old_alloc); + i::Handle trusted_alloc = + i_isolate->factory()->NewTrustedFixedArray(number_of_elements, + i::AllocationType::kTrusted); + USE(trusted_alloc); + i::MaybeHandle old_lo_alloc = + i_isolate->factory()->TryNewFixedArray(lo_number_of_elements, + i::AllocationType::kOld); + USE(old_lo_alloc); + + { + v8::HandleScope inner_handle_scope(isolate); + auto young_lo_alloc = i_isolate->factory()->TryNewFixedArray( + lo_number_of_elements, i::AllocationType::kYoung); + USE(young_lo_alloc); + } + + auto trusted_lo_alloc = i_isolate->factory()->NewTrustedFixedArray( + lo_number_of_elements, i::AllocationType::kTrusted); + USE(trusted_lo_alloc); + + v8::HeapStatistics heap_stats_after; + isolate->GetHeapStatistics(&heap_stats_after); + uint64_t final_allocated = heap_stats_after.total_allocated_bytes(); + + CHECK_GT(final_allocated, initial_allocated); + uint64_t allocated_diff = final_allocated - initial_allocated; + CHECK_GE(allocated_diff, expected_allocation_size); + + // This either tests counting happening when a LAB freed and validate + // there's no double counting on evacuated/promoted objects. + v8::internal::heap::InvokeAtomicMajorGC(i_isolate->heap()); + + v8::HeapStatistics heap_stats_after_gc; + isolate->GetHeapStatistics(&heap_stats_after_gc); + uint64_t total_allocation_after_gc = + heap_stats_after_gc.total_allocated_bytes(); + + CHECK_EQ(total_allocation_after_gc, final_allocated); + } + + isolate->Dispose(); +} + +#if V8_CAN_CREATE_SHARED_HEAP_BOOL + +UNINITIALIZED_TEST(GetHeapTotalAllocatedBytesSharedSpaces) { + // This test is incompatible with concurrent allocation, which may occur + // while collecting the statistics and break the final `CHECK_EQ`s. 
+ if (i::v8_flags.stress_concurrent_allocation) return; + if (COMPRESS_POINTERS_IN_MULTIPLE_CAGES_BOOL) return; + + i::v8_flags.shared_heap = true; + i::FlagList::EnforceFlagImplications(); + + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + + { + v8::Isolate::Scope isolate_scope(isolate); + v8::HandleScope handle_scope(isolate); + LocalContext env(isolate); + + v8::HeapStatistics heap_stats_before; + isolate->GetHeapStatistics(&heap_stats_before); + size_t initial_allocated = heap_stats_before.total_allocated_bytes(); + + i::Isolate* i_isolate = reinterpret_cast(isolate); + + const uint32_t number_of_elements = 1; + const uint32_t allocation_size = i::FixedArray::SizeFor(number_of_elements); + const uint32_t trusted_allocation_size = + i::TrustedFixedArray::SizeFor(number_of_elements); + const uint32_t lo_number_of_elements = 256 * 1024; + const uint32_t lo_allocation_size = + i::FixedArray::SizeFor(lo_number_of_elements); + const uint32_t expected_allocation_size = + allocation_size + trusted_allocation_size + lo_allocation_size; + + i::MaybeHandle shared_alloc = + i_isolate->factory()->TryNewFixedArray(number_of_elements, + i::AllocationType::kSharedOld); + USE(shared_alloc); + i::Handle shared_trusted_alloc = + i_isolate->factory()->NewTrustedFixedArray( + number_of_elements, i::AllocationType::kSharedTrusted); + USE(shared_trusted_alloc); + i::MaybeHandle shared_lo_alloc = + i_isolate->factory()->TryNewFixedArray(lo_number_of_elements, + i::AllocationType::kSharedOld); + USE(shared_lo_alloc); + + v8::HeapStatistics heap_stats_after; + isolate->GetHeapStatistics(&heap_stats_after); + uint64_t final_allocated = heap_stats_after.total_allocated_bytes(); + + CHECK_GT(final_allocated, initial_allocated); + uint64_t allocated_diff = final_allocated - initial_allocated; + CHECK_GE(allocated_diff, expected_allocation_size); + } + + isolate->Dispose(); +} + +#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL + TEST(NumberOfNativeContexts) { static const size_t kNumTestContexts = 10; i::Isolate* isolate = CcTest::i_isolate(); From 4cda026459ef5037aa7e9b8ebf15510790baec00 Mon Sep 17 00:00:00 2001 From: Caio Lima Date: Fri, 7 Nov 2025 12:56:41 -0300 Subject: [PATCH 2/5] v8: adding total_allocated_bytes to HeapStatistics PR-URL: https://github.com/nodejs/node/pull/60573 Reviewed-By: theanarkh Reviewed-By: Anna Henningsen Reviewed-By: Joyee Cheung Reviewed-By: Chengzhong Wu Reviewed-By: Colin Ihrig Reviewed-By: Marco Ippolito Reviewed-By: James M Snell fixing tab error. --- common.gypi | 2 +- deps/v8/include/v8-isolate.h | 7 + deps/v8/include/v8-statistics.h | 8 - deps/v8/src/api/api.cc | 9 +- deps/v8/src/heap/heap.h | 2 +- deps/v8/test/cctest/test-api.cc | 146 ------------------- doc/api/v8.md | 7 +- lib/v8.js | 2 + src/node_v8.cc | 12 ++ src/node_worker.cc | 4 +- test/parallel/test-v8-stats.js | 1 + test/parallel/test-worker-heap-statistics.js | 1 + 12 files changed, 40 insertions(+), 161 deletions(-) diff --git a/common.gypi b/common.gypi index a6896876d45e4c..479fd0e6d7d60f 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. 
- 'v8_embedder_string': '-node.31', + 'v8_embedder_string': '-node.32', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index 332688e77b48e6..5f20c568608081 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -950,6 +950,13 @@ class V8_EXPORT Isolate { */ void GetHeapStatistics(HeapStatistics* heap_statistics); + /** + * Get total allocated bytes since isolate creation. + * This should be used only by Node.JS, since it's a temporary method + * to avoid breaking ABI on HeapStatistics. + */ + uint64_t GetTotalAllocatedBytes(); + /** * Returns the number of spaces in the heap. */ diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h index 2dc10e0fbfd304..82b78f5ec65729 100644 --- a/deps/v8/include/v8-statistics.h +++ b/deps/v8/include/v8-statistics.h @@ -154,13 +154,6 @@ class V8_EXPORT HeapStatistics { size_t number_of_native_contexts() { return number_of_native_contexts_; } size_t number_of_detached_contexts() { return number_of_detached_contexts_; } - /** - * Returns the total number of bytes allocated since the Isolate was created. - * This includes all heap objects allocated in any space (new, old, code, - * etc.). - */ - uint64_t total_allocated_bytes() { return total_allocated_bytes_; } - /** * Returns a 0/1 boolean, which signifies whether the V8 overwrite heap * garbage with a bit pattern. @@ -182,7 +175,6 @@ class V8_EXPORT HeapStatistics { size_t number_of_detached_contexts_; size_t total_global_handles_size_; size_t used_global_handles_size_; - uint64_t total_allocated_bytes_; friend class V8; friend class Isolate; diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index ad4f166ee7e498..71c07e78865efc 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -6560,8 +6560,7 @@ HeapStatistics::HeapStatistics() peak_malloced_memory_(0), does_zap_garbage_(false), number_of_native_contexts_(0), - number_of_detached_contexts_(0), - total_allocated_bytes_(0) {} + number_of_detached_contexts_(0) {} HeapSpaceStatistics::HeapSpaceStatistics() : space_name_(nullptr), @@ -10354,7 +10353,6 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts(); heap_statistics->number_of_detached_contexts_ = heap->NumberOfDetachedContexts(); - heap_statistics->total_allocated_bytes_ = heap->GetTotalAllocatedBytes(); heap_statistics->does_zap_garbage_ = i::heap::ShouldZapGarbage(); #if V8_ENABLE_WEBASSEMBLY @@ -10365,6 +10363,11 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { #endif // V8_ENABLE_WEBASSEMBLY } +uint64_t Isolate::GetTotalAllocatedBytes() { + i::Isolate* i_isolate = reinterpret_cast(this); + return i_isolate->heap()->GetTotalAllocatedBytes(); +} + size_t Isolate::NumberOfHeapSpaces() { return i::LAST_SPACE - i::FIRST_SPACE + 1; } diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h index 27894b9f3dab32..ef4de1c4117947 100644 --- a/deps/v8/src/heap/heap.h +++ b/deps/v8/src/heap/heap.h @@ -2414,7 +2414,7 @@ class Heap final { // actually finished. bool is_full_gc_during_loading_ = false; - std::atomic total_allocated_bytes_ = 0; + std::atomic total_allocated_bytes_ = 0; // Classes in "heap" can be friends. 
friend class ActivateMemoryReducerTask; diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index c9258ad0221b70..876de6efb525ab 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -17492,152 +17492,6 @@ TEST(GetHeapSpaceStatistics) { CHECK_EQ(total_physical_size, heap_statistics.total_physical_size()); } -UNINITIALIZED_TEST(GetHeapTotalAllocatedBytes) { - // This test is incompatible with concurrent allocation, which may occur - // while collecting the statistics and break the final `CHECK_EQ`s. - if (i::v8_flags.stress_concurrent_allocation) return; - - v8::Isolate::CreateParams create_params; - create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); - v8::Isolate* isolate = v8::Isolate::New(create_params); - - const uint32_t number_of_elements = 1; - const uint32_t allocation_size = i::FixedArray::SizeFor(number_of_elements); - const uint32_t trusted_allocation_size = - i::TrustedFixedArray::SizeFor(number_of_elements); - const uint32_t lo_number_of_elements = 256 * 1024; - const uint32_t lo_allocation_size = - i::FixedArray::SizeFor(lo_number_of_elements); - const uint32_t trusted_lo_allocation_size = - i::TrustedFixedArray::SizeFor(lo_number_of_elements); - const uint32_t expected_allocation_size = - allocation_size * 2 + lo_allocation_size * 2 + trusted_allocation_size + - trusted_lo_allocation_size; - - { - v8::Isolate::Scope isolate_scope(isolate); - v8::HandleScope handle_scope(isolate); - LocalContext env(isolate); - i::Isolate* i_isolate = reinterpret_cast(isolate); - - v8::HeapStatistics heap_stats_before; - isolate->GetHeapStatistics(&heap_stats_before); - size_t initial_allocated = heap_stats_before.total_allocated_bytes(); - - i::MaybeHandle young_alloc = - i_isolate->factory()->TryNewFixedArray(number_of_elements, - i::AllocationType::kYoung); - USE(young_alloc); - i::MaybeHandle old_alloc = - i_isolate->factory()->TryNewFixedArray(number_of_elements, - i::AllocationType::kOld); - USE(old_alloc); - i::Handle trusted_alloc = - i_isolate->factory()->NewTrustedFixedArray(number_of_elements, - i::AllocationType::kTrusted); - USE(trusted_alloc); - i::MaybeHandle old_lo_alloc = - i_isolate->factory()->TryNewFixedArray(lo_number_of_elements, - i::AllocationType::kOld); - USE(old_lo_alloc); - - { - v8::HandleScope inner_handle_scope(isolate); - auto young_lo_alloc = i_isolate->factory()->TryNewFixedArray( - lo_number_of_elements, i::AllocationType::kYoung); - USE(young_lo_alloc); - } - - auto trusted_lo_alloc = i_isolate->factory()->NewTrustedFixedArray( - lo_number_of_elements, i::AllocationType::kTrusted); - USE(trusted_lo_alloc); - - v8::HeapStatistics heap_stats_after; - isolate->GetHeapStatistics(&heap_stats_after); - uint64_t final_allocated = heap_stats_after.total_allocated_bytes(); - - CHECK_GT(final_allocated, initial_allocated); - uint64_t allocated_diff = final_allocated - initial_allocated; - CHECK_GE(allocated_diff, expected_allocation_size); - - // This either tests counting happening when a LAB freed and validate - // there's no double counting on evacuated/promoted objects. 
- v8::internal::heap::InvokeAtomicMajorGC(i_isolate->heap()); - - v8::HeapStatistics heap_stats_after_gc; - isolate->GetHeapStatistics(&heap_stats_after_gc); - uint64_t total_allocation_after_gc = - heap_stats_after_gc.total_allocated_bytes(); - - CHECK_EQ(total_allocation_after_gc, final_allocated); - } - - isolate->Dispose(); -} - -#if V8_CAN_CREATE_SHARED_HEAP_BOOL - -UNINITIALIZED_TEST(GetHeapTotalAllocatedBytesSharedSpaces) { - // This test is incompatible with concurrent allocation, which may occur - // while collecting the statistics and break the final `CHECK_EQ`s. - if (i::v8_flags.stress_concurrent_allocation) return; - if (COMPRESS_POINTERS_IN_MULTIPLE_CAGES_BOOL) return; - - i::v8_flags.shared_heap = true; - i::FlagList::EnforceFlagImplications(); - - v8::Isolate::CreateParams create_params; - create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); - v8::Isolate* isolate = v8::Isolate::New(create_params); - - { - v8::Isolate::Scope isolate_scope(isolate); - v8::HandleScope handle_scope(isolate); - LocalContext env(isolate); - - v8::HeapStatistics heap_stats_before; - isolate->GetHeapStatistics(&heap_stats_before); - size_t initial_allocated = heap_stats_before.total_allocated_bytes(); - - i::Isolate* i_isolate = reinterpret_cast(isolate); - - const uint32_t number_of_elements = 1; - const uint32_t allocation_size = i::FixedArray::SizeFor(number_of_elements); - const uint32_t trusted_allocation_size = - i::TrustedFixedArray::SizeFor(number_of_elements); - const uint32_t lo_number_of_elements = 256 * 1024; - const uint32_t lo_allocation_size = - i::FixedArray::SizeFor(lo_number_of_elements); - const uint32_t expected_allocation_size = - allocation_size + trusted_allocation_size + lo_allocation_size; - - i::MaybeHandle shared_alloc = - i_isolate->factory()->TryNewFixedArray(number_of_elements, - i::AllocationType::kSharedOld); - USE(shared_alloc); - i::Handle shared_trusted_alloc = - i_isolate->factory()->NewTrustedFixedArray( - number_of_elements, i::AllocationType::kSharedTrusted); - USE(shared_trusted_alloc); - i::MaybeHandle shared_lo_alloc = - i_isolate->factory()->TryNewFixedArray(lo_number_of_elements, - i::AllocationType::kSharedOld); - USE(shared_lo_alloc); - - v8::HeapStatistics heap_stats_after; - isolate->GetHeapStatistics(&heap_stats_after); - uint64_t final_allocated = heap_stats_after.total_allocated_bytes(); - - CHECK_GT(final_allocated, initial_allocated); - uint64_t allocated_diff = final_allocated - initial_allocated; - CHECK_GE(allocated_diff, expected_allocation_size); - } - - isolate->Dispose(); -} - -#endif // V8_CAN_CREATE_SHARED_HEAP_BOOL - TEST(NumberOfNativeContexts) { static const size_t kNumTestContexts = 10; i::Isolate* isolate = CcTest::i_isolate(); diff --git a/doc/api/v8.md b/doc/api/v8.md index 3b0795bba8e648..9b038cce419c4f 100644 --- a/doc/api/v8.md +++ b/doc/api/v8.md @@ -197,6 +197,7 @@ Returns an object with the following properties: * `total_global_handles_size` {number} * `used_global_handles_size` {number} * `external_memory` {number} +* `total_allocated_bytes` {number} `total_heap_size` The value of total\_heap\_size is the number of bytes V8 has allocated for the heap. This can grow if used\_heap needs more memory. @@ -250,6 +251,9 @@ used memory size of V8 global handles. `external_memory` The value of external\_memory is the memory size of array buffers and external strings. +`total_allocated_bytes` The value of total allocated bytes since the Isolate +creation. + ```js @@ -267,7 +271,8 @@ buffers and external strings. 
number_of_detached_contexts: 0, total_global_handles_size: 8192, used_global_handles_size: 3296, - external_memory: 318824 + external_memory: 318824, + total_allocated_bytes: 24970208 } ``` diff --git a/lib/v8.js b/lib/v8.js index 47f694103719aa..7f1789f1e04f99 100644 --- a/lib/v8.js +++ b/lib/v8.js @@ -117,6 +117,7 @@ const { stopCpuProfile: _stopCpuProfile, isStringOneByteRepresentation: _isStringOneByteRepresentation, updateHeapStatisticsBuffer, + getTotalAllocatedBytes, updateHeapSpaceStatisticsBuffer, updateHeapCodeStatisticsBuffer, setHeapSnapshotNearHeapLimit: _setHeapSnapshotNearHeapLimit, @@ -246,6 +247,7 @@ function getHeapStatistics() { total_global_handles_size: buffer[kTotalGlobalHandlesSizeIndex], used_global_handles_size: buffer[kUsedGlobalHandlesSizeIndex], external_memory: buffer[kExternalMemoryIndex], + total_allocated_bytes: getTotalAllocatedBytes(), }; } diff --git a/src/node_v8.cc b/src/node_v8.cc index 8dd32dad262679..ac818812ccb84f 100644 --- a/src/node_v8.cc +++ b/src/node_v8.cc @@ -212,6 +212,12 @@ void UpdateHeapStatisticsBuffer(const FunctionCallbackInfo& args) { #undef V } +void GetTotalAllocatedBytes(const FunctionCallbackInfo& args) { + Isolate* isolate = args.GetIsolate(); + uint64_t allocated_bytes = isolate->GetTotalAllocatedBytes(); + args.GetReturnValue().Set(Number::New(isolate, allocated_bytes)); +} + void UpdateHeapSpaceStatisticsBuffer(const FunctionCallbackInfo& args) { BindingData* data = Realm::GetBindingData(args); @@ -692,6 +698,11 @@ void Initialize(Local target, "updateHeapStatisticsBuffer", UpdateHeapStatisticsBuffer); + SetMethod(context, + target, + "getTotalAllocatedBytes", + GetTotalAllocatedBytes); + SetMethod(context, target, "updateHeapCodeStatisticsBuffer", @@ -773,6 +784,7 @@ void Initialize(Local target, void RegisterExternalReferences(ExternalReferenceRegistry* registry) { registry->Register(CachedDataVersionTag); registry->Register(UpdateHeapStatisticsBuffer); + registry->Register(GetTotalAllocatedBytes); registry->Register(UpdateHeapCodeStatisticsBuffer); registry->Register(UpdateHeapSpaceStatisticsBuffer); registry->Register(SetFlagsFromString); diff --git a/src/node_worker.cc b/src/node_worker.cc index 7bae29747d8cd8..f09561f77e3a91 100644 --- a/src/node_worker.cc +++ b/src/node_worker.cc @@ -1263,6 +1263,7 @@ void Worker::GetHeapStatistics(const FunctionCallbackInfo& args) { "total_global_handles_size", "used_global_handles_size", "external_memory", + "total_allocated_bytes", }; tmpl = DictionaryTemplate::New(isolate, heap_stats_names); env->set_heap_statistics_template(tmpl); @@ -1283,7 +1284,8 @@ void Worker::GetHeapStatistics(const FunctionCallbackInfo& args) { Number::New(isolate, heap_stats->number_of_detached_contexts()), Number::New(isolate, heap_stats->total_global_handles_size()), Number::New(isolate, heap_stats->used_global_handles_size()), - Number::New(isolate, heap_stats->external_memory())}; + Number::New(isolate, heap_stats->external_memory()), + Number::New(isolate, isolate->GetTotalAllocatedBytes())}; Local obj; if (!NewDictionaryInstanceNullProto( diff --git a/test/parallel/test-v8-stats.js b/test/parallel/test-v8-stats.js index 07be833e6e749a..5ee4c5aeb31adf 100644 --- a/test/parallel/test-v8-stats.js +++ b/test/parallel/test-v8-stats.js @@ -12,6 +12,7 @@ const keys = [ 'number_of_detached_contexts', 'number_of_native_contexts', 'peak_malloced_memory', + 'total_allocated_bytes', 'total_available_size', 'total_global_handles_size', 'total_heap_size', diff --git a/test/parallel/test-worker-heap-statistics.js 
b/test/parallel/test-worker-heap-statistics.js index 12a748c303a026..ba3165aa24aba0 100644 --- a/test/parallel/test-worker-heap-statistics.js +++ b/test/parallel/test-worker-heap-statistics.js @@ -40,6 +40,7 @@ if (isMainThread) { `total_global_handles_size`, `used_global_handles_size`, `external_memory`, + `total_allocated_bytes`, ].sort(); assert.deepStrictEqual(keys, Object.keys(stats).sort()); for (const key of keys) { From 0b01b63d3b7c29ba05cd3b0470a694af47e4aa43 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Tue, 10 Jun 2025 15:41:56 +0800 Subject: [PATCH 3/5] deps: V8: backport bbaae8e36164 Original commit message: Reland "[riscv] Fix Check failed in bind_to" This is a reland of commit fdb5de2c741658e94944f2ec1218530e98601c23 Original change's description: > [riscv] Fix Check failed in bind_to > > The trampoline should be emitted before the constant pool. > > Bug: 420232092 > > Change-Id: I3a909b122607e37aca9d8765f28810ec74d5dc0b > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6578135 > Auto-Submit: Yahan Lu (LuYahan) > Reviewed-by: Ji Qiu > Commit-Queue: Ji Qiu > Cr-Commit-Position: refs/heads/main@{#100480} Bug: 420232092 Change-Id: I1fac1ed8c349383ef4510abea338b3d695ed57ab Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6595668 Commit-Queue: Ji Qiu Reviewed-by: Ji Qiu Cr-Commit-Position: refs/heads/main@{#100745} Refs: https://github.com/v8/v8/commit/bbaae8e36164b02b678966c7612bf3d23644b22c Co-authored-by: kxxt PR-URL: https://github.com/nodejs/node/pull/60962 Reviewed-By: Richard Lau Reviewed-By: Stewart X Addison --- common.gypi | 2 +- deps/v8/src/codegen/riscv/assembler-riscv.cc | 4 ++-- deps/v8/src/codegen/riscv/assembler-riscv.h | 15 +++++++++++++-- .../src/codegen/riscv/macro-assembler-riscv.cc | 16 ++++++++++++---- 4 files changed, 28 insertions(+), 9 deletions(-) diff --git a/common.gypi b/common.gypi index db1625378697d9..eac3302d385349 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.37', + 'v8_embedder_string': '-node.38', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.cc b/deps/v8/src/codegen/riscv/assembler-riscv.cc index a9093ed33122c1..6cc3724b25ab10 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/assembler-riscv.cc @@ -720,8 +720,8 @@ void Assembler::bind_to(Label* L, int pos) { trampoline_pos = get_trampoline_entry(fixup_pos); CHECK_NE(trampoline_pos, kInvalidSlotPos); } - CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos); + CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset); target_at_put(fixup_pos, trampoline_pos, false); fixup_pos = trampoline_pos; } @@ -1486,6 +1486,7 @@ void Assembler::BlockTrampolinePoolFor(int instructions) { } void Assembler::CheckTrampolinePool() { + if (trampoline_emitted_) return; // Some small sequences of instructions must not be broken up by the // insertion of a trampoline pool; such sequences are protected by setting // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, @@ -1507,7 +1508,6 @@ void Assembler::CheckTrampolinePool() { return; } - DCHECK(!trampoline_emitted_); DCHECK_GE(unbound_labels_count_, 0); if (unbound_labels_count_ > 0) { // First we emit jump, then we emit trampoline pool. 
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.h b/deps/v8/src/codegen/riscv/assembler-riscv.h index 2577e12a5d8038..5c408bfd2eeace 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv.h @@ -303,6 +303,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, // See Assembler::CheckConstPool for more info. void EmitPoolGuard(); + void FinishCode() { ForceConstantPoolEmissionWithoutJump(); } + #if defined(V8_TARGET_ARCH_RISCV64) static void set_target_value_at( Address pc, uint64_t target, @@ -617,6 +619,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, } } + inline int next_buffer_check() { return next_buffer_check_; } + friend class VectorUnit; class VectorUnit { public: @@ -728,16 +732,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, // Block the emission of the trampoline pool before pc_offset. void BlockTrampolinePoolBefore(int pc_offset) { - if (no_trampoline_pool_before_ < pc_offset) + if (no_trampoline_pool_before_ < pc_offset) { + DEBUG_PRINTF("\tBlockTrampolinePoolBefore %d\n", pc_offset); no_trampoline_pool_before_ = pc_offset; + } } void StartBlockTrampolinePool() { - DEBUG_PRINTF("\tStartBlockTrampolinePool\n"); + DEBUG_PRINTF("\tStartBlockTrampolinePool %d\n", pc_offset()); trampoline_pool_blocked_nesting_++; } void EndBlockTrampolinePool() { + DEBUG_PRINTF("\tEndBlockTrampolinePool\n"); trampoline_pool_blocked_nesting_--; DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n", trampoline_pool_blocked_nesting_); @@ -767,6 +774,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase, bool is_buffer_growth_blocked() const { return block_buffer_growth_; } + inline int ConstpoolComputesize() { + return constpool_.ComputeSize(Jump::kOmitted, Alignment::kOmitted); + } + private: // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512 * MB; diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc index 9ac7746ad14e15..28e648fb0c2beb 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc @@ -4926,11 +4926,22 @@ void MacroAssembler::LoadRootRegisterOffset(Register destination, void MacroAssembler::Jump(Register target, Condition cond, Register rs, const Operand& rt) { - BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { jr(target); + DEBUG_PRINTF("\tCheckTrampolinePool pc_offset:%d %d\n", pc_offset(), + next_buffer_check() - ConstpoolComputesize()); + if (!is_trampoline_emitted() && v8_flags.debug_code && + pc_offset() >= (next_buffer_check() - ConstpoolComputesize())) { + // Debug mode will emit more instrs than Release mode. + // so we need to check trampoline pool before Constant pool. + // Here need to emit trampoline first. + // Jump(ra, al) will block trampoline pool for 1 instr. 
+ nop(); + CheckTrampolinePool(); + } ForceConstantPoolEmissionWithoutJump(); } else { + BlockTrampolinePoolScope block_trampoline_pool(this); BRANCH_ARGS_CHECK(cond, rs, rt); Branch(kInstrSize * 2, NegateCondition(cond), rs, rt); jr(target); @@ -5342,9 +5353,6 @@ void MacroAssembler::StoreReturnAddressAndCall(Register target) { void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) { Jump(ra, cond, rs, rt); - if (cond == al) { - ForceConstantPoolEmissionWithoutJump(); - } } void MacroAssembler::BranchLong(Label* L) { From 105fbdd62455b410f3082a93f6f6fea92348a8b0 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Wed, 18 Jun 2025 09:02:36 +0800 Subject: [PATCH 4/5] deps: V8: cherry-pick 394a8053b59e Original commit message: [riscv] Check trampoline before Constant pool in Release mode Change-Id: I9645cded9328dabb2c11c7859b998c838b95f97b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6653368 Commit-Queue: Ji Qiu Reviewed-by: Ji Qiu Cr-Commit-Position: refs/heads/main@{#100935} Refs: https://github.com/v8/v8/commit/394a8053b59e719f9347a9695062acafe6007def PR-URL: https://github.com/nodejs/node/pull/60962 Reviewed-By: Richard Lau Reviewed-By: Stewart X Addison --- common.gypi | 2 +- deps/v8/src/codegen/riscv/macro-assembler-riscv.cc | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/common.gypi b/common.gypi index eac3302d385349..6f8da680a2dd0f 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.38', + 'v8_embedder_string': '-node.39', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc index 28e648fb0c2beb..12379ec0ff6fd2 100644 --- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc +++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc @@ -4930,10 +4930,9 @@ void MacroAssembler::Jump(Register target, Condition cond, Register rs, jr(target); DEBUG_PRINTF("\tCheckTrampolinePool pc_offset:%d %d\n", pc_offset(), next_buffer_check() - ConstpoolComputesize()); - if (!is_trampoline_emitted() && v8_flags.debug_code && + if (!is_trampoline_emitted() && pc_offset() >= (next_buffer_check() - ConstpoolComputesize())) { - // Debug mode will emit more instrs than Release mode. - // so we need to check trampoline pool before Constant pool. + // We need to check trampoline pool before Constant pool. // Here need to emit trampoline first. // Jump(ra, al) will block trampoline pool for 1 instr. nop(); From d4dceb93cb7737d0f3d46fd188a685d2e3e7d1f8 Mon Sep 17 00:00:00 2001 From: Domagoj Stolfa Date: Thu, 27 Mar 2025 11:34:38 +0000 Subject: [PATCH 5/5] deps: V8: cherry-pick 1441665e0d87 Original commit message: [riscv] Fix the RISC-V build. Due to recent changes, there were missing implementations of various methods needed in the simulator and Maglev. Additionally, a static_cast is needed in the assembler to silence a warning. 
Port commit dfc894cd22d86ce42830e3bfdf485d963f6396ad Port commit c33af9bd408eadd6b62571f862bcb5b763c98ad9 Change-Id: Ie37a1cfa8225fc12f367ff62139cc7cd8fa967d0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6396542 Reviewed-by: Victor Gomes Commit-Queue: Yahan Lu (LuYahan) Reviewed-by: Ji Qiu Reviewed-by: Yahan Lu (LuYahan) Cr-Commit-Position: refs/heads/main@{#99706} Refs: https://github.com/v8/v8/commit/1441665e0d87b541932390385101840758b44924 PR-URL: https://github.com/nodejs/node/pull/60989 Reviewed-By: Richard Lau Reviewed-By: Beth Griggs --- common.gypi | 2 +- deps/v8/AUTHORS | 1 + .../src/codegen/riscv/assembler-riscv-inl.h | 5 +-- deps/v8/src/execution/riscv/simulator-riscv.h | 1 + deps/v8/src/maglev/riscv/maglev-ir-riscv.cc | 34 +++++++++++++++++++ 5 files changed, 40 insertions(+), 3 deletions(-) diff --git a/common.gypi b/common.gypi index 6f8da680a2dd0f..7fd9ec1401b962 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.39', + 'v8_embedder_string': '-node.40', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index e5a0957b3a928e..280e33e6841cc1 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -334,3 +334,4 @@ Kotaro Ohsugi Jing Peiyang magic-akari Ryuhei Shima +Domagoj Stolfa diff --git a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h index d5e11e562b334c..2d73dc572ec2d5 100644 --- a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h +++ b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h @@ -115,8 +115,9 @@ void Assembler::set_target_compressed_address_at( Address pc, Address constant_pool, Tagged_t target, WritableJitAllocation* jit_allocation, ICacheFlushMode icache_flush_mode) { if (COMPRESS_POINTERS_BOOL) { - Assembler::set_uint32_constant_at(pc, constant_pool, target, jit_allocation, - icache_flush_mode); + Assembler::set_uint32_constant_at(pc, constant_pool, + static_cast(target), + jit_allocation, icache_flush_mode); } else { UNREACHABLE(); } diff --git a/deps/v8/src/execution/riscv/simulator-riscv.h b/deps/v8/src/execution/riscv/simulator-riscv.h index 0ec51ff3db967c..82164754a904e2 100644 --- a/deps/v8/src/execution/riscv/simulator-riscv.h +++ b/deps/v8/src/execution/riscv/simulator-riscv.h @@ -538,6 +538,7 @@ class Simulator : public SimulatorBase { // Return central stack view, without additional safety margins. // Users, for example wasm::StackMemory, can add their own. base::Vector GetCentralStackView() const; + static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; } void IterateRegistersAndStack(::heap::base::StackVisitor* visitor); diff --git a/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc b/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc index 53c8c3ce3ac667..2bf5e1f5089825 100644 --- a/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc +++ b/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc @@ -224,6 +224,40 @@ void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm, Operand(std::numeric_limits::min())); } +void CheckFloat64SameValue::SetValueLocationConstraints() { + UseRegister(target_input()); + // We need two because LoadFPRImmediate needs to acquire one as well in the + // case where value() is not 0.0 or -0.0. + set_temporaries_needed((value().get_scalar() == 0) ? 1 : 2); + set_double_temporaries_needed( + value().is_nan() || (value().get_scalar() == 0) ? 
0 : 1); +} + +void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Label* fail = __ GetDeoptLabel(this, deoptimize_reason()); + MaglevAssembler::TemporaryRegisterScope temps(masm); + DoubleRegister target = ToDoubleRegister(target_input()); + if (value().is_nan()) { + __ JumpIfNotNan(target, fail); + } else { + DoubleRegister double_scratch = temps.AcquireScratchDouble(); + Register scratch = temps.AcquireScratch(); + __ Move(double_scratch, value().get_scalar()); + __ CompareF64(scratch, EQ, double_scratch, target); + __ BranchFalseF(scratch, fail); + if (value().get_scalar() == 0) { // +0.0 or -0.0. + __ MacroAssembler::Move(scratch, target); + __ And(scratch, scratch, Operand(1ULL << 63)); + if (value().get_bits() == 0) { + __ BranchTrueF(scratch, fail); + } else { + __ BranchFalseF(scratch, fail); + } + } + } +} + void Int32AddWithOverflow::SetValueLocationConstraints() { UseRegister(left_input()); UseRegister(right_input());