diff --git a/common.gypi b/common.gypi
index db1625378697d9..807a37ee4f6f6b 100644
--- a/common.gypi
+++ b/common.gypi
@@ -38,7 +38,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.37',
+    'v8_embedder_string': '-node.41',
 
     ##### V8 defaults for Node.js #####
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index e5a0957b3a928e..280e33e6841cc1 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -334,3 +334,4 @@ Kotaro Ohsugi
 Jing Peiyang
 magic-akari
 Ryuhei Shima
+Domagoj Stolfa
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index 332688e77b48e6..5f20c568608081 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -950,6 +950,13 @@ class V8_EXPORT Isolate {
    */
   void GetHeapStatistics(HeapStatistics* heap_statistics);
 
+  /**
+   * Get total allocated bytes since isolate creation.
+   * This should be used only by Node.js, since it's a temporary method
+   * to avoid breaking ABI on HeapStatistics.
+   */
+  uint64_t GetTotalAllocatedBytes();
+
   /**
    * Returns the number of spaces in the heap.
    */
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index ba759168aa92f5..71c07e78865efc 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -10363,6 +10363,11 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
+uint64_t Isolate::GetTotalAllocatedBytes() {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
+  return i_isolate->heap()->GetTotalAllocatedBytes();
+}
+
 size_t Isolate::NumberOfHeapSpaces() {
   return i::LAST_SPACE - i::FIRST_SPACE + 1;
 }
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
index d5e11e562b334c..2d73dc572ec2d5 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
+++ b/deps/v8/src/codegen/riscv/assembler-riscv-inl.h
@@ -115,8 +115,9 @@ void Assembler::set_target_compressed_address_at(
     Address pc, Address constant_pool, Tagged_t target,
     WritableJitAllocation* jit_allocation, ICacheFlushMode icache_flush_mode) {
   if (COMPRESS_POINTERS_BOOL) {
-    Assembler::set_uint32_constant_at(pc, constant_pool, target,
-                                      jit_allocation, icache_flush_mode);
+    Assembler::set_uint32_constant_at(pc, constant_pool,
+                                      static_cast<uint32_t>(target),
+                                      jit_allocation, icache_flush_mode);
   } else {
     UNREACHABLE();
   }
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.cc b/deps/v8/src/codegen/riscv/assembler-riscv.cc
index a9093ed33122c1..6cc3724b25ab10 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/assembler-riscv.cc
@@ -720,8 +720,8 @@ void Assembler::bind_to(Label* L, int pos) {
         trampoline_pos = get_trampoline_entry(fixup_pos);
         CHECK_NE(trampoline_pos, kInvalidSlotPos);
       }
-      CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
       DEBUG_PRINTF("\t\ttrampolining: %d\n", trampoline_pos);
+      CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
       target_at_put(fixup_pos, trampoline_pos, false);
       fixup_pos = trampoline_pos;
     }
@@ -1486,6 +1486,7 @@ void Assembler::BlockTrampolinePoolFor(int instructions) {
 }
 
 void Assembler::CheckTrampolinePool() {
+  if (trampoline_emitted_) return;
   // Some small sequences of instructions must not be broken up by the
   // insertion of a trampoline pool; such sequences are protected by setting
   // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
@@ -1507,7 +1508,6 @@ void Assembler::CheckTrampolinePool() {
     return;
   }
 
-  DCHECK(!trampoline_emitted_);
   DCHECK_GE(unbound_labels_count_, 0);
   if (unbound_labels_count_ > 0) {
     // First we emit jump, then we emit trampoline pool.
diff --git a/deps/v8/src/codegen/riscv/assembler-riscv.h b/deps/v8/src/codegen/riscv/assembler-riscv.h
index 2577e12a5d8038..5c408bfd2eeace 100644
--- a/deps/v8/src/codegen/riscv/assembler-riscv.h
+++ b/deps/v8/src/codegen/riscv/assembler-riscv.h
@@ -303,6 +303,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
   // See Assembler::CheckConstPool for more info.
   void EmitPoolGuard();
 
+  void FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
+
 #if defined(V8_TARGET_ARCH_RISCV64)
   static void set_target_value_at(
       Address pc, uint64_t target,
@@ -617,6 +619,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
     }
   }
 
+  inline int next_buffer_check() { return next_buffer_check_; }
+
   friend class VectorUnit;
   class VectorUnit {
    public:
@@ -728,16 +732,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
 
   // Block the emission of the trampoline pool before pc_offset.
   void BlockTrampolinePoolBefore(int pc_offset) {
-    if (no_trampoline_pool_before_ < pc_offset)
+    if (no_trampoline_pool_before_ < pc_offset) {
+      DEBUG_PRINTF("\tBlockTrampolinePoolBefore %d\n", pc_offset);
       no_trampoline_pool_before_ = pc_offset;
+    }
   }
 
   void StartBlockTrampolinePool() {
-    DEBUG_PRINTF("\tStartBlockTrampolinePool\n");
+    DEBUG_PRINTF("\tStartBlockTrampolinePool %d\n", pc_offset());
     trampoline_pool_blocked_nesting_++;
   }
 
   void EndBlockTrampolinePool() {
+    DEBUG_PRINTF("\tEndBlockTrampolinePool\n");
     trampoline_pool_blocked_nesting_--;
     DEBUG_PRINTF("\ttrampoline_pool_blocked_nesting:%d\n",
                  trampoline_pool_blocked_nesting_);
@@ -767,6 +774,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase,
 
   bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
 
+  inline int ConstpoolComputesize() {
+    return constpool_.ComputeSize(Jump::kOmitted, Alignment::kOmitted);
+  }
+
  private:
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512 * MB;
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
index 9ac7746ad14e15..12379ec0ff6fd2 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
@@ -4926,11 +4926,21 @@ void MacroAssembler::LoadRootRegisterOffset(Register destination,
 
 void MacroAssembler::Jump(Register target, Condition cond, Register rs,
                           const Operand& rt) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
   if (cond == cc_always) {
     jr(target);
+    DEBUG_PRINTF("\tCheckTrampolinePool pc_offset:%d %d\n", pc_offset(),
+                 next_buffer_check() - ConstpoolComputesize());
+    if (!is_trampoline_emitted() &&
+        pc_offset() >= (next_buffer_check() - ConstpoolComputesize())) {
+      // The trampoline pool must be checked before the constant pool, so
+      // the trampoline has to be emitted first here.
+      // Jump(ra, al) blocks the trampoline pool for one instruction.
+      nop();
+      CheckTrampolinePool();
+    }
     ForceConstantPoolEmissionWithoutJump();
   } else {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
     BRANCH_ARGS_CHECK(cond, rs, rt);
     Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
     jr(target);
@@ -5342,9 +5352,6 @@ void MacroAssembler::StoreReturnAddressAndCall(Register target) {
 
 void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
   Jump(ra, cond, rs, rt);
-  if (cond == al) {
-    ForceConstantPoolEmissionWithoutJump();
-  }
 }
 
 void MacroAssembler::BranchLong(Label* L) {
diff --git a/deps/v8/src/execution/riscv/simulator-riscv.h b/deps/v8/src/execution/riscv/simulator-riscv.h
index 0ec51ff3db967c..82164754a904e2 100644
--- a/deps/v8/src/execution/riscv/simulator-riscv.h
+++ b/deps/v8/src/execution/riscv/simulator-riscv.h
@@ -538,6 +538,7 @@ class Simulator : public SimulatorBase {
   // Return central stack view, without additional safety margins.
   // Users, for example wasm::StackMemory, can add their own.
   base::Vector<uint8_t> GetCentralStackView() const;
+  static constexpr int JSStackLimitMargin() { return kAdditionalStackMargin; }
 
   void IterateRegistersAndStack(::heap::base::StackVisitor* visitor);
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
index 6f5946fc2374c3..88491df2ce2388 100644
--- a/deps/v8/src/heap/heap-allocator.cc
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -65,24 +65,42 @@ AllocationResult HeapAllocator::AllocateRawLargeInternal(
     int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
     AllocationAlignment alignment) {
   DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
+  AllocationResult allocation_result;
   switch (allocation) {
     case AllocationType::kYoung:
-      return new_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result =
+          new_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      break;
     case AllocationType::kOld:
-      return lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result =
+          lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      break;
     case AllocationType::kCode:
-      return code_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result =
+          code_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      break;
     case AllocationType::kSharedOld:
-      return shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result =
+          shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      break;
     case AllocationType::kTrusted:
-      return trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result =
+          trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      break;
     case AllocationType::kSharedTrusted:
-      return shared_trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes);
+      allocation_result = shared_trusted_lo_space()->AllocateRaw(
+          local_heap_, size_in_bytes);
+      break;
     case AllocationType::kMap:
     case AllocationType::kReadOnly:
     case AllocationType::kSharedMap:
       UNREACHABLE();
   }
+  if (!allocation_result.IsFailure()) {
+    int allocated_size = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+    heap_->AddTotalAllocatedBytes(allocated_size);
+  }
+  return allocation_result;
 }
 
 namespace {
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 62ae8089e9d73a..785df34b8ab2a3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -7447,6 +7447,10 @@ int Heap::NextStackTraceId() {
   return last_id;
 }
 
+uint64_t Heap::GetTotalAllocatedBytes() {
+  return total_allocated_bytes_.load(std::memory_order_relaxed);
+}
+
 EmbedderStackStateScope::EmbedderStackStateScope(
     Heap* heap, EmbedderStackStateOrigin origin, StackState stack_state)
     : heap_(heap),
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index c209a1c98d9860..b3b6fe46ed1987 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -1635,6 +1635,11 @@ class Heap final {
   bool ShouldUseBackgroundThreads() const;
   bool ShouldUseIncrementalMarking() const;
 
+  void AddTotalAllocatedBytes(size_t size) {
+    total_allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
+  }
+  uint64_t GetTotalAllocatedBytes();
+
   HeapAllocator* allocator() { return heap_allocator_; }
   const HeapAllocator* allocator() const { return heap_allocator_; }
@@ -2409,6 +2414,8 @@ class Heap final {
   // actually finished.
   bool is_full_gc_during_loading_ = false;
 
+  std::atomic<uint64_t> total_allocated_bytes_ = 0;
+
   // Classes in "heap" can be friends.
   friend class ActivateMemoryReducerTask;
   friend class AlwaysAllocateScope;
diff --git a/deps/v8/src/heap/main-allocator.cc b/deps/v8/src/heap/main-allocator.cc
index 375cc521989352..d4040f183f60cd 100644
--- a/deps/v8/src/heap/main-allocator.cc
+++ b/deps/v8/src/heap/main-allocator.cc
@@ -298,6 +298,12 @@ void MainAllocator::ResetLab(Address start, Address end, Address extended_end) {
     MemoryChunkMetadata::UpdateHighWaterMark(top());
   }
 
+  // This slightly overestimates the total allocated bytes, since the LAB has
+  // not been used yet. However, the leftover is quite small compared to the
+  // LAB itself, so it seems tolerable.
+  if (local_heap_) {
+    local_heap_->heap()->AddTotalAllocatedBytes(end - start);
+  }
   allocation_info().Reset(start, end);
 
   if (SupportsPendingAllocation()) {
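A note on the counter's semantics: it is only ever incremented (`fetch_add` in `heap.h`, plus the slightly-overestimating LAB accounting in `main-allocator.cc`), so unlike `used_heap_size` it is monotonic and never drops across garbage collections. A minimal JavaScript sketch of that contrast, assuming the `total_allocated_bytes` property wired up later in this patch; run with `node --expose-gc`:

```js
'use strict';
// Sketch only: relies on the `total_allocated_bytes` property exposed by the
// lib/v8.js change below; run with `node --expose-gc`.
const v8 = require('node:v8');

function snapshot() {
  const { used_heap_size, total_allocated_bytes } = v8.getHeapStatistics();
  return { used_heap_size, total_allocated_bytes };
}

const before = snapshot();
let junk = new Array(1e6).fill('x');  // allocate a few megabytes
junk = null;
if (typeof globalThis.gc === 'function') globalThis.gc();  // reclaim it
const after = snapshot();

// used_heap_size may fall back after the GC, but total_allocated_bytes is a
// monotonic counter: it is only ever incremented, so it never decreases.
console.assert(after.total_allocated_bytes >= before.total_allocated_bytes);
```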
diff --git a/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc b/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc
index 53c8c3ce3ac667..2bf5e1f5089825 100644
--- a/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc
+++ b/deps/v8/src/maglev/riscv/maglev-ir-riscv.cc
@@ -224,6 +224,40 @@ void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                 Operand(std::numeric_limits<int32_t>::min()));
 }
 
+void CheckFloat64SameValue::SetValueLocationConstraints() {
+  UseRegister(target_input());
+  // We need two because LoadFPRImmediate needs to acquire one as well in the
+  // case where value() is not 0.0 or -0.0.
+  set_temporaries_needed((value().get_scalar() == 0) ? 1 : 2);
+  set_double_temporaries_needed(
+      value().is_nan() || (value().get_scalar() == 0) ? 0 : 1);
+}
+
+void CheckFloat64SameValue::GenerateCode(MaglevAssembler* masm,
+                                         const ProcessingState& state) {
+  Label* fail = __ GetDeoptLabel(this, deoptimize_reason());
+  MaglevAssembler::TemporaryRegisterScope temps(masm);
+  DoubleRegister target = ToDoubleRegister(target_input());
+  if (value().is_nan()) {
+    __ JumpIfNotNan(target, fail);
+  } else {
+    DoubleRegister double_scratch = temps.AcquireScratchDouble();
+    Register scratch = temps.AcquireScratch();
+    __ Move(double_scratch, value().get_scalar());
+    __ CompareF64(scratch, EQ, double_scratch, target);
+    __ BranchFalseF(scratch, fail);
+    if (value().get_scalar() == 0) {  // +0.0 or -0.0.
+      __ MacroAssembler::Move(scratch, target);
+      __ And(scratch, scratch, Operand(1ULL << 63));
+      if (value().get_bits() == 0) {
+        __ BranchTrueF(scratch, fail);
+      } else {
+        __ BranchFalseF(scratch, fail);
+      }
+    }
+  }
+}
+
 void Int32AddWithOverflow::SetValueLocationConstraints() {
   UseRegister(left_input());
   UseRegister(right_input());
diff --git a/doc/api/v8.md b/doc/api/v8.md
index 6a50e7f2109a9d..076d76f023eb24 100644
--- a/doc/api/v8.md
+++ b/doc/api/v8.md
@@ -197,6 +197,7 @@ Returns an object with the following properties:
 * `total_global_handles_size` {number}
 * `used_global_handles_size` {number}
 * `external_memory` {number}
+* `total_allocated_bytes` {number}
 
 `total_heap_size` The value of total\_heap\_size is the number of bytes V8 has
 allocated for the heap. This can grow if used\_heap needs more memory.
@@ -250,6 +251,9 @@ used memory size of V8 global handles.
 `external_memory` The value of external\_memory is the memory size of array
 buffers and external strings.
 
+`total_allocated_bytes` The value of total\_allocated\_bytes is the total
+number of bytes allocated since the isolate was created.
+
 ```js
@@ -267,7 +271,8 @@ buffers and external strings.
   number_of_detached_contexts: 0,
   total_global_handles_size: 8192,
   used_global_handles_size: 3296,
-  external_memory: 318824
+  external_memory: 318824,
+  total_allocated_bytes: 24970208
 }
 ```
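Since the documented counter never resets for the lifetime of the isolate, sampling it before and after a piece of code approximates how many bytes that code allocated. A minimal usage sketch, assuming a Node.js build that includes this patch:

```js
'use strict';
const v8 = require('node:v8');

const start = v8.getHeapStatistics().total_allocated_bytes;
const items = new Array(1000).fill().map((_, i) => ({ i, label: `item-${i}` }));
const end = v8.getHeapStatistics().total_allocated_bytes;

// The delta approximates (and slightly overestimates, due to LAB accounting)
// the bytes allocated between the two samples.
console.log(`~${end - start} bytes allocated for ${items.length} items`);
```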
"used_global_handles_size", "external_memory", + "total_allocated_bytes", }; tmpl = DictionaryTemplate::New(isolate, heap_stats_names); env->set_heap_statistics_template(tmpl); @@ -1283,7 +1284,8 @@ void Worker::GetHeapStatistics(const FunctionCallbackInfo& args) { Number::New(isolate, heap_stats->number_of_detached_contexts()), Number::New(isolate, heap_stats->total_global_handles_size()), Number::New(isolate, heap_stats->used_global_handles_size()), - Number::New(isolate, heap_stats->external_memory())}; + Number::New(isolate, heap_stats->external_memory()), + Number::New(isolate, isolate->GetTotalAllocatedBytes())}; Local obj; if (!NewDictionaryInstanceNullProto( diff --git a/test/parallel/test-v8-stats.js b/test/parallel/test-v8-stats.js index 07be833e6e749a..5ee4c5aeb31adf 100644 --- a/test/parallel/test-v8-stats.js +++ b/test/parallel/test-v8-stats.js @@ -12,6 +12,7 @@ const keys = [ 'number_of_detached_contexts', 'number_of_native_contexts', 'peak_malloced_memory', + 'total_allocated_bytes', 'total_available_size', 'total_global_handles_size', 'total_heap_size', diff --git a/test/parallel/test-worker-heap-statistics.js b/test/parallel/test-worker-heap-statistics.js index 12a748c303a026..ba3165aa24aba0 100644 --- a/test/parallel/test-worker-heap-statistics.js +++ b/test/parallel/test-worker-heap-statistics.js @@ -40,6 +40,7 @@ if (isMainThread) { `total_global_handles_size`, `used_global_handles_size`, `external_memory`, + `total_allocated_bytes`, ].sort(); assert.deepStrictEqual(keys, Object.keys(stats).sort()); for (const key of keys) {