diff --git a/ddprof-lib/src/main/cpp/javaApi.cpp b/ddprof-lib/src/main/cpp/javaApi.cpp
index cbee0ab2..355fcd51 100644
--- a/ddprof-lib/src/main/cpp/javaApi.cpp
+++ b/ddprof-lib/src/main/cpp/javaApi.cpp
@@ -348,7 +348,7 @@ Java_com_datadoghq_profiler_JVMAccess_findStringJVMFlag0(JNIEnv *env,
                                                          jobject unused,
                                                          jstring flagName) {
   JniString flag_str(env, flagName);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(), {JVMFlag::Type::String, JVMFlag::Type::Stringlist});
+  VMFlag *f = VMFlag::find(flag_str.c_str(), {VMFlag::Type::String, VMFlag::Type::Stringlist});
   if (f) {
     char** value = static_cast<char**>(f->addr());
     if (value != NULL && *value != NULL) {
@@ -365,7 +365,7 @@ Java_com_datadoghq_profiler_JVMAccess_setStringJVMFlag0(JNIEnv *env,
                                                         jstring flagValue) {
   JniString flag_str(env, flagName);
   JniString value_str(env, flagValue);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(), {JVMFlag::Type::String, JVMFlag::Type::Stringlist});
+  VMFlag *f = VMFlag::find(flag_str.c_str(), {VMFlag::Type::String, VMFlag::Type::Stringlist});
   if (f) {
     char** value = static_cast<char**>(f->addr());
     if (value != NULL) {
@@ -379,7 +379,7 @@ Java_com_datadoghq_profiler_JVMAccess_findBooleanJVMFlag0(JNIEnv *env,
                                                           jobject unused,
                                                           jstring flagName) {
   JniString flag_str(env, flagName);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(), {JVMFlag::Type::Bool});
+  VMFlag *f = VMFlag::find(flag_str.c_str(), {VMFlag::Type::Bool});
   if (f) {
     char* value = static_cast<char*>(f->addr());
     if (value != NULL) {
@@ -395,7 +395,7 @@ Java_com_datadoghq_profiler_JVMAccess_setBooleanJVMFlag0(JNIEnv *env,
                                                          jstring flagName,
                                                          jboolean flagValue) {
   JniString flag_str(env, flagName);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(), {JVMFlag::Type::Bool});
+  VMFlag *f = VMFlag::find(flag_str.c_str(), {VMFlag::Type::Bool});
   if (f) {
     char* value = static_cast<char*>(f->addr());
     if (value != NULL) {
@@ -409,7 +409,7 @@ Java_com_datadoghq_profiler_JVMAccess_findIntJVMFlag0(JNIEnv *env,
                                                       jobject unused,
                                                       jstring flagName) {
   JniString flag_str(env, flagName);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(), {JVMFlag::Type::Int, JVMFlag::Type::Uint, JVMFlag::Type::Intx, JVMFlag::Type::Uintx, JVMFlag::Type::Uint64_t, JVMFlag::Type::Size_t});
+  VMFlag *f = VMFlag::find(flag_str.c_str(), {VMFlag::Type::Int, VMFlag::Type::Uint, VMFlag::Type::Intx, VMFlag::Type::Uintx, VMFlag::Type::Uint64_t, VMFlag::Type::Size_t});
   if (f) {
     long* value = static_cast<long*>(f->addr());
     if (value != NULL) {
@@ -424,7 +424,7 @@ Java_com_datadoghq_profiler_JVMAccess_findFloatJVMFlag0(JNIEnv *env,
                                                         jobject unused,
                                                         jstring flagName) {
   JniString flag_str(env, flagName);
-  JVMFlag *f = JVMFlag::find(flag_str.c_str(),{ JVMFlag::Type::Double});
+  VMFlag *f = VMFlag::find(flag_str.c_str(),{ VMFlag::Type::Double});
   if (f) {
     double* value = static_cast<double*>(f->addr());
     if (value != NULL) {
diff --git a/ddprof-lib/src/main/cpp/profiler.cpp b/ddprof-lib/src/main/cpp/profiler.cpp
index b44edfcc..7158b145 100644
--- a/ddprof-lib/src/main/cpp/profiler.cpp
+++ b/ddprof-lib/src/main/cpp/profiler.cpp
@@ -587,7 +587,7 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
       VM::_asyncGetCallTrace(&trace, max_depth, ucontext);
     }
   } else if (VMStructs::hasMethodStructs()) {
-    NMethod *nmethod = CodeHeap::findNMethod((const void *)frame.pc());
+    VMNMethod *nmethod = CodeHeap::findNMethod((const void *)frame.pc());
     if (nmethod != NULL && nmethod->isNMethod() && nmethod->isAlive()) {
       VMMethod *method = nmethod->method();
       if (method != NULL) {
@@ -623,7 +623,7 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
     }
 
   } else if (trace.num_frames == ticks_unknown_not_Java && !(_safe_mode & LAST_JAVA_PC)) {
-    JavaFrameAnchor* anchor = vm_thread->anchor();
+    VMJavaFrameAnchor* anchor = vm_thread->anchor();
    uintptr_t sp = anchor->lastJavaSP();
    const void* pc = anchor->lastJavaPC();
    if (sp != 0 && pc == NULL) {
@@ -632,7 +632,7 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
       pc = ((const void**)sp)[-1];
       anchor->setLastJavaPC(pc);
 
-      NMethod *m = CodeHeap::findNMethod(pc);
+      VMNMethod *m = CodeHeap::findNMethod(pc);
       if (m != NULL) {
         // AGCT fails if the last Java frame is a Runtime Stub with an invalid
         // _frame_complete_offset. In this case we patch _frame_complete_offset
@@ -650,13 +650,13 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
       }
     }
   } else if (trace.num_frames == ticks_not_walkable_not_Java && !(_safe_mode & LAST_JAVA_PC)) {
-    JavaFrameAnchor* anchor = vm_thread->anchor();
+    VMJavaFrameAnchor* anchor = vm_thread->anchor();
     uintptr_t sp = anchor->lastJavaSP();
     const void* pc = anchor->lastJavaPC();
     if (sp != 0 && pc != NULL) {
       // Similar to the above: last Java frame is set,
       // but points to a Runtime Stub with an invalid _frame_complete_offset
-      NMethod *m = CodeHeap::findNMethod(pc);
+      VMNMethod *m = CodeHeap::findNMethod(pc);
       if (m != NULL && !m->isNMethod() && m->frameSize() > 0 &&
           m->frameCompleteOffset() == -1) {
         m->setFrameCompleteOffset(0);
@@ -691,7 +691,7 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
 }
 
 void Profiler::fillFrameTypes(ASGCT_CallFrame *frames, int num_frames,
-                              NMethod *nmethod) {
+                              VMNMethod *nmethod) {
   if (nmethod->isNMethod() && nmethod->isAlive()) {
     VMMethod *method = nmethod->method();
     if (method == NULL) {
@@ -850,7 +850,7 @@ void Profiler::recordSample(void *ucontext, u64 counter, int tid,
     if (mutex.acquired()) {
       java_frames = getJavaTraceAsync(ucontext, frames + num_frames, max_remaining, &java_ctx, &truncated);
       if (java_frames > 0 && java_ctx.pc != NULL && VMStructs::hasMethodStructs()) {
-        NMethod* nmethod = CodeHeap::findNMethod(java_ctx.pc);
+        VMNMethod* nmethod = CodeHeap::findNMethod(java_ctx.pc);
         if (nmethod != NULL) {
           fillFrameTypes(frames + num_frames, java_frames, nmethod);
         }
diff --git a/ddprof-lib/src/main/cpp/profiler.h b/ddprof-lib/src/main/cpp/profiler.h
index ed5c3650..0977a0a9 100644
--- a/ddprof-lib/src/main/cpp/profiler.h
+++ b/ddprof-lib/src/main/cpp/profiler.h
@@ -52,7 +52,7 @@ union CallTraceBuffer {
 };
 
 class FrameName;
-class NMethod;
+class VMNMethod;
 class StackContext;
 class VM;
@@ -178,7 +178,7 @@ class alignas(alignof(SpinLock)) Profiler {
   int getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames, int max_depth,
                         StackContext *java_ctx, bool *truncated);
   void fillFrameTypes(ASGCT_CallFrame *frames, int num_frames,
-                      NMethod *nmethod);
+                      VMNMethod *nmethod);
   void updateThreadName(jvmtiEnv *jvmti, JNIEnv *jni, jthread thread,
                         bool self = false);
   void updateJavaThreadNames();
diff --git a/ddprof-lib/src/main/cpp/safeAccess.h b/ddprof-lib/src/main/cpp/safeAccess.h
index 75b62cf8..16eb1e3f 100644
--- a/ddprof-lib/src/main/cpp/safeAccess.h
+++ b/ddprof-lib/src/main/cpp/safeAccess.h
@@ -19,6 +19,7 @@
 
 #include "arch.h"
 #include "codeCache.h"
+#include "os.h"
 #include <stddef.h>
 #include <stdint.h>
 
@@ -74,10 +75,24 @@ class SafeAccess {
   NOINLINE __attribute__((aligned(16))) static void *loadPtr(void** ptr, void* default_value);
 
-  static inline bool isReadable(void* ptr) {
+  static inline bool isReadable(const void* ptr) {
     return load32((int32_t*)ptr, 1) != 1 || load32((int32_t*)ptr, -1) != -1;
   }
+
+  static inline bool isReadableRange(const void* start, size_t size) {
+    assert(size > 0);
+    void* start_page = (void*)align_down((uintptr_t)start, OS::page_size);
+    void* end_page = (void*)align_down((uintptr_t)start + size - 1, OS::page_size);
+    // Memory readability is determined at the page level, so we check each page in the range for readability.
+    // This is more efficient than checking each byte.
+    for (void* page = start_page; page <= end_page; page = (void*)((uintptr_t)page + OS::page_size)) {
+      if (!isReadable(page)) {
+        return false;
+      }
+    }
+    return true;
+  }
 };
 
 #endif // _SAFEACCESS_H
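The page-walk arithmetic in the new `SafeAccess::isReadableRange` can be exercised in isolation. A minimal standalone sketch, assuming POSIX `msync(2)` as the per-page probe — the patch itself probes with the signal-safe `SafeAccess::load32`, and `msync` only detects unmapped pages, not `PROT_NONE` mappings:

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cerrno>
#include <cstdint>
#include <cstdio>

// Approximate stand-in for SafeAccess::isReadable: msync fails with ENOMEM
// when the page is not mapped at all.
static bool page_is_readable(uintptr_t page, size_t page_size) {
  return msync((void*)page, page_size, MS_ASYNC) == 0 || errno != ENOMEM;
}

// Same shape as isReadableRange: align both ends of the range down to a page
// boundary, then probe once per page instead of once per byte.
static bool range_is_readable(const void* start, size_t size) {
  size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
  uintptr_t first = (uintptr_t)start & ~(page_size - 1);
  uintptr_t last = ((uintptr_t)start + size - 1) & ~(page_size - 1);
  for (uintptr_t page = first; page <= last; page += page_size) {
    if (!page_is_readable(page, page_size)) return false;
  }
  return true;
}

int main() {
  int x = 42;
  printf("stack var: %d\n", range_is_readable(&x, sizeof(x)));  // 1
  printf("page zero: %d\n", range_is_readable((void*)16, 8));   // typically 0
  return 0;
}
```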
diff --git a/ddprof-lib/src/main/cpp/stackFrame.h b/ddprof-lib/src/main/cpp/stackFrame.h
index a8d5b6fa..07bd2da6 100644
--- a/ddprof-lib/src/main/cpp/stackFrame.h
+++ b/ddprof-lib/src/main/cpp/stackFrame.h
@@ -12,7 +12,7 @@
 
 #include "arch.h"
 
-class NMethod;
+class VMNMethod;
 
 class StackFrame {
  private:
@@ -61,7 +61,7 @@ class StackFrame {
     return unwindStub(entry, name, pc(), sp(), fp());
   }
 
-  bool unwindCompiled(NMethod* nm) {
+  bool unwindCompiled(VMNMethod* nm) {
     return unwindCompiled(nm, pc(), sp(), fp());
   }
 
@@ -69,15 +69,15 @@ class StackFrame {
   bool unwindAtomicStub(const void*& pc);
 
   // TODO: this function will be removed once `vm` becomes the default stack walking mode
-  bool unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
+  bool unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
 
-  bool unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
-  bool unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
+  bool unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
+  bool unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp);
 
   void adjustSP(const void* entry, const void* pc, uintptr_t& sp);
 
   // SP baseline helpers for compiled frame unwinding
-  uintptr_t sender_sp_baseline(const NMethod* nm, uintptr_t sp, uintptr_t fp, const void* pc);
+  uintptr_t sender_sp_baseline(const VMNMethod* nm, uintptr_t sp, uintptr_t fp, const void* pc);
   const void* read_caller_pc_from_sp(uintptr_t sp_base);
   uintptr_t read_saved_fp_from_sp(uintptr_t sp_base);
diff --git a/ddprof-lib/src/main/cpp/stackFrame_aarch64.cpp b/ddprof-lib/src/main/cpp/stackFrame_aarch64.cpp
index 12d17611..45f02f91 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_aarch64.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_aarch64.cpp
@@ -197,7 +197,7 @@ static inline bool isEntryBarrier(instruction_t* ip) {
   return ip[0] == 0xb9402389 && ip[1] == 0xeb09011f;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if ((*ip & 0xffe07fff) == 0xa9007bfd) {
@@ -236,7 +236,7 @@ static inline bool isFrameComplete(instruction_t* entry, instruction_t* ip) {
   return false;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // C1/C2 methods:
   //   {stack_bang}
   //   sub sp, sp, #0x40
@@ -312,7 +312,7 @@ static inline bool isPollReturn(instruction_t* ip) {
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // ldp x29, x30, [sp, #32]
   // add sp, sp, #0x30
   // {poll_return}
@@ -329,7 +329,7 @@ bool StackFrame::unwindAtomicStub(const void*& pc) {
   // VM threads may call generated atomic stubs, which are not normally walkable
   const void* lr = (const void*)link();
   if (VMStructs::libjvm()->contains(lr)) {
-    NMethod* nm = CodeHeap::findNMethod(pc);
+    VMNMethod* nm = CodeHeap::findNMethod(pc);
     if (nm != NULL && strncmp(nm->name(), "Stub", 4) == 0) {
       pc = lr;
       return true;
diff --git a/ddprof-lib/src/main/cpp/stackFrame_arm.cpp b/ddprof-lib/src/main/cpp/stackFrame_arm.cpp
index e175c964..a044795c 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_arm.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_arm.cpp
@@ -78,7 +78,7 @@ bool StackFrame::unwindStub(instruction_t* entry, const char* name, uintptr_t& p
   return false;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if (ip > entry && ip <= entry + 4 && (*ip & 0xffffff00) == 0xe24dd000) {
@@ -101,7 +101,7 @@ bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintp
   return true;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if (ip <= entry) {
@@ -111,7 +111,7 @@ bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintp
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
diff --git a/ddprof-lib/src/main/cpp/stackFrame_i386.cpp b/ddprof-lib/src/main/cpp/stackFrame_i386.cpp
index a30d16f4..428696a2 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_i386.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_i386.cpp
@@ -95,7 +95,7 @@ bool StackFrame::unwindStub(instruction_t* entry, const char* name, uintptr_t& p
   return false;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if (ip <= entry
@@ -116,7 +116,7 @@ bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintp
   return false;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if (ip <= entry || *ip == 0x55) { // push ebp
@@ -127,7 +127,7 @@ bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintp
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   if (*ip == 0xc3) { // ret
     pc = *(uintptr_t*)sp;
diff --git a/ddprof-lib/src/main/cpp/stackFrame_loongarch64.cpp b/ddprof-lib/src/main/cpp/stackFrame_loongarch64.cpp
index 99c15ace..3e09d3fa 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_loongarch64.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_loongarch64.cpp
@@ -77,17 +77,17 @@ bool StackFrame::unwindStub(instruction_t* entry, const char* name, uintptr_t& p
   return false;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
diff --git a/ddprof-lib/src/main/cpp/stackFrame_ppc64.cpp b/ddprof-lib/src/main/cpp/stackFrame_ppc64.cpp
index ad20ed5b..14bc6a8c 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_ppc64.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_ppc64.cpp
@@ -105,7 +105,7 @@ bool StackFrame::unwindStub(instruction_t* entry, const char* name, uintptr_t& p
   return true;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // On PPC there is a valid back link to the previous frame at all times. The callee stores
   // the return address in the caller's frame before it constructs its own frame. After it
   // has destroyed its frame it restores the link register and returns. A problematic sequence
@@ -127,12 +127,12 @@ bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintp
   return true;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
diff --git a/ddprof-lib/src/main/cpp/stackFrame_riscv64.cpp b/ddprof-lib/src/main/cpp/stackFrame_riscv64.cpp
index 54454177..6b2f68ec 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_riscv64.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_riscv64.cpp
@@ -77,17 +77,17 @@ bool StackFrame::unwindStub(instruction_t* entry, const char* name, uintptr_t& p
   return false;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // Not yet implemented
   return false;
 }
diff --git a/ddprof-lib/src/main/cpp/stackFrame_x64.cpp b/ddprof-lib/src/main/cpp/stackFrame_x64.cpp
index 7e61a266..5d84e587 100644
--- a/ddprof-lib/src/main/cpp/stackFrame_x64.cpp
+++ b/ddprof-lib/src/main/cpp/stackFrame_x64.cpp
@@ -102,7 +102,7 @@ __attribute__((no_sanitize("address"))) bool StackFrame::unwindStub(instruction_
   return false;
 }
 
-bool StackFrame::unwindCompiled(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindCompiled(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   instruction_t* ip = (instruction_t*)pc;
   instruction_t* entry = (instruction_t*)nm->entry();
   if (ip <= entry
@@ -156,7 +156,7 @@ static inline bool isFrameComplete(instruction_t* entry, instruction_t* ip) {
   return false;
 }
 
-bool StackFrame::unwindPrologue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindPrologue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // 0: mov %eax,-0x14000(%rsp)
   // 7: push %rbp
   // 8: mov %rsp,%rbp   ; for native methods only
@@ -219,7 +219,7 @@ static inline bool isPollReturn(instruction_t* ip) {
   return false;
 }
 
-bool StackFrame::unwindEpilogue(NMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
+bool StackFrame::unwindEpilogue(VMNMethod* nm, uintptr_t& pc, uintptr_t& sp, uintptr_t& fp) {
   // add $0x40,%rsp
   // pop %rbp
   // {poll_return}
diff --git a/ddprof-lib/src/main/cpp/stackWalker.cpp b/ddprof-lib/src/main/cpp/stackWalker.cpp
index 0f1eb93d..512524fe 100644
--- a/ddprof-lib/src/main/cpp/stackWalker.cpp
+++ b/ddprof-lib/src/main/cpp/stackWalker.cpp
@@ -11,7 +11,7 @@
 #include "safeAccess.h"
 #include "stackFrame.h"
 #include "symbols.h"
-#include "vmStructs.h"
+#include "vmStructs.inline.h"
 #include "thread.h"
 
 
@@ -232,7 +232,7 @@ __attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext,
   }
 }
 
-__attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext, ASGCT_CallFrame* frames, int max_depth, JavaFrameAnchor* anchor, EventType event_type, int lock_index, bool* truncated) {
+__attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext, ASGCT_CallFrame* frames, int max_depth, VMJavaFrameAnchor* anchor, EventType event_type, int lock_index, bool* truncated) {
   uintptr_t sp = anchor->lastJavaSP();
   if (sp == 0) {
     return 0;
@@ -269,7 +269,7 @@ __attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext,
   volatile int depth = 0;
   int actual_max_depth = truncated ? max_depth + 1 : max_depth;
 
-  JavaFrameAnchor* anchor = NULL;
+  VMJavaFrameAnchor* anchor = NULL;
   if (vm_thread != NULL) {
     anchor = vm_thread->anchor();
     vm_thread->exception() = &crash_protection_ctx;
@@ -313,7 +313,7 @@ __attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext,
       break;
     }
     prev_native_pc = NULL; // we are in JVM code, no previous 'native' PC
-    NMethod* nm = CodeHeap::findNMethod(pc);
+    VMNMethod* nm = CodeHeap::findNMethod(pc);
     if (nm == NULL) {
       if (anchor == NULL) {
         // Add an error frame only if we cannot recover
@@ -417,7 +417,7 @@ __attribute__((no_sanitize("address"))) int StackWalker::walkVM(void* ucontext,
       fillFrame(frames[depth++], BCI_ERROR, "break_interpreted");
       break;
     } else if (nm->isEntryFrame(pc) && !features.mixed) {
-      JavaFrameAnchor* next_anchor = JavaFrameAnchor::fromEntryFrame(fp);
+      VMJavaFrameAnchor* next_anchor = VMJavaFrameAnchor::fromEntryFrame(fp);
      if (next_anchor == NULL) {
        fillFrame(frames[depth++], BCI_ERROR, "break_entry_frame");
        break;
diff --git a/ddprof-lib/src/main/cpp/stackWalker.h b/ddprof-lib/src/main/cpp/stackWalker.h
index 2ac77690..f0f1d855 100644
--- a/ddprof-lib/src/main/cpp/stackWalker.h
+++ b/ddprof-lib/src/main/cpp/stackWalker.h
@@ -13,7 +13,7 @@
 
 #include "vmEntry.h"
 
-class JavaFrameAnchor;
+class VMJavaFrameAnchor;
 class ProfiledThread;
 
 struct StackContext {
@@ -61,7 +61,7 @@ class StackWalker {
   static int walkFP(void* ucontext, const void** callchain, int max_depth, StackContext* java_ctx, bool* truncated = nullptr);
   static int walkDwarf(void* ucontext, const void** callchain, int max_depth, StackContext* java_ctx, bool* truncated = nullptr);
   static int walkVM(void* ucontext, ASGCT_CallFrame* frames, int max_depth, StackWalkFeatures features, EventType event_type, int lock_index, bool* truncated = nullptr);
-  static int walkVM(void* ucontext, ASGCT_CallFrame* frames, int max_depth, JavaFrameAnchor* anchor, EventType event_type, int lock_index, bool* truncated = nullptr);
+  static int walkVM(void* ucontext, ASGCT_CallFrame* frames, int max_depth, VMJavaFrameAnchor* anchor, EventType event_type, int lock_index, bool* truncated = nullptr);
 
   static void checkFault(ProfiledThread* thrd = nullptr);
 };
diff --git a/ddprof-lib/src/main/cpp/utils.h b/ddprof-lib/src/main/cpp/utils.h
index b972f2c2..1d0473e5 100644
--- a/ddprof-lib/src/main/cpp/utils.h
+++ b/ddprof-lib/src/main/cpp/utils.h
@@ -1,3 +1,6 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License (Version 2.0).
+// This product includes software developed at Datadog (https://www.datadoghq.com/) Copyright 2025 Datadog, Inc.
+
 #ifndef _UTILS_H
 #define _UTILS_H
 
@@ -29,7 +32,4 @@ inline size_t align_up(size_t size, size_t alignment) noexcept {
   return align_down(size + alignment - 1, alignment);
 }
 
-
-
-
 #endif // _UTILS_H
\ No newline at end of file
diff --git a/ddprof-lib/src/main/cpp/vmEntry.cpp b/ddprof-lib/src/main/cpp/vmEntry.cpp
index 72ce6ab2..c0700a58 100644
--- a/ddprof-lib/src/main/cpp/vmEntry.cpp
+++ b/ddprof-lib/src/main/cpp/vmEntry.cpp
@@ -447,7 +447,7 @@ bool VM::initProfilerBridge(JavaVM *vm, bool attach) {
   } else {
     // DebugNonSafepoints is automatically enabled with CompiledMethodLoad,
     // otherwise we set the flag manually
-    JVMFlag* f = JVMFlag::find("DebugNonSafepoints", {JVMFlag::Type::Bool});
+    VMFlag* f = VMFlag::find("DebugNonSafepoints", {VMFlag::Type::Bool});
     if (f != NULL && f->isDefault()) {
       f->set(1);
     }
@@ -457,7 +457,7 @@ bool VM::initProfilerBridge(JavaVM *vm, bool attach) {
   // profiler to avoid the risk of crashing flag was made obsolete (inert) in 15
   // (see JDK-8228991) and removed in 16 (see JDK-8231560)
   if (hotspot_version() < 15) {
-    JVMFlag *f = JVMFlag::find("UseAdaptiveGCBoundary", {JVMFlag::Type::Bool});
+    VMFlag *f = VMFlag::find("UseAdaptiveGCBoundary", {VMFlag::Type::Bool});
     _is_adaptive_gc_boundary_flag_set = f != NULL && f->get();
   }
@@ -528,7 +528,7 @@ void VM::loadMethodIDs(jvmtiEnv *jvmti, JNIEnv *jni, jclass klass) {
   VMKlass *vmklass = VMKlass::fromJavaClass(jni, klass);
   int method_count = vmklass->methodCount();
   if (method_count > 0) {
-    ClassLoaderData *cld = vmklass->classLoaderData();
+    VMClassLoaderData *cld = vmklass->classLoaderData();
     cld->lock();
     for (int i = 0; i < method_count; i += MethodList::SIZE) {
       *cld->methodList() = new MethodList(*cld->methodList());
diff --git a/ddprof-lib/src/main/cpp/vmStructs.cpp b/ddprof-lib/src/main/cpp/vmStructs.cpp
index d11d8eb3..5cec56cc 100644
--- a/ddprof-lib/src/main/cpp/vmStructs.cpp
+++ b/ddprof-lib/src/main/cpp/vmStructs.cpp
@@ -5,6 +5,7 @@
 
 #include <string.h>
 #include <stdint.h>
+#include <stdarg.h>
 #include "vmStructs.h"
 #include "vmEntry.h"
 #include "j9Ext.h"
@@ -22,7 +23,6 @@ bool VMStructs::_has_compiler_structs = false;
 bool VMStructs::_has_stack_structs = false;
 bool VMStructs::_has_class_loader_data = false;
 bool VMStructs::_has_native_thread_id = false;
-bool VMStructs::_has_perm_gen = false;
 bool VMStructs::_can_dereference_jmethod_id = false;
 bool VMStructs::_compact_object_headers = false;
@@ -68,7 +68,6 @@ int VMStructs::_method_constmethod_offset = -1;
 int VMStructs::_method_code_offset = -1;
 int VMStructs::_constmethod_constants_offset = -1;
 int VMStructs::_constmethod_idnum_offset = -1;
-int VMStructs::_constmethod_size = -1;
 int VMStructs::_pool_holder_offset = -1;
 int VMStructs::_array_len_offset = 0;
 int VMStructs::_array_data_offset = -1;
@@ -85,7 +84,6 @@ int VMStructs::_flag_addr_offset = -1;
 int VMStructs::_flag_origin_offset = -1;
 const char* VMStructs::_flags_addr = NULL;
 int VMStructs::_flag_count = 0;
-int VMStructs::_flag_size = 0;
 char* VMStructs::_code_heap[3] = {};
 const void* VMStructs::_code_heap_low = NO_MIN_ADDRESS;
 const void* VMStructs::_code_heap_high = NO_MAX_ADDRESS;
@@ -98,8 +96,6 @@ char* VMStructs::_narrow_klass_base = NULL;
 int* VMStructs::_narrow_klass_shift_addr = NULL;
 int VMStructs::_narrow_klass_shift = -1;
 char** VMStructs::_collected_heap_addr = NULL;
-char* VMStructs::_collected_heap = NULL;
-int VMStructs::_collected_heap_reserved_offset = -1;
 int VMStructs::_region_start_offset = -1;
 int VMStructs::_region_size_offset = -1;
 int VMStructs::_markword_klass_shift = -1;
@@ -112,6 +108,12 @@ const void* VMStructs::_call_stub_return = NULL;
 const void* VMStructs::_interpreted_frame_valid_start = NULL;
 const void* VMStructs::_interpreted_frame_valid_end = NULL;
 
+// Initialize type size to 0
+#define INIT_TYPE_SIZE(name, ...) uint64_t VMStructs::TYPE_SIZE_NAME(name) = 0;
+DECLARE_TYPES_DO(INIT_TYPE_SIZE)
+#undef INIT_TYPE_SIZE
+
+
 jfieldID VMStructs::_eetop;
 jfieldID VMStructs::_tid;
 jfieldID VMStructs::_klass = NULL;
@@ -159,6 +161,23 @@ void VMStructs::ready() {
   initThreadBridge();
 }
 
+bool initTypeSize(uint64_t& size, const char* type, uint64_t value, ...) {
+  va_list args;
+  va_start(args, value);
+  const char* match_type = nullptr;
+  bool found = false;
+  while ((match_type = va_arg(args, const char*)) != nullptr) {
+    if (strcmp(type, match_type) == 0) {
+      size = value;
+      found = true;
+      break;
+    }
+  }
+
+  va_end(args);
+  return found;
+}
+
 void VMStructs::initOffsets() {
   uintptr_t entry = readSymbol("gHotSpotVMStructs");
   uintptr_t stride = readSymbol("gHotSpotVMStructEntryArrayStride");
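The nullptr-terminated varargs scan in `initTypeSize` above pairs with the `MATCH_SYMBOLS(...)` macro added to vmStructs.h later in this patch, which appends the terminating `nullptr`. A compilable toy of the same idiom, with illustrative names:

```cpp
#include <cstdarg>
#include <cstring>
#include <cstdio>

// Returns true when `type` equals one of the candidate names; the list must
// end with a nullptr sentinel, which is what MATCH_SYMBOLS(...) guarantees.
static bool matches_any(const char* type, ...) {
  va_list args;
  va_start(args, type);
  bool found = false;
  const char* candidate;
  while ((candidate = va_arg(args, const char*)) != nullptr) {
    if (strcmp(type, candidate) == 0) {
      found = true;
      break;
    }
  }
  va_end(args);
  return found;
}

int main() {
  // "JVMFlag" (JDK 10+) and "Flag" (older JDKs) map to one profiler type.
  printf("%d\n", matches_any("Flag", "JVMFlag", "Flag", (const char*)nullptr));   // 1
  printf("%d\n", matches_any("Klass", "JVMFlag", "Flag", (const char*)nullptr));  // 0
  return 0;
}
```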
@@ -199,10 +218,6 @@ void VMStructs::initOffsets() {
       } else if (strcmp(field, "_collectedHeap") == 0) {
         _collected_heap_addr = *(char***)(entry + address_offset);
       }
-    } else if (strcmp(type, "CollectedHeap") == 0) {
-      if (strcmp(field, "_reserved") == 0) {
-        _collected_heap_reserved_offset = *(int*)(entry + offset_offset);
-      }
     } else if (strcmp(type, "MemRegion") == 0) {
       if (strcmp(field, "_start") == 0) {
         _region_start_offset = *(int*)(entry + offset_offset);
@@ -392,8 +407,6 @@ void VMStructs::initOffsets() {
       }
     } else if (strcmp(type, "PcDesc") == 0) {
       // TODO
-    } else if (strcmp(type, "PermGen") == 0) {
-      _has_perm_gen = true;
     }
   }
 }
@@ -410,11 +423,15 @@ void VMStructs::initOffsets() {
       break;
     }
 
-    if (strcmp(type, "JVMFlag") == 0 || strcmp(type, "Flag") == 0) {
-      _flag_size = *(int*)(entry + size_offset);
-    } else if (strcmp(type, "ConstMethod") == 0) {
-      _constmethod_size = *(int*)(entry + size_offset);
-    }
+    uint64_t size = *(uint64_t*)(entry + size_offset);
+
+    #define TYPE_SIZE_MATCH(name, ...) \
+      if (initTypeSize(VMStructs::TYPE_SIZE_NAME(name), type, size, ##__VA_ARGS__)) continue;
+
+    DECLARE_TYPES_DO(TYPE_SIZE_MATCH)
+
+#undef TYPE_SIZE_MATCH
   }
 }
 
@@ -469,13 +486,13 @@ void VMStructs::resolveOffsets() {
     _klass = (jfieldID)(uintptr_t)(*_klass_offset_addr << 2 | 2);
   }
 
-  JVMFlag* ccp = JVMFlag::find("UseCompressedClassPointers");
+  VMFlag* ccp = VMFlag::find("UseCompressedClassPointers");
   if (ccp != NULL && ccp->get() && _narrow_klass_base_addr != NULL && _narrow_klass_shift_addr != NULL) {
     _narrow_klass_base = *_narrow_klass_base_addr;
     _narrow_klass_shift = *_narrow_klass_shift_addr;
   }
 
-  JVMFlag* coh = JVMFlag::find("UseCompactObjectHeaders");
+  VMFlag* coh = VMFlag::find("UseCompactObjectHeaders");
   if (coh != NULL && coh->get()) {
     _compact_object_headers = true;
   }
@@ -495,7 +512,7 @@ void VMStructs::resolveOffsets() {
       && _method_code_offset >= 0
       && _constmethod_constants_offset >= 0
       && _constmethod_idnum_offset >= 0
-      && _constmethod_size >= 0
+      && VMConstMethod::type_size() > 0
       && _pool_holder_offset >= 0;
 
   _has_compiler_structs = _comp_env_offset >= 0
@@ -544,7 +561,7 @@ void VMStructs::resolveOffsets() {
       && ((_mutable_data_offset >= 0 && _relocation_size_offset >= 0) || _nmethod_metadata_offset >= 0)
       && _thread_vframe_offset >= 0
       && _thread_exception_offset >= 0
-      && _constmethod_size >= 0;
+      && VMThread::type_size() > 0;
 
   // Since JDK-8268406, it is no longer possible to get VMMethod* by dereferencing jmethodID
   _can_dereference_jmethod_id = _has_method_structs && VM::hotspot_version() <= 25;
@@ -573,11 +590,6 @@ void VMStructs::resolveOffsets() {
       _heap_block_used_offset < 0) {
     memset(_code_heap, 0, sizeof(_code_heap));
   }
-
-  if (_collected_heap_addr != NULL && _collected_heap_reserved_offset >= 0 &&
-      _region_start_offset >= 0 && _region_size_offset >= 0) {
-    _collected_heap = *_collected_heap_addr + _collected_heap_reserved_offset;
-  }
 }
 
 void VMStructs::initJvmFunctions() {
@@ -687,7 +699,7 @@ void VMStructs::initUnsafeFunctions() {
 
 void VMStructs::initCriticalJNINatives() {
 #ifdef __aarch64__
   // aarch64 does not support CriticalJNINatives
-  JVMFlag* flag = JVMFlag::find("CriticalJNINatives", {JVMFlag::Type::Bool});
+  VMFlag* flag = VMFlag::find("CriticalJNINatives", {VMFlag::Type::Bool});
   if (flag != nullptr && flag->get()) {
     flag->set(0);
   }
@@ -700,7 +712,7 @@ const void *VMStructs::findHeapUsageFunc() {
     // just disable it
     return nullptr;
   } else {
-    JVMFlag* flag = JVMFlag::find("UseG1GC", {JVMFlag::Type::Bool});
+    VMFlag* flag = VMFlag::find("UseG1GC", {VMFlag::Type::Bool});
     if (flag != NULL && flag->get()) {
       // The CollectedHeap::memory_usage function is a virtual one -
       // G1, Shenandoah and ZGC are overriding it and calling the base class
       // concrete overridden method form.
@@ -708,11 +720,11 @@ const void *VMStructs::findHeapUsageFunc() {
       return _libjvm->findSymbol("_ZN15G1CollectedHeap12memory_usageEv");
     }
-    flag = JVMFlag::find("UseShenandoahGC", {JVMFlag::Type::Bool});
+    flag = VMFlag::find("UseShenandoahGC", {VMFlag::Type::Bool});
     if (flag != NULL && flag->get()) {
       return _libjvm->findSymbol("_ZN14ShenandoahHeap12memory_usageEv");
     }
-    flag = JVMFlag::find("UseZGC", {JVMFlag::Type::Bool});
+    flag = VMFlag::find("UseZGC", {VMFlag::Type::Bool});
     if (flag != NULL && flag->get() && VM::hotspot_version() < 21) {
       // accessing this method in JDK 21 (generational ZGC) will cause SIGSEGV
       return _libjvm->findSymbol("_ZN14ZCollectedHeap12memory_usageEv");
@@ -846,7 +858,7 @@ jmethodID VMMethod::validatedId() {
   return NULL;
 }
 
-NMethod* CodeHeap::findNMethod(char* heap, const void* pc) {
+VMNMethod* CodeHeap::findNMethod(char* heap, const void* pc) {
   unsigned char* heap_start = *(unsigned char**)(heap + _code_heap_memory_offset + _vs_low_offset);
   unsigned char* segmap = *(unsigned char**)(heap + _code_heap_segmap_offset + _vs_low_offset);
   size_t idx = ((unsigned char*)pc - heap_start) >> _code_heap_segment_shift;
@@ -859,10 +871,10 @@ NMethod* CodeHeap::findNMethod(char* heap, const void* pc) {
   }
 
   unsigned char* block = heap_start + (idx << _code_heap_segment_shift) + _heap_block_used_offset;
-  return *block ? align<NMethod>(block + sizeof(uintptr_t)) : NULL;
+  return *block ? align<VMNMethod>(block + sizeof(uintptr_t)) : NULL;
 }
 
-int NMethod::findScopeOffset(const void* pc) {
+int VMNMethod::findScopeOffset(const void* pc) {
   intptr_t pc_offset = (const char*)pc - code();
   if (pc_offset < 0 || pc_offset > 0x7fffffff) {
     return -1;
@@ -901,10 +913,10 @@ int ScopeDesc::readInt() {
   return n;
 }
 
-JVMFlag* JVMFlag::find(const char* name) {
-  if (_flags_addr != NULL && _flag_size > 0) {
+VMFlag* VMFlag::find(const char* name) {
+  if (_flags_addr != NULL && VMFlag::type_size() > 0) {
     for (int i = 0; i < _flag_count; i++) {
-      JVMFlag* f = (JVMFlag*)(_flags_addr + i * _flag_size);
+      VMFlag* f = VMFlag::cast(_flags_addr + i * VMFlag::type_size());
       if (f->name() != NULL && strcmp(f->name(), name) == 0 && f->addr() != NULL) {
         return f;
       }
@@ -913,7 +925,7 @@ JVMFlag* JVMFlag::find(const char* name) {
   return NULL;
 }
 
-JVMFlag *JVMFlag::find(const char *name, std::initializer_list<int> types) {
+VMFlag *VMFlag::find(const char *name, std::initializer_list<int> types) {
   int mask = 0;
   for (int type : types) {
     mask |= 0x1 << type;
@@ -921,14 +933,14 @@ JVMFlag *JVMFlag::find(const char *name, std::initializer_list<int> types) {
   return find(name, mask);
 }
 
-JVMFlag *JVMFlag::find(const char *name, int type_mask) {
-  if (_flags_addr != NULL && _flag_size > 0) {
+VMFlag *VMFlag::find(const char *name, int type_mask) {
+  if (_flags_addr != NULL && VMFlag::type_size() > 0) {
     for (int i = 0; i < _flag_count; i++) {
-      JVMFlag *f = (JVMFlag *)(_flags_addr + i * _flag_size);
+      VMFlag* f = VMFlag::cast(_flags_addr + i * VMFlag::type_size());
       if (f->name() != NULL && strcmp(f->name(), name) == 0) {
         int masked = 0x1 << f->type();
         if (masked & type_mask) {
-          return (JVMFlag*)f;
+          return (VMFlag*)f;
         }
       }
     }
@@ -936,7 +948,7 @@ JVMFlag *JVMFlag::find(const char *name, int type_mask) {
   return NULL;
 }
 
-int JVMFlag::type() {
+int VMFlag::type() {
   if (VM::hotspot_version() < 16) { // in JDK 16 the JVM flag implementation has changed
     char* type_name = *(char **)at(_flag_type_offset);
     if (type_name == NULL) {
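The `VMFlag::find` rewrite above shows why the type sizes must come from `gHotSpotVMTypes` at runtime: the flag table is strided by `sizeof(JVMFlag)` *of the running JVM*, which differs across JDK releases. A self-contained sketch of the stride-based lookup, with a toy layout and hypothetical names:

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

struct ToyFlag { const char* name; void* addr; };  // only the fields we read

static const char* flags_addr;   // base of the JVM flag table
static int flag_count;           // number of entries
static uint64_t flag_type_size;  // per-entry size reported by the JVM

static ToyFlag* find_flag(const char* name) {
  for (int i = 0; i < flag_count; i++) {
    // Stride by the runtime-reported size, not sizeof(ToyFlag): the real
    // JVMFlag carries version-specific fields the profiler never declares.
    ToyFlag* f = (ToyFlag*)(flags_addr + i * flag_type_size);
    if (f->name != nullptr && strcmp(f->name, name) == 0) return f;
  }
  return nullptr;
}

int main() {
  struct PaddedFlag { ToyFlag f; uint64_t extra[3]; };  // a "newer JDK" layout
  static PaddedFlag table[2] = {{{"UseG1GC", nullptr}, {}}, {{"UseZGC", nullptr}, {}}};
  flags_addr = (const char*)table;
  flag_count = 2;
  flag_type_size = sizeof(PaddedFlag);  // discovered at runtime, not hardcoded
  printf("%s\n", find_flag("UseZGC") ? "found" : "missing");  // found
  return 0;
}
```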
diff --git a/ddprof-lib/src/main/cpp/vmStructs.h b/ddprof-lib/src/main/cpp/vmStructs.h
index 7c91c270..4db172c4 100644
--- a/ddprof-lib/src/main/cpp/vmStructs.h
+++ b/ddprof-lib/src/main/cpp/vmStructs.h
@@ -19,6 +19,42 @@
 class GCHeapSummary;
 class HeapUsage;
 
+#define TYPE_SIZE_NAME(name) _##name##_size
+
+template <typename T>
+inline T* cast_to(const void* ptr) {
+  assert(T::type_size() > 0); // Ensure type size has been initialized
+  assert(ptr == nullptr || SafeAccess::isReadableRange(ptr, T::type_size()));
+  return reinterpret_cast<T*>(const_cast<void*>(ptr));
+}
+
+#define DECLARE(name) \
+  class name : VMStructs { \
+   public: \
+    static uint64_t type_size() { return TYPE_SIZE_NAME(name); } \
+    static name * cast(const void* ptr) { return cast_to<name>(ptr); } \
+    static name * load_then_cast(const void* ptr) { \
+      assert(ptr != nullptr); \
+      return cast(*(const void**)ptr); }
+
+#define DECLARE_END };
+
+#define MATCH_SYMBOLS(...) __VA_ARGS__, nullptr
+
+// Defines a type and its matching symbols in vmStructs.
+// A type may match multiple names in different JVM versions.
+#define DECLARE_TYPES_DO(f) \
+  f(VMClassLoaderData, MATCH_SYMBOLS("ClassLoaderData")) \
+  f(VMConstantPool, MATCH_SYMBOLS("ConstantPool")) \
+  f(VMConstMethod, MATCH_SYMBOLS("ConstMethod")) \
+  f(VMFlag, MATCH_SYMBOLS("JVMFlag", "Flag")) \
+  f(VMJavaFrameAnchor, MATCH_SYMBOLS("JavaFrameAnchor")) \
+  f(VMKlass, MATCH_SYMBOLS("Klass")) \
+  f(VMMethod, MATCH_SYMBOLS("Method")) \
+  f(VMNMethod, MATCH_SYMBOLS("nmethod")) \
+  f(VMSymbol, MATCH_SYMBOLS("Symbol")) \
+  f(VMThread, MATCH_SYMBOLS("Thread"))
+
 class VMStructs {
  public:
   typedef bool (*IsValidMethodFunc)(void *);
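The `DECLARE_TYPES_DO` list above is a classic X-macro: one table drives the per-type size variables, their zero-initialization in vmStructs.cpp, and the matching loop in `initOffsets`. A compilable toy showing two expansions of one list, with toy type names:

```cpp
#include <cstdint>
#include <cstdio>

#define TOY_TYPES_DO(f) \
  f(ToyFlag)            \
  f(ToyThread)

// Expansion 1: one size variable per listed type (cf. INIT_TYPE_SIZE).
#define DECLARE_SIZE(name) static uint64_t name##_size = 0;
TOY_TYPES_DO(DECLARE_SIZE)
#undef DECLARE_SIZE

// Expansion 2: a statement stamped out per type (cf. TYPE_SIZE_MATCH).
#define PRINT_SIZE(name) printf(#name "_size = %llu\n", (unsigned long long)name##_size);

int main() {
  ToyFlag_size = 24;    // at runtime these come from gHotSpotVMTypes
  ToyThread_size = 560;
  TOY_TYPES_DO(PRINT_SIZE)
  return 0;
}

#undef PRINT_SIZE
```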
@@ -34,7 +70,6 @@ class VMStructs {
   static bool _has_stack_structs;
   static bool _has_class_loader_data;
   static bool _has_native_thread_id;
-  static bool _has_perm_gen;
   static bool _can_dereference_jmethod_id;
   static bool _compact_object_headers;
 
@@ -80,7 +115,6 @@ class VMStructs {
   static int _method_code_offset;
   static int _constmethod_constants_offset;
   static int _constmethod_idnum_offset;
-  static int _constmethod_size;
   static int _pool_holder_offset;
   static int _array_len_offset;
   static int _array_data_offset;
@@ -97,7 +131,6 @@ class VMStructs {
   static int _flag_origin_offset;
   static const char* _flags_addr;
   static int _flag_count;
-  static int _flag_size;
   static char* _code_heap[3];
   static const void* _code_heap_low;
   static const void* _code_heap_high;
@@ -110,8 +143,6 @@ class VMStructs {
   static int* _narrow_klass_shift_addr;
   static int _narrow_klass_shift;
   static char** _collected_heap_addr;
-  static char* _collected_heap;
-  static int _collected_heap_reserved_offset;
   static int _region_start_offset;
   static int _region_size_offset;
   static int _markword_klass_shift;
@@ -124,6 +155,15 @@ class VMStructs {
   static const void* _interpreted_frame_valid_start;
   static const void* _interpreted_frame_valid_end;
 
+// Declare type size variables
+  #define DECLARE_TYPE_SIZE_VAR(name, ...) \
+    static uint64_t TYPE_SIZE_NAME(name);
+
+  DECLARE_TYPES_DO(DECLARE_TYPE_SIZE_VAR)
+
+#undef DECLARE_TYPE_SIZE_VAR
+
+
   static jfieldID _eetop;
   static jfieldID _tid;
   static jfieldID _klass;
@@ -162,7 +202,9 @@ class VMStructs {
   static const void *findHeapUsageFunc();
 
   const char* at(int offset) {
-    return (const char*)this + offset;
+    const char* ptr = (const char*)this + offset;
+    assert(SafeAccess::isReadable(ptr));
+    return ptr;
   }
 
   static bool goodPtr(const void* ptr) {
@@ -306,10 +348,10 @@ class MethodList {
 };
 
 
-class NMethod;
+class VMNMethod;
 class VMMethod;
 
-class VMSymbol : VMStructs {
+DECLARE(VMSymbol)
  public:
   unsigned short length() {
     if (_symbol_length_offset >= 0) {
@@ -322,9 +364,9 @@ class VMSymbol : VMStructs {
   const char* body() {
     return at(_symbol_body_offset);
   }
-};
+DECLARE_END
 
-class ClassLoaderData : VMStructs {
+DECLARE(VMClassLoaderData)
  private:
   void* mutex() {
     return *(void**) at(sizeof(uintptr_t) * 3);
@@ -342,28 +384,20 @@ class ClassLoaderData : VMStructs {
   MethodList** methodList() {
     return (MethodList**) at(sizeof(uintptr_t) * 6 + 8);
   }
-};
+DECLARE_END
 
-class VMKlass : VMStructs {
+DECLARE(VMKlass)
  public:
   static VMKlass* fromJavaClass(JNIEnv* env, jclass cls) {
-    if (_has_perm_gen) {
-      jobject klassOop = env->GetObjectField(cls, _klass);
-      return (VMKlass*)(*(uintptr_t**)klassOop + 2);
-    } else if (sizeof(VMKlass*) == 8) {
-      return (VMKlass*)(uintptr_t)env->GetLongField(cls, _klass);
+    if (sizeof(VMKlass*) == 8) {
+      return VMKlass::cast((const void*)(intptr_t)env->GetLongField(cls, _klass));
     } else {
-      return (VMKlass*)(uintptr_t)env->GetIntField(cls, _klass);
+      return VMKlass::cast((const void*)(intptr_t)env->GetIntField(cls, _klass));
     }
   }
 
   static VMKlass* fromHandle(uintptr_t handle) {
-    if (_has_perm_gen) {
-      // On JDK 7 KlassHandle is a pointer to klassOop, hence one more indirection
-      return (VMKlass*)(*(uintptr_t**)handle + 2);
-    } else {
-      return (VMKlass*)handle;
-    }
+    return VMKlass::cast((const void*)handle);
   }
 
   static VMKlass* fromOop(uintptr_t oop) {
@@ -378,56 +412,66 @@ class VMKlass : VMStructs {
       } else {
         narrow_klass = *(unsigned int*)(oop + _oop_klass_offset);
       }
-      return (VMKlass*)(_narrow_klass_base + (narrow_klass << _narrow_klass_shift));
+      return VMKlass::cast((const void*)(_narrow_klass_base + (narrow_klass << _narrow_klass_shift)));
     } else {
-      return *(VMKlass**)(oop + _oop_klass_offset);
+      return VMKlass::load_then_cast((const void*)(oop + _oop_klass_offset));
     }
   }
 
   VMSymbol* name() {
-    return *(VMSymbol**) at(_klass_name_offset);
+    assert(_klass_name_offset >= 0);
+    return VMSymbol::load_then_cast(at(_klass_name_offset));
   }
 
-  ClassLoaderData* classLoaderData() {
-    return *(ClassLoaderData**) at(_class_loader_data_offset);
+  VMClassLoaderData* classLoaderData() {
+    assert(_class_loader_data_offset >= 0);
+    return VMClassLoaderData::load_then_cast(at(_class_loader_data_offset));
   }
 
   int methodCount() {
+    assert(_methods_offset >= 0);
     int* methods = *(int**) at(_methods_offset);
     return methods == NULL ? 0 : *methods & 0xffff;
   }
 
   jmethodID* jmethodIDs() {
+    assert(_jmethod_ids_offset >= 0);
     return __atomic_load_n((jmethodID**) at(_jmethod_ids_offset), __ATOMIC_ACQUIRE);
   }
-};
+DECLARE_END
 
-class JavaFrameAnchor : VMStructs {
+DECLARE(VMJavaFrameAnchor)
  private:
   enum { MAX_CALL_WRAPPER_DISTANCE = 512 };
 
  public:
-  static JavaFrameAnchor* fromEntryFrame(uintptr_t fp) {
+  static VMJavaFrameAnchor* fromEntryFrame(uintptr_t fp) {
+    assert(_entry_frame_call_wrapper_offset != -1);
+    assert(_call_wrapper_anchor_offset >= 0);
     const char* call_wrapper = (const char*) SafeAccess::loadPtr((void**)(fp + _entry_frame_call_wrapper_offset), nullptr);
     if (!goodPtr(call_wrapper) || (uintptr_t)call_wrapper - fp > MAX_CALL_WRAPPER_DISTANCE) {
       return NULL;
     }
-    return (JavaFrameAnchor*)(call_wrapper + _call_wrapper_anchor_offset);
+    return VMJavaFrameAnchor::cast((const void*)(call_wrapper + _call_wrapper_anchor_offset));
   }
 
   uintptr_t lastJavaSP() {
+    assert(_anchor_sp_offset >= 0);
     return (uintptr_t) SafeAccess::loadPtr((void**) at(_anchor_sp_offset), nullptr);
   }
 
   uintptr_t lastJavaFP() {
+    assert(_anchor_fp_offset >= 0);
     return (uintptr_t) SafeAccess::loadPtr((void**) at(_anchor_fp_offset), nullptr);
   }
 
   const void* lastJavaPC() {
+    assert(_anchor_pc_offset >= 0);
     return SafeAccess::loadPtr((void**) at(_anchor_pc_offset), nullptr);
   }
 
   void setLastJavaPC(const void* pc) {
+    assert(_anchor_pc_offset >= 0);
     *(const void**) at(_anchor_pc_offset) = pc;
   }
@@ -440,7 +484,7 @@ class JavaFrameAnchor : VMStructs {
     }
     return false;
   }
-};
+DECLARE_END
 
 // Copied from JDK's globalDefinitions.hpp 'JavaThreadState' enum
 enum JVMJavaThreadState {
@@ -458,7 +502,7 @@ enum JVMJavaThreadState {
   _thread_max_state = 12 // maximum thread state+1 - used for statistics allocation
 };
 
-class VMThread : VMStructs {
+DECLARE(VMThread)
  public:
   static VMThread* current();
@@ -467,7 +511,7 @@ class VMThread : VMStructs {
   }
 
   static VMThread* fromJavaThread(JNIEnv* env, jthread thread) {
-    return (VMThread*)(uintptr_t)env->GetLongField(thread, _eetop);
+    return VMThread::cast((const void*)env->GetLongField(thread, _eetop));
   }
 
   static jlong javaThreadId(JNIEnv* env, jthread thread) {
@@ -481,6 +525,7 @@ class VMThread : VMStructs {
   JNIEnv* jni();
 
   const void** vtable() {
+    assert(SafeAccess::isReadable(this));
     return *(const void***)this;
   }
 
@@ -503,31 +548,29 @@ class VMThread : VMStructs {
   }
 
   bool inDeopt() {
+    assert(_thread_vframe_offset >= 0);
     return SafeAccess::loadPtr((void**) at(_thread_vframe_offset), nullptr) != NULL;
   }
 
   void*& exception() {
+    assert(_thread_exception_offset >= 0);
     return *(void**) at(_thread_exception_offset);
   }
 
-  JavaFrameAnchor* anchor() {
-    return (JavaFrameAnchor*) at(_thread_anchor_offset);
+  VMJavaFrameAnchor* anchor() {
+    assert(_thread_anchor_offset >= 0);
+    return VMJavaFrameAnchor::cast(at(_thread_anchor_offset));
   }
 
-  VMMethod* compiledMethod() {
-    const char* env = *(const char**) at(_comp_env_offset);
-    if (env != NULL) {
-      const char* task = *(const char**) (env + _comp_task_offset);
-      if (task != NULL) {
-        return *(VMMethod**) (task + _comp_method_offset);
-      }
-    }
-    return NULL;
-  }
-};
+  inline VMMethod* compiledMethod();
+DECLARE_END
 
-class VMMethod : public /* TODO make private when consolidating VMMethod? */ VMStructs {
-  private:
+DECLARE(VMConstMethod)
+DECLARE_END
+
+
+DECLARE(VMMethod)
+  private:
   static bool check_jmethodID_J9(jmethodID id);
   static bool check_jmethodID_hotspot(jmethodID id);
@@ -540,32 +583,35 @@ class VMMethod : public /* TODO make private when consolidating VMMethod? */ VMStructs {
   // Workaround for JDK-8313816
   static bool isStaleMethodId(jmethodID id) {
     if (!_can_dereference_jmethod_id) return false;
-    VMMethod* vm_method = *(VMMethod**)id;
+
+    VMMethod* vm_method = VMMethod::load_then_cast((const void*)id);
     return vm_method == NULL || vm_method->id() == NULL;
   }
 
   const char* bytecode() {
-    return *(const char**) at(_method_constmethod_offset) + _constmethod_size;
+    assert(_method_constmethod_offset >= 0);
+    return *(const char**) at(_method_constmethod_offset) + VMConstMethod::type_size();
   }
 
-  NMethod* code() {
-    return *(NMethod**) at(_method_code_offset);
-  }
+  inline VMNMethod* code();
 
   static bool check_jmethodID(jmethodID id);
-};
+DECLARE_END
 
-class NMethod : VMStructs {
+DECLARE(VMNMethod)
  public:
   int size() {
+    assert(_blob_size_offset >= 0);
     return *(int*) at(_blob_size_offset);
   }
 
   int frameSize() {
+    assert(_frame_size_offset >= 0);
     return *(int*) at(_frame_size_offset);
   }
 
   short frameCompleteOffset() {
+    assert(_frame_complete_offset >= 0);
     return *(short*) at(_frame_complete_offset);
   }
@@ -622,6 +668,7 @@ class NMethod : VMStructs {
   }
 
   const char* name() {
+    assert(_nmethod_name_offset >= 0);
     return *(const char**) at(_nmethod_name_offset);
   }
 
@@ -646,7 +693,8 @@ class NMethod : VMStructs {
   }
 
   VMMethod* method() {
-    return *(VMMethod**) at(_nmethod_method_offset);
+    assert(_nmethod_method_offset >= 0);
+    return VMMethod::load_then_cast((const void*)at(_nmethod_method_offset));
   }
 
   char state() {
@@ -664,16 +712,20 @@ class NMethod : VMStructs {
 
   VMMethod** metadata() {
     if (_mutable_data_offset >= 0) {
       // Since JDK 25
+      assert(_relocation_size_offset >= 0);
       return (VMMethod**) (*(char**) at(_mutable_data_offset) + *(int*) at(_relocation_size_offset));
     } else if (_data_offset > 0) {
       // since JDK 23
+      assert(_nmethod_metadata_offset >= 0);
+      assert(_data_offset >= 0);
       return (VMMethod**) at(*(int*) at(_data_offset) + *(unsigned short*) at(_nmethod_metadata_offset));
     }
+    assert(_nmethod_metadata_offset >= 0);
     return (VMMethod**) at(*(int*) at(_nmethod_metadata_offset));
   }
 
   int findScopeOffset(const void* pc);
-};
+DECLARE_END
 
 class CodeHeap : VMStructs {
  private:
@@ -683,7 +735,7 @@ class CodeHeap : VMStructs {
            pc < *(const void**)(heap + _code_heap_memory_offset + _vs_high_offset);
   }
 
-  static NMethod* findNMethod(char* heap, const void* pc);
+  static VMNMethod* findNMethod(char* heap, const void* pc);
 
  public:
   static bool available() {
@@ -703,7 +755,7 @@ class CodeHeap : VMStructs {
            high = _code_heap_high);
   }
 
-  static NMethod* findNMethod(const void* pc) {
+  static VMNMethod* findNMethod(const void* pc) {
     if (contains(_code_heap[0], pc)) return findNMethod(_code_heap[0], pc);
     if (contains(_code_heap[1], pc)) return findNMethod(_code_heap[1], pc);
     if (contains(_code_heap[2], pc)) return findNMethod(_code_heap[2], pc);
@@ -711,33 +763,14 @@ class CodeHeap : VMStructs {
   }
 };
 
-class CollectedHeap : VMStructs {
- public:
-  static bool created() {
-    return _collected_heap_addr != NULL && *_collected_heap_addr != NULL;
-  }
-
-  static CollectedHeap* heap() {
-    return (CollectedHeap*)_collected_heap;
-  }
-
-  uintptr_t start() {
-    return *(uintptr_t*) at(_region_start_offset);
-  }
-
-  uintptr_t size() {
-    return (*(uintptr_t*) at(_region_size_offset)) * sizeof(uintptr_t);
-  }
-};
-
-class JVMFlag : VMStructs {
+DECLARE(VMFlag)
  private:
   enum {
     ORIGIN_DEFAULT = 0,
     ORIGIN_MASK = 15,
     SET_ON_CMDLINE = 1 << 17
   };
 
-  static JVMFlag* find(const char *name, int type_mask);
+  static VMFlag* find(const char *name, int type_mask);
 
  public:
   enum Type {
@@ -754,16 +787,18 @@ class JVMFlag : VMStructs {
     Unknown = -1
   };
 
-  static JVMFlag* find(const char* name);
-  static JVMFlag *find(const char* name, std::initializer_list<int> types);
+  static VMFlag* find(const char* name);
+  static VMFlag *find(const char* name, std::initializer_list<int> types);
 
   const char* name() {
+    assert(_flag_name_offset >= 0);
     return *(const char**) at(_flag_name_offset);
   }
 
   int type();
 
   void* addr() {
+    assert(_flag_addr_offset >= 0);
     return *(void**) at(_flag_addr_offset);
   }
 
@@ -788,7 +823,7 @@ class JVMFlag : VMStructs {
   void set(char value) {
     *((char*)addr()) = value;
   }
-};
+DECLARE_END
 
 class PcDesc {
  public:
@@ -809,7 +844,7 @@ class ScopeDesc : VMStructs {
   int readInt();
 
  public:
-  ScopeDesc(NMethod* nm) {
+  ScopeDesc(VMNMethod* nm) {
     _scopes = (const unsigned char*)nm->scopes();
     _metadata = nm->metadata();
   }
diff --git a/ddprof-lib/src/main/cpp/vmStructs.inline.h b/ddprof-lib/src/main/cpp/vmStructs.inline.h
new file mode 100644
index 00000000..f867443a
--- /dev/null
+++ b/ddprof-lib/src/main/cpp/vmStructs.inline.h
@@ -0,0 +1,30 @@
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache License (Version 2.0).
+// This product includes software developed at Datadog (https://www.datadoghq.com/) Copyright 2025 Datadog, Inc.
+
+#ifndef _VMSTRUCTS_INLINE_H
+#define _VMSTRUCTS_INLINE_H
+
+#include "vmStructs.h"
+
+VMNMethod* VMMethod::code() {
+  assert(_method_code_offset >= 0);
+  const void* code_ptr = *(const void**) at(_method_code_offset);
+  return VMNMethod::cast(code_ptr);
+}
+
+VMMethod* VMThread::compiledMethod() {
+  assert(_comp_method_offset >= 0);
+  assert(_comp_env_offset >= 0);
+  assert(_comp_task_offset >= 0);
+  const char* env = *(const char**) at(_comp_env_offset);
+  if (env != NULL) {
+    const char* task = *(const char**) (env + _comp_task_offset);
+    if (task != NULL) {
+      return VMMethod::load_then_cast((const void*)(task + _comp_method_offset));
+    }
+  }
+  return NULL;
+}
+
+
+#endif // _VMSTRUCTS_INLINE_H
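A note on the new vmStructs.inline.h: `VMMethod::code()` and `VMThread::compiledMethod()` cannot be defined inside the `DECLARE(...)` class bodies because they `cast(...)` to types that are only complete later in the header; the out-of-line inline header (HotSpot's own `*.inline.h` convention) resolves that, and only callers that need the bodies — like stackWalker.cpp above — pay for the include. A toy single-file reproduction of the dependency shape, with illustrative names:

```cpp
// "vmStructs.h" part: a forward declaration suffices for the interface.
struct ToyNMethod;  // incomplete here, fully defined further down

struct ToyMethod {
  ToyNMethod* code();   // declared only: the body needs the complete type
  ToyNMethod* code_;
};

struct ToyNMethod {
  int frame_size;
};

// "vmStructs.inline.h" part: both types are complete, so the body can exist.
inline ToyNMethod* ToyMethod::code() { return code_; }

int main() {
  ToyNMethod nm{64};
  ToyMethod m{&nm};
  return m.code()->frame_size == 64 ? 0 : 1;
}
```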