Skip to content

Commit cffcf37

Browse files
committed
feat(heaptrack): allow enabling/disabling via IPC
1 parent 3230634 commit cffcf37

6 files changed

Lines changed: 351 additions & 8 deletions

File tree

crates/heaptrack/src/bpf.rs

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,38 @@ impl HeaptrackBpf {
5555
Ok(())
5656
}
5757

58+
/// Enable event tracking
59+
pub fn enable_tracking(&mut self) -> Result<()> {
60+
let key = 0u32;
61+
let value = true as u8;
62+
self.skel
63+
.maps
64+
.tracking_enabled
65+
.update(
66+
&key.to_le_bytes(),
67+
&value.to_le_bytes(),
68+
libbpf_rs::MapFlags::ANY,
69+
)
70+
.context("Failed to enable tracking")?;
71+
Ok(())
72+
}
73+
74+
/// Disable event tracking
75+
pub fn disable_tracking(&mut self) -> Result<()> {
76+
let key = 0u32;
77+
let value = false as u8;
78+
self.skel
79+
.maps
80+
.tracking_enabled
81+
.update(
82+
&key.to_le_bytes(),
83+
&value.to_le_bytes(),
84+
libbpf_rs::MapFlags::ANY,
85+
)
86+
.context("Failed to disable tracking")?;
87+
Ok(())
88+
}
89+
5890
pub fn attach_malloc(&mut self, libc_path: &Path) -> Result<()> {
5991
let malloc_opts = UprobeOpts {
6092
func_name: Some("malloc".to_string()),

crates/heaptrack/src/bpf/heaptrack.bpf.c

Lines changed: 185 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,26 @@ struct {
2929
__uint(max_entries, 256 * 1024);
3030
} events SEC(".maps");
3131

32+
/* Map to control whether tracking is enabled (0 = disabled, 1 = enabled).
 * Userspace toggles the single entry at key 0 (see enable_tracking /
 * disable_tracking in bpf.rs). */
struct {
    __uint(type, BPF_MAP_TYPE_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, __u8);
} tracking_enabled SEC(".maps");

/* Helper to check if tracking is currently enabled.
 *
 * NOTE(review): BPF_MAP_TYPE_ARRAY entries are pre-allocated and
 * zero-filled by the kernel, so the lookup below is expected to succeed
 * and yield 0 (disabled) until userspace writes 1 — the effective
 * startup default is *disabled*, not enabled.  The NULL branch exists
 * to satisfy the verifier; confirm its "default to enabled" value
 * matches the intended startup behavior. */
static __always_inline int is_enabled(void) {
    __u32 key = 0;
    __u8 *enabled = bpf_map_lookup_elem(&tracking_enabled, &key);

    /* Default to enabled if map not initialized */
    if (!enabled) {
        return 1;
    }

    return *enabled;
}
3252

3353
/* Helper to check if a PID or any of its ancestors should be tracked */
3454
static __always_inline int is_tracked(__u32 pid) {
@@ -76,14 +96,16 @@ int tracepoint_sched_fork(struct trace_event_raw_sched_process_fork *ctx) {
7696

7797
SEC("tracepoint/syscalls/sys_enter_execve")
7898
int tracepoint_sys_execve(struct trace_event_raw_sys_enter *ctx) {
79-
__u32 pid = bpf_get_current_pid_tgid() >> 32;
99+
__u64 tid = bpf_get_current_pid_tgid();
100+
__u32 pid = tid >> 32;
80101

81102
/* Check if this process or any parent is being tracked */
82-
if (is_tracked(pid)) {
103+
if (is_tracked(pid) && is_enabled()) {
83104
struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
84105
if (e) {
85106
e->timestamp = bpf_ktime_get_ns();
86107
e->pid = pid;
108+
e->tid = tid & 0xFFFFFFFF;
87109
e->event_type = EVENT_TYPE_EXECVE;
88110
e->addr = 0;
89111
e->size = 0;
@@ -123,7 +145,7 @@ int uretprobe_malloc(struct pt_regs *ctx) {
123145
__u32 pid = tid >> 32;
124146

125147
/* Check if this PID is being tracked */
126-
if (is_tracked(pid)) {
148+
if (is_tracked(pid) && is_enabled()) {
127149
__u64 *size_ptr = bpf_map_lookup_elem(&malloc_size, &tid);
128150
if (size_ptr) {
129151
__u64 addr = PT_REGS_RC(ctx);
@@ -135,6 +157,7 @@ int uretprobe_malloc(struct pt_regs *ctx) {
135157
if (e) {
136158
e->timestamp = bpf_ktime_get_ns();
137159
e->pid = pid;
160+
e->tid = tid & 0xFFFFFFFF;
138161
e->event_type = EVENT_TYPE_MALLOC;
139162
e->addr = addr;
140163
e->size = size;
@@ -151,10 +174,11 @@ int uretprobe_malloc(struct pt_regs *ctx) {
151174

152175
SEC("uprobe")
153176
int uprobe_free(struct pt_regs *ctx) {
154-
__u32 pid = bpf_get_current_pid_tgid() >> 32;
177+
__u64 tid = bpf_get_current_pid_tgid();
178+
__u32 pid = tid >> 32;
155179

156180
/* Check if this PID is being tracked */
157-
if (is_tracked(pid)) {
181+
if (is_tracked(pid) && is_enabled()) {
158182
__u64 addr = PT_REGS_PARM1(ctx);
159183

160184
/* Only track non-NULL frees */
@@ -163,6 +187,7 @@ int uprobe_free(struct pt_regs *ctx) {
163187
if (e) {
164188
e->timestamp = bpf_ktime_get_ns();
165189
e->pid = pid;
190+
e->tid = tid & 0xFFFFFFFF;
166191
e->event_type = EVENT_TYPE_FREE;
167192
e->addr = addr;
168193
e->size = 0; /* size unknown for free */
@@ -205,7 +230,7 @@ int uretprobe_calloc(struct pt_regs *ctx) {
205230
__u32 pid = tid >> 32;
206231

207232
/* Check if this PID is being tracked */
208-
if (is_tracked(pid)) {
233+
if (is_tracked(pid) && is_enabled()) {
209234
__u64 *size_ptr = bpf_map_lookup_elem(&calloc_size, &tid);
210235
if (size_ptr) {
211236
__u64 addr = PT_REGS_RC(ctx);
@@ -217,6 +242,7 @@ int uretprobe_calloc(struct pt_regs *ctx) {
217242
if (e) {
218243
e->timestamp = bpf_ktime_get_ns();
219244
e->pid = pid;
245+
e->tid = tid & 0xFFFFFFFF;
220246
e->event_type = EVENT_TYPE_CALLOC;
221247
e->addr = addr;
222248
e->size = size;
@@ -260,7 +286,7 @@ int uretprobe_realloc(struct pt_regs *ctx) {
260286
__u32 pid = tid >> 32;
261287

262288
/* Check if this PID is being tracked */
263-
if (is_tracked(pid)) {
289+
if (is_tracked(pid) && is_enabled()) {
264290
__u64 *size_ptr = bpf_map_lookup_elem(&realloc_size, &tid);
265291
if (size_ptr) {
266292
__u64 addr = PT_REGS_RC(ctx);
@@ -272,6 +298,7 @@ int uretprobe_realloc(struct pt_regs *ctx) {
272298
if (e) {
273299
e->timestamp = bpf_ktime_get_ns();
274300
e->pid = pid;
301+
e->tid = tid & 0xFFFFFFFF;
275302
e->event_type = EVENT_TYPE_REALLOC;
276303
e->addr = addr;
277304
e->size = size;
@@ -315,7 +342,7 @@ int uretprobe_aligned_alloc(struct pt_regs *ctx) {
315342
__u32 pid = tid >> 32;
316343

317344
/* Check if this PID is being tracked */
318-
if (is_tracked(pid)) {
345+
if (is_tracked(pid) && is_enabled()) {
319346
__u64 *size_ptr = bpf_map_lookup_elem(&aligned_alloc_size, &tid);
320347
if (size_ptr) {
321348
__u64 addr = PT_REGS_RC(ctx);
@@ -327,6 +354,7 @@ int uretprobe_aligned_alloc(struct pt_regs *ctx) {
327354
if (e) {
328355
e->timestamp = bpf_ktime_get_ns();
329356
e->pid = pid;
357+
e->tid = tid & 0xFFFFFFFF;
330358
e->event_type = EVENT_TYPE_ALIGNED_ALLOC;
331359
e->addr = addr;
332360
e->size = size;
@@ -340,3 +368,152 @@ int uretprobe_aligned_alloc(struct pt_regs *ctx) {
340368

341369
return 0;
342370
}
371+
372+
/* Map to store mmap parameters between entry and return */
struct mmap_args {
    __u64 addr; /* requested address (can be 0 for kernel choice) */
    __u64 len;  /* requested mapping length in bytes */
};

/* Scratch map pairing sys_enter_mmap with sys_exit_mmap; keyed by the
 * full 64-bit pid_tgid so concurrent threads don't collide. */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10000);
    __type(key, __u64); /* tid */
    __type(value, struct mmap_args);
} mmap_temp SEC(".maps");
384+
385+
SEC("tracepoint/syscalls/sys_enter_mmap")
int tracepoint_sys_enter_mmap(struct trace_event_raw_sys_enter *ctx) {
    __u64 tid = bpf_get_current_pid_tgid();
    __u32 pid = tid >> 32;

    if (!is_tracked(pid)) {
        return 0;
    }

    /* mmap(addr, len, prot, flags, fd, offset):
     * stash the requested addr (can be 0 for kernel choice) and len so
     * the exit probe can pair them with the returned mapping address. */
    struct mmap_args args = {0};
    args.addr = ctx->args[0];
    args.len = ctx->args[1];
    bpf_map_update_elem(&mmap_temp, &tid, &args, BPF_ANY);

    return 0;
}
403+
404+
SEC("tracepoint/syscalls/sys_exit_mmap")
int tracepoint_sys_exit_mmap(struct trace_event_raw_sys_exit *ctx) {
    __u64 tid = bpf_get_current_pid_tgid();
    __u32 pid = tid >> 32;

    if (is_tracked(pid)) {
        struct mmap_args *args = bpf_map_lookup_elem(&mmap_temp, &tid);
        if (args) {
            __s64 ret = ctx->ret;

            /* Only emit events for successful mmap calls (MAP_FAILED is
             * -1) while tracking is enabled. */
            if (is_enabled() && ret > 0) {
                struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
                if (e) {
                    e->timestamp = bpf_ktime_get_ns();
                    e->pid = pid;
                    e->tid = tid & 0xFFFFFFFF;
                    e->event_type = EVENT_TYPE_MMAP;
                    e->addr = (__u64)ret; /* actual mapped address */
                    e->size = args->len;
                    bpf_ringbuf_submit(e, 0);
                }
            }

            /* Always reclaim the scratch entry, even when tracking is
             * disabled — previously the delete was skipped whenever
             * is_enabled() was false, so entries recorded at
             * sys_enter_mmap leaked and slowly filled mmap_temp. */
            bpf_map_delete_elem(&mmap_temp, &tid);
        }
    }

    return 0;
}
434+
435+
/* munmap tracking */
436+
SEC("tracepoint/syscalls/sys_enter_munmap")
437+
int tracepoint_sys_enter_munmap(struct trace_event_raw_sys_enter *ctx) {
438+
__u64 tid = bpf_get_current_pid_tgid();
439+
__u32 pid = tid >> 32;
440+
441+
if (is_tracked(pid) && is_enabled()) {
442+
/* munmap(addr, len) */
443+
__u64 addr = ctx->args[0];
444+
__u64 len = ctx->args[1];
445+
446+
/* Report the munmap attempt (we track entry, not exit,
447+
* because we want to know what was requested even if it fails) */
448+
if (addr != 0 && len > 0) {
449+
struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
450+
if (e) {
451+
e->timestamp = bpf_ktime_get_ns();
452+
e->pid = pid;
453+
e->tid = tid & 0xFFFFFFFF;
454+
e->event_type = EVENT_TYPE_MUNMAP;
455+
e->addr = addr;
456+
e->size = len;
457+
bpf_ringbuf_submit(e, 0);
458+
}
459+
}
460+
}
461+
462+
return 0;
463+
}
464+
465+
/* brk tracking - adjusts the program break (heap boundary) */

/* Scratch map pairing sys_enter_brk with sys_exit_brk; keyed by the
 * full 64-bit pid_tgid so concurrent threads don't clobber each other. */
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 10000);
    __type(key, __u64); /* tid */
    __type(value, __u64); /* requested brk value */
} brk_temp SEC(".maps");
472+
473+
SEC("tracepoint/syscalls/sys_enter_brk")
int tracepoint_sys_enter_brk(struct trace_event_raw_sys_enter *ctx) {
    __u64 tid = bpf_get_current_pid_tgid();
    __u32 pid = tid >> 32;

    if (!is_tracked(pid)) {
        return 0;
    }

    /* brk(addr): addr == 0 just queries the current break.  Stash the
     * request so the exit probe can decide whether to report it. */
    __u64 requested = ctx->args[0];
    bpf_map_update_elem(&brk_temp, &tid, &requested, BPF_ANY);

    return 0;
}
486+
487+
SEC("tracepoint/syscalls/sys_exit_brk")
int tracepoint_sys_exit_brk(struct trace_event_raw_sys_exit *ctx) {
    __u64 tid = bpf_get_current_pid_tgid();
    __u32 pid = tid >> 32;

    if (is_tracked(pid)) {
        __u64 *requested_brk = bpf_map_lookup_elem(&brk_temp, &tid);
        if (requested_brk) {
            __u64 new_brk = ctx->ret;
            __u64 req_brk = *requested_brk;

            /* Only report actual change requests (req_brk == 0 is just
             * a query) with a non-zero returned break, and only while
             * tracking is enabled. */
            if (is_enabled() && req_brk != 0 && new_brk > 0) {
                struct event *e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
                if (e) {
                    e->timestamp = bpf_ktime_get_ns();
                    e->pid = pid;
                    e->tid = tid & 0xFFFFFFFF;
                    e->event_type = EVENT_TYPE_BRK;
                    e->addr = new_brk;
                    /* We can't easily determine size change without
                     * tracking the previous brk, so we just report the
                     * new brk address.  Userspace can track deltas. */
                    e->size = 0;
                    bpf_ringbuf_submit(e, 0);
                }
            }

            /* Always reclaim the scratch entry, even when tracking is
             * disabled — previously the delete only ran when
             * is_enabled() was true, so entries recorded at
             * sys_enter_brk leaked toward brk_temp's max_entries. */
            bpf_map_delete_elem(&brk_temp, &tid);
        }
    }

    return 0;
}

0 commit comments

Comments
 (0)