Skip to content

Commit cfb3275

Browse files
authored
Merge pull request #44 from ZHaoXiangyuA/kindling-dev
Dynamic addition of kprobe and tracepoint
2 parents 5a0edd8 + d052e59 commit cfb3275

10 files changed

Lines changed: 470 additions & 29 deletions

File tree

driver/ppm_events_public.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1526,6 +1526,8 @@ struct ppm_evt_hdr {
15261526
#define PPM_IOCTL_GET_PROBE_VERSION _IO(PPM_IOCTL_MAGIC, 21)
15271527
#define PPM_IOCTL_SET_FULLCAPTURE_PORT_RANGE _IO(PPM_IOCTL_MAGIC, 22)
15281528
#define PPM_IOCTL_SET_STATSD_PORT _IO(PPM_IOCTL_MAGIC, 23)
1529+
#define PPM_IOCTL_MASK_SET_TP _IO(PPM_IOCTL_MAGIC, 24)
1530+
#define PPM_IOCTL_MASK_UNSET_TP _IO(PPM_IOCTL_MAGIC, 25)
15291531
#endif // CYGWING_AGENT
15301532

15311533
extern const struct ppm_name_value socket_families[];

userspace/libscap/scap-int.h

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,17 @@ typedef struct wh_t wh_t;
7272
//
7373
#define BPF_PROGS_MAX 128
7474
#define BPF_MAPS_MAX 32
75-
75+
struct bpf_prog {
76+
int fd;
77+
int efd;
78+
char name[255];
79+
};
80+
//Maps a kprobe/tracepoint name to its real index in the bpf_progs array, and records whether it is currently of interest.
81+
struct kt_index {
82+
char name[255];
83+
int index;
84+
bool interest;
85+
};
7686
//
7787
// The device descriptor
7888
//
@@ -156,13 +166,19 @@ struct scap
156166
// Anonymous struct with bpf stuff
157167
struct
158168
{
159-
int m_bpf_prog_fds[BPF_PROGS_MAX];
169+
char m_filepath[SCAP_MAX_PATH_SIZE];
170+
struct bpf_prog m_bpf_progs[BPF_PROGS_MAX];
160171
int m_bpf_prog_cnt;
161172
bool m_bpf_fillers[BPF_PROGS_MAX];
162-
int m_bpf_event_fd[BPF_PROGS_MAX];
163173
int m_bpf_map_fds[BPF_MAPS_MAX];
164174
int m_bpf_prog_array_map_idx;
165175
};
176+
// Anonymous struct with the tracepoints and kprobes of interest
177+
struct {
178+
struct kt_index kt_indices[BPF_PROGS_MAX];
179+
int m_bpf_prog_real_size; // the real size of bpf program in probe.o
180+
};
181+
166182

167183
// The set of process names that are suppressed
168184
char **m_suppressed_comms;

userspace/libscap/scap.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2200,6 +2200,10 @@ int32_t scap_unset_eventmask(scap_t* handle, uint32_t event_id) {
22002200
#endif
22012201
}
22022202

2203+
//
// Enable or disable the kprobe/tracepoint identified by kt.
// Thin dispatcher: the BPF backend performs the actual attach/detach.
//
int32_t scap_set_ktmask(scap_t* handle, uint32_t kt, bool enabled)
{
	return scap_set_ktmask_bpf(handle, kt, enabled);
}
2206+
22032207
uint32_t scap_event_get_dump_flags(scap_t* handle)
22042208
{
22052209
return handle->m_last_evt_dump_flags;

userspace/libscap/scap.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -994,7 +994,14 @@ int32_t scap_set_eventmask(scap_t* handle, uint32_t event_id);
994994
*/
995995
int32_t scap_unset_eventmask(scap_t* handle, uint32_t event_id);
996996

997+
/*!
998+
\brief Enable or disable the kprobe/tracepoint event identified by kt.
997999
1000+
\param handle Handle to the capture instance.
1001+
\param enabled true to attach the event, false to detach it.
1002+
\note This function can only be called for live captures.
1003+
*/
1004+
int32_t scap_set_ktmask(scap_t* handle, uint32_t kt, bool enabled);
9981005
/*!
9991006
\brief Get the root directory of the system. This usually changes
10001007
if running in a container, so that all the information for the

userspace/libscap/scap_bpf.c

Lines changed: 163 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -522,6 +522,7 @@ static int32_t load_tracepoint(scap_t* handle, const char *event, struct bpf_ins
522522
bool is_raw_tracepoint = strncmp(event, "raw_tracepoint/", 15) == 0;
523523

524524
insns_cnt = size / sizeof(struct bpf_insn);
525+
char *full_event = event;
525526

526527
attr.type = PERF_TYPE_TRACEPOINT;
527528
attr.sample_type = PERF_SAMPLE_RAW;
@@ -593,7 +594,21 @@ static int32_t load_tracepoint(scap_t* handle, const char *event, struct bpf_ins
593594

594595
free(error);
595596

596-
handle->m_bpf_prog_fds[handle->m_bpf_prog_cnt++] = fd;
597+
handle->m_bpf_progs[handle->m_bpf_prog_cnt].fd = fd;
598+
strncpy(handle->m_bpf_progs[handle->m_bpf_prog_cnt].name, full_event, NAME_MAX);
599+
600+
601+
//When loading all eBPF programs for the first time, update the kt_indices.
602+
if(handle->m_bpf_prog_cnt + 1 > handle->m_bpf_prog_real_size){
603+
strncpy(handle->kt_indices[handle->m_bpf_prog_real_size].name, full_event, NAME_MAX);
604+
handle->kt_indices[handle->m_bpf_prog_real_size].index = handle->m_bpf_prog_cnt;
605+
handle->kt_indices[handle->m_bpf_prog_real_size].interest = true;
606+
handle->m_bpf_prog_real_size++;
607+
}
608+
609+
handle->m_bpf_prog_cnt++;
610+
//printf("prog_type:%d, insns_cnt:%d, license: %s, fd: %d\n", program_type, insns_cnt, license, fd);
611+
597612

598613
if(memcmp(event, "filler/", sizeof("filler/") - 1) == 0)
599614
{
@@ -687,14 +702,27 @@ static int32_t load_tracepoint(scap_t* handle, const char *event, struct bpf_ins
687702
return SCAP_FAILURE;
688703
}
689704
}
690-
691-
handle->m_bpf_event_fd[handle->m_bpf_prog_cnt - 1] = efd;
705+
handle->m_bpf_progs[handle->m_bpf_prog_cnt - 1].efd = efd;
692706

693707
return SCAP_SUCCESS;
694708
}
695709

696710
#ifndef MINIMAL_BUILD
697-
static int32_t load_bpf_file(scap_t *handle, const char *path)
711+
712+
/*
 * Return whether the program section named event_name is currently of
 * interest (i.e. should be attached).
 *
 * Sections that never appeared in kt_indices default to enabled, so
 * fillers and any other non-kprobe/tracepoint sections are always loaded.
 * The parameter is const-qualified since the lookup never mutates it.
 */
static bool is_kt_enabled(scap_t *handle, const char *event_name)
{
	int i;

	for(i = 0; i < handle->m_bpf_prog_real_size; i++)
	{
		if(strcmp(event_name, handle->kt_indices[i].name) == 0)
		{
			return handle->kt_indices[i].interest;
		}
	}

	// Unknown sections are enabled by default.
	return true;
}
724+
725+
static int32_t load_bpf_file(scap_t *handle)
698726
{
699727
int j;
700728
int maps_shndx = 0;
@@ -706,8 +734,8 @@ static int32_t load_bpf_file(scap_t *handle, const char *path)
706734
Elf_Data *symbols = NULL;
707735
char *shname;
708736
char *shname_prog;
709-
int nr_maps = 0;
710-
struct bpf_map_data maps[BPF_MAPS_MAX];
737+
static int nr_maps = 0;
738+
static struct bpf_map_data maps[BPF_MAPS_MAX];
711739
struct utsname osname;
712740
int32_t res = SCAP_FAILURE;
713741

@@ -723,10 +751,10 @@ static int32_t load_bpf_file(scap_t *handle, const char *path)
723751
return SCAP_FAILURE;
724752
}
725753

726-
int program_fd = open(path, O_RDONLY, 0);
754+
int program_fd = open(handle->m_filepath, O_RDONLY, 0);
727755
if(program_fd < 0)
728756
{
729-
snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "can't open BPF probe '%s': %s", path, scap_strerror(handle, errno));
757+
snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "can't open BPF probe '%s': %s", handle->m_filepath, scap_strerror(handle, errno));
730758
return SCAP_FAILURE;
731759
}
732760

@@ -788,9 +816,11 @@ static int32_t load_bpf_file(scap_t *handle, const char *path)
788816
snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "missing SHT_SYMTAB section");
789817
goto cleanup;
790818
}
791-
792-
if(maps_shndx)
819+
//Map initialization occurs only upon the map's first load.
820+
static bool first_load_map = true;
821+
if(maps_shndx && first_load_map)
793822
{
823+
first_load_map = false;
794824
if(load_elf_maps_section(handle, maps, maps_shndx, elf, symbols, strtabidx, &nr_maps) != SCAP_SUCCESS)
795825
{
796826
goto cleanup;
@@ -833,7 +863,19 @@ static int32_t load_bpf_file(scap_t *handle, const char *path)
833863
{
834864
continue;
835865
}
836-
866+
if(is_kt_enabled(handle, shname))
867+
{
868+
bool already_attached = false;
869+
int i;
870+
for(i = 0; i < handle->m_bpf_prog_cnt && !already_attached; i++)
871+
{
872+
if(strcmp(handle->m_bpf_progs[i].name, shname) == 0)
873+
{
874+
already_attached = true;
875+
}
876+
}
877+
if(!already_attached)
878+
{
837879
if(memcmp(shname, "tracepoint/", sizeof("tracepoint/") - 1) == 0 ||
838880
memcmp(shname, "raw_tracepoint/", sizeof("raw_tracepoint/") - 1) == 0 ||
839881
memcmp(shname, "kprobe/", sizeof("kprobe/") - 1) == 0 ||
@@ -851,6 +893,9 @@ static int32_t load_bpf_file(scap_t *handle, const char *path)
851893
goto cleanup;
852894
}
853895
}
896+
}
897+
}
898+
854899
}
855900

856901
res = SCAP_SUCCESS;
@@ -1263,6 +1308,19 @@ int32_t scap_bpf_enable_tracers_capture(scap_t* handle)
12631308
return SCAP_SUCCESS;
12641309
}
12651310

1311+
static void close_prog(struct bpf_prog *prog)
1312+
{
1313+
if(prog->efd > 0)
1314+
{
1315+
int res = close(prog->efd);
1316+
}
1317+
if(prog->fd > 0)
1318+
{
1319+
int res = close(prog->fd);
1320+
}
1321+
memset(prog, 0, sizeof(*prog));
1322+
}
1323+
12661324
int32_t scap_bpf_close(scap_t *handle)
12671325
{
12681326
int j;
@@ -1291,23 +1349,16 @@ int32_t scap_bpf_close(scap_t *handle)
12911349
}
12921350
}
12931351

1294-
for(j = 0; j < sizeof(handle->m_bpf_event_fd) / sizeof(handle->m_bpf_event_fd[0]); ++j)
1295-
{
1296-
if(handle->m_bpf_event_fd[j] > 0)
1352+
for(j = 0; j < sizeof(handle->m_bpf_progs) / sizeof(handle->m_bpf_progs[0]); ++j)
12971353
{
1298-
close(handle->m_bpf_event_fd[j]);
1299-
handle->m_bpf_event_fd[j] = 0;
1300-
}
1354+
close_prog(&handle->m_bpf_progs[j]);
13011355
}
13021356

1303-
for(j = 0; j < sizeof(handle->m_bpf_prog_fds) / sizeof(handle->m_bpf_prog_fds[0]); ++j)
1304-
{
1305-
if(handle->m_bpf_prog_fds[j] > 0)
1357+
for(j = 0; j < handle->m_bpf_prog_real_size; ++j)
13061358
{
1307-
close(handle->m_bpf_prog_fds[j]);
1308-
handle->m_bpf_prog_fds[j] = 0;
1359+
handle->kt_indices[j].index = -1;
13091360
}
1310-
}
1361+
13111362

13121363
for(j = 0; j < sizeof(handle->m_bpf_map_fds) / sizeof(handle->m_bpf_map_fds[0]); ++j)
13131364
{
@@ -1324,6 +1375,93 @@ int32_t scap_bpf_close(scap_t *handle)
13241375
return SCAP_SUCCESS;
13251376
}
13261377

1378+
/*
 * Attach (PPM_IOCTL_MASK_SET_TP) or detach (PPM_IOCTL_MASK_UNSET_TP) the
 * kprobe/tracepoint program tracked by kt_indices[kt_index].
 *
 * The m_bpf_progs array is kept dense: when a program is detached the
 * entries after it are shifted left, and every cached kt_indices[].index
 * that pointed past the removed slot is re-aligned.
 *
 * Fixes vs. the original:
 *  - kt_index is uint32_t, so `kt_index < 0` was always false; the upper
 *    bound also used `>` instead of `>=`, allowing a read one past the
 *    last valid kt_indices slot.
 *  - after the memmove compaction the other kt_indices entries kept their
 *    old prog indices, so a later unset would close the wrong fds.
 *  - on SET, the reattached program's index was never recorded, leaving
 *    it permanently at -1.
 */
static int32_t scap_bpf_handle_kt_mask(scap_t *handle, uint32_t op, uint32_t kt_index)
{
	int i;

	// Valid slots are 0 .. m_bpf_prog_real_size - 1; kt_index is unsigned,
	// so only the upper bound needs checking.
	if(kt_index >= (uint32_t)handle->m_bpf_prog_real_size)
	{
		return SCAP_SUCCESS;
	}

	int prg_idx = handle->kt_indices[kt_index].index;

	// We want to unload a never loaded tracepoint: nothing to do.
	if(prg_idx == -1 && op != PPM_IOCTL_MASK_SET_TP)
	{
		return SCAP_SUCCESS;
	}
	// We want to load an already loaded tracepoint: nothing to do.
	if(prg_idx >= 0 && op != PPM_IOCTL_MASK_UNSET_TP)
	{
		return SCAP_SUCCESS;
	}

	if(op == PPM_IOCTL_MASK_UNSET_TP)
	{
		// Close the event and program fds, shrink the prog array by
		// shifting the remaining entries left, and clear the freed slot.
		handle->kt_indices[kt_index].index = -1;
		handle->kt_indices[kt_index].interest = false;

		close_prog(&handle->m_bpf_progs[prg_idx]);
		handle->m_bpf_prog_cnt--;
		size_t byte_size = (handle->m_bpf_prog_cnt - prg_idx) * sizeof(handle->m_bpf_progs[prg_idx]);
		if(byte_size > 0)
		{
			memmove(&handle->m_bpf_progs[prg_idx], &handle->m_bpf_progs[prg_idx + 1], byte_size);
		}
		memset(&handle->m_bpf_progs[handle->m_bpf_prog_cnt], 0, sizeof(handle->m_bpf_progs[handle->m_bpf_prog_cnt]));

		// The shift above moved every program after prg_idx one slot to
		// the left; re-align the cached indices so later set/unset
		// operations act on the right fds.
		for(i = 0; i < handle->m_bpf_prog_real_size; i++)
		{
			if(handle->kt_indices[i].index > prg_idx)
			{
				handle->kt_indices[i].index--;
			}
		}

		return SCAP_SUCCESS;
	}

	// SET: mark the program as interesting and reload the probe file;
	// load_bpf_file() skips sections that are already attached.
	handle->kt_indices[kt_index].interest = true;
	int32_t res = load_bpf_file(handle);
	if(res == SCAP_SUCCESS)
	{
		// Record where the reattached program landed so a later UNSET
		// can find it again.
		for(i = 0; i < handle->m_bpf_prog_cnt; i++)
		{
			if(strcmp(handle->m_bpf_progs[i].name, handle->kt_indices[kt_index].name) == 0)
			{
				handle->kt_indices[kt_index].index = i;
				break;
			}
		}
	}
	return res;
}
1421+
1422+
/*
 * Validate op and kt, then dispatch to the BPF implementation.
 *
 * Fixes vs. the original:
 *  - the !HAS_CAPTURE branch ran only when handle was NULL (every
 *    non-NULL handle returned earlier), so its snprintf into
 *    handle->m_lasterr dereferenced NULL; the NULL check now comes first
 *    and fails without touching m_lasterr.
 *  - removed the unreachable `break` after `return` and the dead tail
 *    code after the unconditional dispatch.
 *
 * \return SCAP_SUCCESS on success, SCAP_FAILURE otherwise; on failure
 * m_lasterr describes the problem (when a handle is available).
 */
static int32_t scap_handle_ktmask(scap_t* handle, uint32_t op, uint32_t kt)
{
	if(handle == NULL)
	{
		// No handle means no m_lasterr buffer to report into.
		return SCAP_FAILURE;
	}

	switch(op)
	{
	case PPM_IOCTL_MASK_SET_TP:
	case PPM_IOCTL_MASK_UNSET_TP:
		break;

	default:
		snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "%s(%d) internal error", __FUNCTION__, op);
		ASSERT(false);
		return SCAP_FAILURE;
	}

	if(kt >= BPF_PROGS_MAX)
	{
		snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "%s(%d) wrong param", __FUNCTION__, kt);
		ASSERT(false);
		return SCAP_FAILURE;
	}

#if !defined(HAS_CAPTURE) || defined(_WIN32)
	snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "tpmask not supported on %s", PLATFORM_NAME);
	return SCAP_FAILURE;
#else
	return scap_bpf_handle_kt_mask(handle, op, kt);
#endif // HAS_CAPTURE
}
1461+
/*
 * Public entry point: translate the boolean `enabled` into the
 * corresponding SET/UNSET opcode and forward to the dispatcher.
 */
int32_t scap_set_ktmask_bpf(scap_t* handle, uint32_t kt, bool enabled)
{
	uint32_t op = enabled ? PPM_IOCTL_MASK_SET_TP : PPM_IOCTL_MASK_UNSET_TP;
	return scap_handle_ktmask(handle, op, kt);
}
1464+
13271465
//
13281466
// This is completely horrible, revisit this shameful code
13291467
// with a proper solution
@@ -1491,8 +1629,8 @@ int32_t scap_bpf_load(scap_t *handle, const char *bpf_probe)
14911629
ASSERT(false);
14921630
return SCAP_FAILURE;
14931631
}
1494-
1495-
if(load_bpf_file(handle, bpf_probe) != SCAP_SUCCESS)
1632+
snprintf(handle->m_filepath, SCAP_MAX_PATH_SIZE, "%s", bpf_probe);
1633+
if(load_bpf_file(handle) != SCAP_SUCCESS)
14961634
{
14971635
return SCAP_FAILURE;
14981636
}

userspace/libscap/scap_bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ int32_t scap_bpf_get_n_tracepoint_hit(scap_t* handle, long* ret);
4949
int32_t scap_bpf_enable_skb_capture(scap_t *handle, const char *ifname);
5050
int32_t scap_bpf_disable_skb_capture(scap_t *handle);
5151
int32_t scap_bpf_handle_eventmask(scap_t* handle, uint32_t op, uint32_t event_id);
52+
int32_t scap_set_ktmask_bpf(scap_t* handle, uint32_t kt, bool enabled);
5253

5354
static inline scap_evt *scap_bpf_evt_from_perf_sample(void *evt)
5455
{

userspace/libsinsp/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ set(SINSP_SOURCES
8383
threadinfo.cpp
8484
tuples.cpp
8585
sinsp.cpp
86+
sinsp_tp.cpp
8687
stats.cpp
8788
table.cpp
8889
token_bucket.cpp

0 commit comments

Comments
 (0)