xref: /linux/tools/lib/bpf/usdt.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3 #include <ctype.h>
4 #include <stdio.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <libelf.h>
8 #include <gelf.h>
9 #include <unistd.h>
10 #include <linux/ptrace.h>
11 #include <linux/kernel.h>
12 
13 /* s8 is marked as poison, but it's also a riscv register name, so alias it first */
14 #if defined(__riscv)
15 #define rv_s8 s8
16 #endif
17 
18 #include "bpf.h"
19 #include "libbpf.h"
20 #include "libbpf_common.h"
21 #include "libbpf_internal.h"
22 #include "hashmap.h"
23 #include "str_error.h"
24 
25 /* libbpf's USDT support consists of BPF-side state/code and user-space
26  * state/code working together in concert. BPF-side parts are defined in
27  * usdt.bpf.h header library. User-space state is encapsulated by struct
28  * usdt_manager and all the supporting code centered around usdt_manager.
29  *
30  * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map
31  * and IP-to-spec-ID map, which is an auxiliary map necessary for kernels that
32  * don't support BPF cookie (see below). These two maps are implicitly
33  * embedded into the user's BPF object file when the user's code includes
34  * usdt.bpf.h. This means that libbpf doesn't do anything special to create
35  * these USDT support maps. They are created by normal libbpf logic of
36  * instantiating BPF maps when opening and loading BPF object.
37  *
38  * As such, libbpf is basically unaware of the need to do anything
39  * USDT-related until the very first call to bpf_program__attach_usdt(), which
40  * can be called by user explicitly or happen automatically during skeleton
41  * attach (or, equivalently, through generic bpf_program__attach() call). At
42  * this point, libbpf will instantiate and initialize struct usdt_manager and
43  * store it in bpf_object. USDT manager is a per-BPF-object construct, as each
44  * independent BPF object might or might not have USDT programs and thus the
45  * associated USDT-related state. There is no coordination between two
46  * bpf_objects when it comes to USDT attachment; they are oblivious of each
47  * other's existence, and libbpf simply deals with each bpf_object's own
48  * USDT state.
49  *
50  * Quick crash course on USDTs.
51  *
52  * From a user-space application's point of view, a USDT is essentially just
53  * a slightly special function call that normally has zero overhead, unless it
54  * is being traced by some external entity (e.g., a BPF-based tool). Here's how
55  * a typical application can trigger a USDT probe:
56  *
57  * #include <sys/sdt.h>  // provided by systemtap-sdt-devel package
58  * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
59  *
60  * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
61  *
62  * USDT is identified by its <provider-name>:<probe-name> pair of names. Each
63  * individual USDT has a fixed number of arguments (3 in the above example)
64  * and specifies values of each argument as if it was a function call.
65  *
66  * USDT call is actually not a function call, but is instead replaced by
67  * a single NOP instruction (thus zero overhead, effectively). But in addition
68  * to that, those USDT macros generate special SHT_NOTE ELF records in
69  * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
70  * `readelf -n <binary>`:
71  *
72  *   stapsdt              0x00000089       NT_STAPSDT (SystemTap probe descriptors)
73  *   Provider: test
74  *   Name: usdt12
75  *   Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
76  *   Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
77  *
78  * In this case we have USDT test:usdt12 with 12 arguments.
79  *
80  * Location and base are offsets used to calculate the absolute IP address of
81  * that NOP instruction, which the kernel can replace with an interrupt
82  * instruction to trigger instrumentation code (a BPF program, for our purposes).
83  *
84  * Semaphore above is an optional feature. It records the address of a 2-byte
85  * refcount variable (normally in the '.probes' ELF section) used to signal
86  * whether anything is currently attached to the USDT. This is useful for user
87  * applications if, for example, they need to prepare some arguments that are
88  * passed only to USDTs and that preparation is expensive. By checking if the
89  * USDT is "activated", an application can avoid paying those costs unnecessarily.
90  * Recent enough kernels have built-in support for automatically managing this
91  * refcount, which libbpf expects and relies on. If a USDT is defined without an
92  * associated semaphore, this value will be zero. See selftests for semaphore
93  * examples.
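 *
 * As a rough sketch of how an application can take advantage of this (modeled
 * on the BPF selftests; the exact macro and variable names below are
 * illustrative, not prescribed by this file), with systemtap's sdt.h the
 * application defines the semaphore itself and sdt.h picks it up by naming
 * convention once semaphores are enabled:
 *
 *   #define _SDT_HAS_SEMAPHORES 1
 *   #include <sys/sdt.h>
 *
 *   unsigned short my_usdt_provider_my_usdt_probe_name_semaphore
 *           __attribute__((section(".probes")));
 *
 *   if (my_usdt_provider_my_usdt_probe_name_semaphore > 0) {
 *           arg = expensive_preparation();  // only pay this cost when traced
 *           STAP_PROBE1(my_usdt_provider, my_usdt_probe_name, arg);
 *   }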
94  *
95  * Arguments is the most interesting part. This USDT specification string is
96  * providing information about all the USDT arguments and their locations. The
97  * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8)
98  * and whether the argument is signed or unsigned (negative size means signed).
99  * The part after the @ sign is an assembly-like definition of the argument
100  * location (see [0] for more details). Technically, the assembler can emit some
101  * pretty advanced definitions, but libbpf currently supports only the most
102  * common cases:
103  *   1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
104  *   2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
105  *      whose value is in register %rdx";
106  *   3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
107  *      specifies a signed 32-bit integer stored at offset -1204 bytes from the
108  *      memory address stored in %rbp.
109  *
110  *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
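 *
 * To make the mapping concrete, here is roughly how three of the tokens from
 * the example above are decoded into struct usdt_arg_spec by the parsing code
 * below (pt_regs offsets shown are x86-64 specific):
 *
 *   -4@$5          -> USDT_ARG_CONST:     val_off = 5, signed 4-byte value
 *   8@%rdx         -> USDT_ARG_REG:       reg_off = offsetof(struct pt_regs, rdx),
 *                                         unsigned 8-byte value
 *   -4@-1204(%rbp) -> USDT_ARG_REG_DEREF: val_off = -1204,
 *                                         reg_off = offsetof(struct pt_regs, rbp),
 *                                         signed 4-byte value
 *
 * On x86, the parser additionally handles the scale-index-base form
 * (USDT_ARG_SIB), e.g. -4@-96(%rbp,%rax,8), where an index register scaled by
 * a power-of-2 constant is added to the base register before dereferencing.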
111  *
112  * During attachment, libbpf parses all the relevant USDT specifications and
113  * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
114  * code through the spec map. This allows BPF applications to quickly fetch
115  * argument values at runtime using simple BPF-side code.
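 *
 * For illustration, a minimal BPF-side consumer might look like this (a
 * sketch built on the helpers from usdt.bpf.h; binary path and probe names
 * reuse the earlier hypothetical example):
 *
 *   SEC("usdt/./my_binary:my_usdt_provider:my_usdt_probe_name")
 *   int BPF_USDT(handle_probe, int x, long y, void *z)
 *   {
 *           long first_arg;
 *
 *           bpf_usdt_arg(ctx, 0, &first_arg);  // same value as 'x' above
 *           return 0;
 *   }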
116  *
117  * With basics out of the way, let's go over less immediately obvious aspects
118  * of supporting USDTs.
119  *
120  * First, there is no special USDT BPF program type. It is actually just
121  * a uprobe BPF program (which for the kernel, at least currently, is just a
122  * kprobe program, i.e., the BPF_PROG_TYPE_KPROBE program type). The only
123  * difference is that a uprobe is usually attached at the function entry, while
124  * a USDT will normally be somewhere inside the function. But it should always
125  * be pointing at a NOP instruction, which makes such uprobes the fastest uprobe
126  * kind.
127  *
128  * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
129  * macro invocations can end up being inlined many, many times, depending on
130  * the specifics of each individual user application. So a single conceptual USDT
131  * (identified by its provider:name pair of identifiers) is, generally speaking,
132  * multiple uprobe locations (USDT call sites) in different places in the user
133  * application. Further, again due to inlining, each USDT call site might end
134  * up having the same argument #N located in a different place. In one call
135  * site it could be a constant, in another it will end up in a register, and in
136  * yet another it could be some other register or even somewhere on the stack.
137  *
138  * As such, "attaching to USDT" means (in the general case) attaching the same
139  * uprobe BPF program to multiple target locations in the user application, each
140  * potentially having a completely different USDT spec associated with it.
141  * To wire all this up together libbpf allocates a unique integer spec ID for
142  * each unique USDT spec. Spec IDs are allocated as sequential small integers
143  * so that they can be used as keys in an array BPF map (for performance reasons).
144  * Spec ID allocation and accounting is a big part of what usdt_manager is
145  * about. This state has to be maintained per BPF object and coordinated
146  * across different USDT attachments within the same BPF object.
147  *
148  * Spec ID is the key in the spec BPF map; the value is the actual USDT spec laid
149  * out as struct usdt_spec. Each invocation of the BPF program at runtime needs
150  * to know its associated spec ID. It gets it either through the BPF cookie,
151  * which libbpf sets to the spec ID at attach time, or, if the kernel is too old
152  * to support BPF cookie, through the IP-to-spec-ID map that libbpf maintains in
153  * that case. The latter means that some modes of operation can't be supported
154  * without BPF cookie. One such mode is attaching to a shared library
155  * "generically", without specifying a target process. In that case, it's
156  * impossible to calculate absolute IP addresses for the IP-to-spec-ID map, and
157  * thus such mode is not supported without BPF cookie support.
158  *
159  * Note that libbpf uses BPF cookie functionality for its own internal
160  * needs, so the user can't rely on the BPF cookie feature directly. To that end,
161  * libbpf provides conceptually equivalent USDT cookie support. It's still a u64
162  * user-provided value that can be associated with a USDT attachment. Note that
163  * this will be the same value for all USDT call sites within the same single
164  * *logical* USDT attachment. This makes sense because, to the user, attaching
165  * to a USDT looks like a single BPF program triggered for a single USDT probe.
166  * The fact that this is done at multiple actual locations is a mostly hidden
167  * implementation detail. This USDT cookie value can be fetched with the
168  * bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
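 *
 * A minimal sketch of how the two sides connect (names are illustrative):
 *
 *   // user space, at attach time:
 *   LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0x1234);
 *   link = bpf_program__attach_usdt(prog, -1, "./my_binary",
 *                                   "my_usdt_provider", "my_usdt_probe_name",
 *                                   &opts);
 *
 *   // BPF side, inside the USDT handler:
 *   long cookie = bpf_usdt_cookie(ctx);  // 0x1234 at every call site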
169  *
170  * Lastly, while single USDT can have tons of USDT call sites, it doesn't
171  * necessarily have that many different USDT specs. It very well might be
172  * that 1000 USDT call sites only need 5 different USDT specs, because all the
173  * arguments are typically contained in a small set of registers or stack
174  * locations. As such, it's wasteful to allocate as many USDT spec IDs as
175  * there are USDT call sites. So libbpf tries to be frugal and performs
176  * on-the-fly deduplication during a single USDT attachment to only allocate
177  * the minimal required amount of unique USDT specs (and thus spec IDs). This
178  * is trivially achieved by using USDT spec string (Arguments string from USDT
179  * note) as a lookup key in a hashmap. USDT spec string uniquely defines
180  * everything about how to fetch USDT arguments, so two USDT call sites
181  * sharing USDT spec string can safely share the same USDT spec and spec ID.
182  * Note, this spec string deduplication is happening only during the same USDT
183  * attachment, so each USDT spec shares the same USDT cookie value. This is
184  * not generally true for other USDT attachments within the same BPF object,
185  * as even if USDT spec string is the same, USDT cookie value can be
186  * different. It was deemed excessive to try to deduplicate across independent
187  * USDT attachments by taking into account USDT spec string *and* USDT cookie
188  * value, which would complicate spec ID accounting significantly for little
189  * gain.
190  */
191 
192 #define USDT_BASE_SEC ".stapsdt.base"
193 #define USDT_SEMA_SEC ".probes"
194 #define USDT_NOTE_SEC  ".note.stapsdt"
195 #define USDT_NOTE_TYPE 3
196 #define USDT_NOTE_NAME "stapsdt"
197 
198 /* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
199 enum usdt_arg_type {
200 	USDT_ARG_CONST,
201 	USDT_ARG_REG,
202 	USDT_ARG_REG_DEREF,
203 	USDT_ARG_SIB,
204 };
205 
206 /* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
207 struct usdt_arg_spec {
208 	__u64 val_off;
209 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
210 	enum usdt_arg_type arg_type: 8;
211 	__u16	idx_reg_off: 12;
212 	__u16	scale_bitshift: 4;
213 	__u8 __reserved: 8;     /* keep reg_off offset stable */
214 #else
215 	__u8 __reserved: 8;     /* keep reg_off offset stable */
216 	__u16	idx_reg_off: 12;
217 	__u16	scale_bitshift: 4;
218 	enum usdt_arg_type arg_type: 8;
219 #endif
220 	short reg_off;
221 	bool arg_signed;
222 	char arg_bitshift;
223 };
224 
225 /* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
226 #define USDT_MAX_ARG_CNT 12
227 
228 /* should match struct __bpf_usdt_spec from usdt.bpf.h */
229 struct usdt_spec {
230 	struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
231 	__u64 usdt_cookie;
232 	short arg_cnt;
233 };
234 
235 struct usdt_note {
236 	const char *provider;
237 	const char *name;
238 	/* USDT args specification string, e.g.:
239 	 * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
240 	 */
241 	const char *args;
242 	long loc_addr;
243 	long base_addr;
244 	long sema_addr;
245 };
246 
247 struct usdt_target {
248 	long abs_ip;
249 	long rel_ip;
250 	long sema_off;
251 	struct usdt_spec spec;
252 	const char *spec_str;
253 };
254 
255 struct usdt_manager {
256 	struct bpf_map *specs_map;
257 	struct bpf_map *ip_to_spec_id_map;
258 
259 	int *free_spec_ids;
260 	size_t free_spec_cnt;
261 	size_t next_free_spec_id;
262 
263 	bool has_bpf_cookie;
264 	bool has_sema_refcnt;
265 	bool has_uprobe_multi;
266 };
267 
268 struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
269 {
270 	static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
271 	struct usdt_manager *man;
272 	struct bpf_map *specs_map, *ip_to_spec_id_map;
273 
274 	specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
275 	ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
276 	if (!specs_map || !ip_to_spec_id_map) {
277 		pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
278 		return ERR_PTR(-ESRCH);
279 	}
280 
281 	man = calloc(1, sizeof(*man));
282 	if (!man)
283 		return ERR_PTR(-ENOMEM);
284 
285 	man->specs_map = specs_map;
286 	man->ip_to_spec_id_map = ip_to_spec_id_map;
287 
288 	/* Detect if BPF cookie is supported for kprobes.
289 	 * We don't need IP-to-ID mapping if we can use BPF cookies.
290 	 * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
291 	 */
292 	man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
293 
294 	/* Detect kernel support for automatic refcounting of USDT semaphore.
295 	 * If this is not supported, USDTs with semaphores will not be supported.
296 	 * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
297 	 */
298 	man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
299 
300 	/*
301 	 * Detect kernel support for uprobe multi link to be used for attaching
302 	 * usdt probes.
303 	 */
304 	man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
305 	return man;
306 }
307 
308 void usdt_manager_free(struct usdt_manager *man)
309 {
310 	if (IS_ERR_OR_NULL(man))
311 		return;
312 
313 	free(man->free_spec_ids);
314 	free(man);
315 }
316 
317 static int sanity_check_usdt_elf(Elf *elf, const char *path)
318 {
319 	GElf_Ehdr ehdr;
320 	int endianness;
321 
322 	if (elf_kind(elf) != ELF_K_ELF) {
323 		pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
324 		return -EBADF;
325 	}
326 
327 	switch (gelf_getclass(elf)) {
328 	case ELFCLASS64:
329 		if (sizeof(void *) != 8) {
330 			pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
331 			return -EBADF;
332 		}
333 		break;
334 	case ELFCLASS32:
335 		if (sizeof(void *) != 4) {
336 			pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
337 			return -EBADF;
338 		}
339 		break;
340 	default:
341 		pr_warn("usdt: unsupported ELF class for '%s'\n", path);
342 		return -EBADF;
343 	}
344 
345 	if (!gelf_getehdr(elf, &ehdr))
346 		return -EINVAL;
347 
348 	if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
349 		pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
350 			path, ehdr.e_type);
351 		return -EBADF;
352 	}
353 
354 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
355 	endianness = ELFDATA2LSB;
356 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
357 	endianness = ELFDATA2MSB;
358 #else
359 # error "Unrecognized __BYTE_ORDER__"
360 #endif
361 	if (endianness != ehdr.e_ident[EI_DATA]) {
362 		pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
363 		return -EBADF;
364 	}
365 
366 	return 0;
367 }
368 
369 static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
370 {
371 	Elf_Scn *sec = NULL;
372 	size_t shstrndx;
373 
374 	if (elf_getshdrstrndx(elf, &shstrndx))
375 		return -EINVAL;
376 
377 	/* check if ELF is corrupted and avoid calling elf_strptr if yes */
378 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
379 		return -EINVAL;
380 
381 	while ((sec = elf_nextscn(elf, sec)) != NULL) {
382 		char *name;
383 
384 		if (!gelf_getshdr(sec, shdr))
385 			return -EINVAL;
386 
387 		name = elf_strptr(elf, shstrndx, shdr->sh_name);
388 		if (name && strcmp(sec_name, name) == 0) {
389 			*scn = sec;
390 			return 0;
391 		}
392 	}
393 
394 	return -ENOENT;
395 }
396 
397 struct elf_seg {
398 	long start;
399 	long end;
400 	long offset;
401 	bool is_exec;
402 };
403 
404 static int cmp_elf_segs(const void *_a, const void *_b)
405 {
406 	const struct elf_seg *a = _a;
407 	const struct elf_seg *b = _b;
408 
409 	return a->start < b->start ? -1 : 1;
410 }
411 
412 static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
413 {
414 	GElf_Phdr phdr;
415 	size_t n;
416 	int i, err;
417 	struct elf_seg *seg;
418 	void *tmp;
419 
420 	*seg_cnt = 0;
421 
422 	if (elf_getphdrnum(elf, &n)) {
423 		err = -errno;
424 		return err;
425 	}
426 
427 	for (i = 0; i < n; i++) {
428 		if (!gelf_getphdr(elf, i, &phdr)) {
429 			err = -errno;
430 			return err;
431 		}
432 
433 		pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
434 			 i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
435 			 (long)phdr.p_type, (long)phdr.p_flags);
436 		if (phdr.p_type != PT_LOAD)
437 			continue;
438 
439 		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
440 		if (!tmp)
441 			return -ENOMEM;
442 
443 		*segs = tmp;
444 		seg = *segs + *seg_cnt;
445 		(*seg_cnt)++;
446 
447 		seg->start = phdr.p_vaddr;
448 		seg->end = phdr.p_vaddr + phdr.p_memsz;
449 		seg->offset = phdr.p_offset;
450 		seg->is_exec = phdr.p_flags & PF_X;
451 	}
452 
453 	if (*seg_cnt == 0) {
454 		pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
455 		return -ESRCH;
456 	}
457 
458 	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
459 	return 0;
460 }
461 
462 static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
463 {
464 	char path[PATH_MAX], line[PATH_MAX], mode[16];
465 	size_t seg_start, seg_end, seg_off;
466 	struct elf_seg *seg;
467 	int tmp_pid, i, err;
468 	FILE *f;
469 
470 	*seg_cnt = 0;
471 
472 	/* Handle containerized binaries only accessible from
473 	 * /proc/<pid>/root/<path>. They will be reported as just /<path> in
474 	 * /proc/<pid>/maps.
475 	 */
476 	if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
477 		goto proceed;
478 
479 	if (!realpath(lib_path, path)) {
480 		pr_warn("usdt: failed to get absolute path of '%s' (err %s), using path as is...\n",
481 			lib_path, errstr(-errno));
482 		libbpf_strlcpy(path, lib_path, sizeof(path));
483 	}
484 
485 proceed:
486 	sprintf(line, "/proc/%d/maps", pid);
487 	f = fopen(line, "re");
488 	if (!f) {
489 		err = -errno;
490 		pr_warn("usdt: failed to open '%s' to get base addr of '%s': %s\n",
491 			line, lib_path, errstr(err));
492 		return err;
493 	}
494 
495 	/* We need to handle lines with no path at the end:
496 	 *
497 	 * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613      /usr/lib64/libc-2.17.so
498 	 * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
499 	 * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598    /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
500 	 */
501 	while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
502 		      &seg_start, &seg_end, mode, &seg_off, line) == 5) {
503 		void *tmp;
504 
505 		/* to handle the no-path case (see above) we need to capture the
506 		 * line without skipping any whitespace, so we strip leading
507 		 * whitespace manually here
508 		 */
509 		i = 0;
510 		while (isblank(line[i]))
511 			i++;
512 		if (strcmp(line + i, path) != 0)
513 			continue;
514 
515 		pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
516 			 path, seg_start, seg_end, mode, seg_off);
517 
518 		/* ignore non-executable sections for shared libs */
519 		if (mode[2] != 'x')
520 			continue;
521 
522 		tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
523 		if (!tmp) {
524 			err = -ENOMEM;
525 			goto err_out;
526 		}
527 
528 		*segs = tmp;
529 		seg = *segs + *seg_cnt;
530 		*seg_cnt += 1;
531 
532 		seg->start = seg_start;
533 		seg->end = seg_end;
534 		seg->offset = seg_off;
535 		seg->is_exec = true;
536 	}
537 
538 	if (*seg_cnt == 0) {
539 		pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
540 			lib_path, path, pid);
541 		err = -ESRCH;
542 		goto err_out;
543 	}
544 
545 	qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
546 	err = 0;
547 err_out:
548 	fclose(f);
549 	return err;
550 }
551 
552 static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
553 {
554 	struct elf_seg *seg;
555 	int i;
556 
557 	/* for ELF binaries (both executables and shared libraries), we are
558 	 * given a virtual address (absolute for executables, relative for
559 	 * libraries) which should fall within the address range [seg_start, seg_end)
560 	 */
561 	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
562 		if (seg->start <= virtaddr && virtaddr < seg->end)
563 			return seg;
564 	}
565 	return NULL;
566 }
567 
568 static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
569 {
570 	struct elf_seg *seg;
571 	int i;
572 
573 	/* for VMA segments from the /proc/<pid>/maps file, the provided "address"
574 	 * is actually a file offset, so it should fall within the logical
575 	 * offset-based range [offset_start, offset_end)
576 	 */
577 	for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
578 		if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
579 			return seg;
580 	}
581 	return NULL;
582 }
583 
584 static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off,
585 			   size_t desc_off, struct usdt_note *usdt_note);
586 
587 static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
588 
589 static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
590 				const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
591 				struct usdt_target **out_targets, size_t *out_target_cnt)
592 {
593 	size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
594 	struct elf_seg *segs = NULL, *vma_segs = NULL;
595 	struct usdt_target *targets = NULL, *target;
596 	long base_addr = 0;
597 	Elf_Scn *notes_scn, *base_scn;
598 	GElf_Shdr base_shdr, notes_shdr;
599 	GElf_Ehdr ehdr;
600 	GElf_Nhdr nhdr;
601 	Elf_Data *data;
602 	int err;
603 
604 	*out_targets = NULL;
605 	*out_target_cnt = 0;
606 
607 	err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
608 	if (err) {
609 		pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
610 		return err;
611 	}
612 
613 	if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
614 		pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
615 		return -EINVAL;
616 	}
617 
618 	err = parse_elf_segs(elf, path, &segs, &seg_cnt);
619 	if (err) {
620 		pr_warn("usdt: failed to process ELF program segments for '%s': %s\n",
621 			path, errstr(err));
622 		goto err_out;
623 	}
624 
625 	/* .stapsdt.base ELF section is optional, but is used for prelink
626 	 * offset compensation (see a big comment further below)
627 	 */
628 	if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
629 		base_addr = base_shdr.sh_addr;
630 
631 	data = elf_getdata(notes_scn, 0);
632 	off = 0;
633 	while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
634 		long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
635 		struct usdt_note note;
636 		struct elf_seg *seg = NULL;
637 		void *tmp;
638 
639 		err = parse_usdt_note(&nhdr, data->d_buf, name_off, desc_off, &note);
640 		if (err)
641 			goto err_out;
642 
643 		if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
644 			continue;
645 
646 		/* We need to compensate for the "prelink effect". See [0] for details,
647 		 * relevant parts quoted here:
648 		 *
649 		 * Each SDT probe also expands into a non-allocated ELF note. You can
650 		 * find this by looking at SHT_NOTE sections and decoding the format;
651 		 * see below for details. Because the note is non-allocated, it means
652 		 * there is no runtime cost, and also preserved in both stripped files
653 		 * and .debug files.
654 		 *
655 		 * However, this means that prelink won't adjust the note's contents
656 		 * for address offsets. Instead, this is done via the .stapsdt.base
657 		 * section. This is a special section that is added to the text. We
658 		 * will only ever have one of these sections in a final link and it
659 		 * will only ever be one byte long. Nothing about this section itself
660 		 * matters, we just use it as a marker to detect prelink address
661 		 * adjustments.
662 		 *
663 		 * Each probe note records the link-time address of the .stapsdt.base
664 		 * section alongside the probe PC address. The decoder compares the
665 		 * base address stored in the note with the .stapsdt.base section's
666 		 * sh_addr. Initially these are the same, but the section header will
667 		 * be adjusted by prelink. So the decoder applies the difference to
668 		 * the probe PC address to get the correct prelinked PC address; the
669 		 * same adjustment is applied to the semaphore address, if any.
670 		 *
671 		 *   [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
672 		 */
673 		usdt_abs_ip = note.loc_addr;
674 		if (base_addr && note.base_addr)
675 			usdt_abs_ip += base_addr - note.base_addr;
676 
677 		/* When attaching uprobes (which is what USDTs basically are)
678 		 * kernel expects file offset to be specified, not a relative
679 		 * virtual address, so we need to translate virtual address to
680 		 * file offset, for both ET_EXEC and ET_DYN binaries.
681 		 */
682 		seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
683 		if (!seg) {
684 			err = -ESRCH;
685 			pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
686 				usdt_provider, usdt_name, path, usdt_abs_ip);
687 			goto err_out;
688 		}
689 		if (!seg->is_exec) {
690 			err = -ESRCH;
691 			pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
692 				path, seg->start, seg->end, usdt_provider, usdt_name,
693 				usdt_abs_ip);
694 			goto err_out;
695 		}
696 		/* translate from virtual address to file offset */
697 		usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
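		/* For example (made-up numbers): for a PT_LOAD segment with
		 * p_vaddr 0x549000, p_memsz 0x2000 and p_offset 0x148000, an
		 * absolute IP of 0x549df3 translates to file offset
		 * 0x549df3 - 0x549000 + 0x148000 = 0x148df3.
		 */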
698 
699 		if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
700 			/* If we don't have BPF cookie support but need to
701 			 * attach to a shared library, we'll need to know and
702 			 * record absolute addresses of attach points due to
703 			 * the need to lookup USDT spec by absolute IP of
704 			 * triggered uprobe. Doing this resolution is only
705 			 * possible when we have a specific PID of the process
706 			 * that's using specified shared library. BPF cookie
707 			 * removes the absolute address limitation as we don't
708 			 * need to do this lookup (we just use BPF cookie as
709 			 * an index of USDT spec), so for newer kernels with
710 			 * BPF cookie support libbpf supports USDT attachment
711 			 * to shared libraries with no PID filter.
712 			 */
713 			if (pid < 0) {
714 				pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
715 				err = -ENOTSUP;
716 				goto err_out;
717 			}
718 
719 			/* vma_segs are lazily initialized only if necessary */
720 			if (vma_seg_cnt == 0) {
721 				err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
722 				if (err) {
723 					pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %s\n",
724 						pid, path, errstr(err));
725 					goto err_out;
726 				}
727 			}
728 
729 			seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
730 			if (!seg) {
731 				err = -ESRCH;
732 				pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
733 					usdt_provider, usdt_name, path, usdt_rel_ip);
734 				goto err_out;
735 			}
736 
737 			usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
738 		}
739 
740 		pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
741 			 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
742 			 note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
743 			 seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
744 
745 		/* Adjust semaphore address to be a file offset */
746 		if (note.sema_addr) {
747 			if (!man->has_sema_refcnt) {
748 				pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
749 					usdt_provider, usdt_name, path);
750 				err = -ENOTSUP;
751 				goto err_out;
752 			}
753 
754 			seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
755 			if (!seg) {
756 				err = -ESRCH;
757 				pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
758 					usdt_provider, usdt_name, path, note.sema_addr);
759 				goto err_out;
760 			}
761 			if (seg->is_exec) {
762 				err = -ESRCH;
763 				pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
764 					path, seg->start, seg->end, usdt_provider, usdt_name,
765 					note.sema_addr);
766 				goto err_out;
767 			}
768 
769 			usdt_sema_off = note.sema_addr - seg->start + seg->offset;
770 
771 			pr_debug("usdt: sema  for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
772 				 usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
773 				 path, note.sema_addr, note.base_addr, usdt_sema_off,
774 				 seg->start, seg->end, seg->offset);
775 		}
776 
777 		/* Record adjusted addresses and offsets and parse USDT spec */
778 		tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
779 		if (!tmp) {
780 			err = -ENOMEM;
781 			goto err_out;
782 		}
783 		targets = tmp;
784 
785 		target = &targets[target_cnt];
786 		memset(target, 0, sizeof(*target));
787 
788 		target->abs_ip = usdt_abs_ip;
789 		target->rel_ip = usdt_rel_ip;
790 		target->sema_off = usdt_sema_off;
791 
792 		/* note.args references strings from the ELF data itself, so they can
793 		 * be referenced safely until the elf_end() call
794 		 */
795 		target->spec_str = note.args;
796 
797 		err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
798 		if (err)
799 			goto err_out;
800 
801 		target_cnt++;
802 	}
803 
804 	*out_targets = targets;
805 	*out_target_cnt = target_cnt;
806 	err = target_cnt;
807 
808 err_out:
809 	free(segs);
810 	free(vma_segs);
811 	if (err < 0)
812 		free(targets);
813 	return err;
814 }
815 
816 struct bpf_link_usdt {
817 	struct bpf_link link;
818 
819 	struct usdt_manager *usdt_man;
820 
821 	size_t spec_cnt;
822 	int *spec_ids;
823 
824 	size_t uprobe_cnt;
825 	struct {
826 		long abs_ip;
827 		struct bpf_link *link;
828 	} *uprobes;
829 
830 	struct bpf_link *multi_link;
831 };
832 
833 static int bpf_link_usdt_detach(struct bpf_link *link)
834 {
835 	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
836 	struct usdt_manager *man = usdt_link->usdt_man;
837 	int i;
838 
839 	bpf_link__destroy(usdt_link->multi_link);
840 
841 	/* When multi_link is used, uprobe_cnt is 0 */
842 	for (i = 0; i < usdt_link->uprobe_cnt; i++) {
843 		/* detach underlying uprobe link */
844 		bpf_link__destroy(usdt_link->uprobes[i].link);
845 		/* there is no need to update specs map because it will be
846 		 * unconditionally overwritten on subsequent USDT attaches,
847 		 * but if BPF cookies are not used we need to remove entry
848 		 * from ip_to_spec_id map, otherwise we'll run into false
849 		 * conflicting IP errors
850 		 */
851 		if (!man->has_bpf_cookie) {
852 			/* not much we can do about errors here */
853 			(void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
854 						  &usdt_link->uprobes[i].abs_ip);
855 		}
856 	}
857 
858 	/* try to return the list of previously used spec IDs to usdt_manager
859 	 * for future reuse for subsequent USDT attaches
860 	 */
861 	if (!man->free_spec_ids) {
862 		/* if there were no free spec IDs yet, just transfer our IDs */
863 		man->free_spec_ids = usdt_link->spec_ids;
864 		man->free_spec_cnt = usdt_link->spec_cnt;
865 		usdt_link->spec_ids = NULL;
866 	} else {
867 		/* otherwise concat IDs */
868 		size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
869 		int *new_free_ids;
870 
871 		new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
872 						   sizeof(*new_free_ids));
873 		/* If we couldn't resize free_spec_ids, we'll just leak
874 		 * a bunch of free IDs; this is very unlikely to happen and if
875 		 * system is so exhausted on memory, it's the least of user's
876 		 * concerns, probably.
877 		 * So just do our best here to return those IDs to usdt_manager.
878 		 * Another edge case when we can legitimately get NULL is when
879 		 * new_cnt is zero, which can happen in some edge cases, so we
880 		 * need to be careful about that.
881 		 */
882 		if (new_free_ids || new_cnt == 0) {
883 			memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
884 			       usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
885 			man->free_spec_ids = new_free_ids;
886 			man->free_spec_cnt = new_cnt;
887 		}
888 	}
889 
890 	return 0;
891 }
892 
893 static void bpf_link_usdt_dealloc(struct bpf_link *link)
894 {
895 	struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
896 
897 	free(usdt_link->spec_ids);
898 	free(usdt_link->uprobes);
899 	free(usdt_link);
900 }
901 
902 static size_t specs_hash_fn(long key, void *ctx)
903 {
904 	return str_hash((char *)key);
905 }
906 
907 static bool specs_equal_fn(long key1, long key2, void *ctx)
908 {
909 	return strcmp((char *)key1, (char *)key2) == 0;
910 }
911 
912 static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
913 			    struct bpf_link_usdt *link, struct usdt_target *target,
914 			    int *spec_id, bool *is_new)
915 {
916 	long tmp;
917 	void *new_ids;
918 	int err;
919 
920 	/* check if we already allocated spec ID for this spec string */
921 	if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
922 		*spec_id = tmp;
923 		*is_new = false;
924 		return 0;
925 	}
926 
927 	/* otherwise it's a new ID that needs to be set up in specs map and
928 	 * returned back to usdt_manager when USDT link is detached
929 	 */
930 	new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
931 	if (!new_ids)
932 		return -ENOMEM;
933 	link->spec_ids = new_ids;
934 
935 	/* get next free spec ID, giving preference to free list, if not empty */
936 	if (man->free_spec_cnt) {
937 		*spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
938 
939 		/* cache spec ID for current spec string for future lookups */
940 		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
941 		if (err)
942 			 return err;
943 
944 		man->free_spec_cnt--;
945 	} else {
946 		/* don't allocate spec ID bigger than what fits in specs map */
947 		if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
948 			return -E2BIG;
949 
950 		*spec_id = man->next_free_spec_id;
951 
952 		/* cache spec ID for current spec string for future lookups */
953 		err = hashmap__add(specs_hash, target->spec_str, *spec_id);
954 		if (err)
955 			 return err;
956 
957 		man->next_free_spec_id++;
958 	}
959 
960 	/* remember new spec ID in the link for later return back to free list on detach */
961 	link->spec_ids[link->spec_cnt] = *spec_id;
962 	link->spec_cnt++;
963 	*is_new = true;
964 	return 0;
965 }
966 
967 struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
968 					  pid_t pid, const char *path,
969 					  const char *usdt_provider, const char *usdt_name,
970 					  __u64 usdt_cookie)
971 {
972 	unsigned long *offsets = NULL, *ref_ctr_offsets = NULL;
973 	int i, err, spec_map_fd, ip_map_fd;
974 	LIBBPF_OPTS(bpf_uprobe_opts, opts);
975 	struct hashmap *specs_hash = NULL;
976 	struct bpf_link_usdt *link = NULL;
977 	struct usdt_target *targets = NULL;
978 	__u64 *cookies = NULL;
979 	struct elf_fd elf_fd;
980 	size_t target_cnt;
981 
982 	spec_map_fd = bpf_map__fd(man->specs_map);
983 	ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
984 
985 	err = elf_open(path, &elf_fd);
986 	if (err)
987 		return libbpf_err_ptr(err);
988 
989 	err = sanity_check_usdt_elf(elf_fd.elf, path);
990 	if (err)
991 		goto err_out;
992 
993 	/* normalize PID filter */
994 	if (pid < 0)
995 		pid = -1;
996 	else if (pid == 0)
997 		pid = getpid();
998 
999 	/* discover USDT in given binary, optionally limiting
1000 	 * activations to a given PID, if pid > 0
1001 	 */
1002 	err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name,
1003 				   usdt_cookie, &targets, &target_cnt);
1004 	if (err <= 0) {
1005 		err = (err == 0) ? -ENOENT : err;
1006 		goto err_out;
1007 	}
1008 
1009 	specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
1010 	if (IS_ERR(specs_hash)) {
1011 		err = PTR_ERR(specs_hash);
1012 		goto err_out;
1013 	}
1014 
1015 	link = calloc(1, sizeof(*link));
1016 	if (!link) {
1017 		err = -ENOMEM;
1018 		goto err_out;
1019 	}
1020 
1021 	link->usdt_man = man;
1022 	link->link.detach = &bpf_link_usdt_detach;
1023 	link->link.dealloc = &bpf_link_usdt_dealloc;
1024 
1025 	if (man->has_uprobe_multi) {
1026 		offsets = calloc(target_cnt, sizeof(*offsets));
1027 		cookies = calloc(target_cnt, sizeof(*cookies));
1028 		ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets));
1029 
1030 		if (!offsets || !ref_ctr_offsets || !cookies) {
1031 			err = -ENOMEM;
1032 			goto err_out;
1033 		}
1034 	} else {
1035 		link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
1036 		if (!link->uprobes) {
1037 			err = -ENOMEM;
1038 			goto err_out;
1039 		}
1040 	}
1041 
1042 	for (i = 0; i < target_cnt; i++) {
1043 		struct usdt_target *target = &targets[i];
1044 		struct bpf_link *uprobe_link;
1045 		bool is_new;
1046 		int spec_id;
1047 
1048 		/* Spec ID can be either reused or newly allocated. If it is
1049 		 * newly allocated, we'll need to fill out spec map, otherwise
1050 		 * entire spec should be valid and can be just used by a new
1051 		 * uprobe. We reuse spec when USDT arg spec is identical. We
1052 		 * also never share specs between two different USDT
1053 		 * attachments ("links"), so all the reused specs already
1054 		 * share USDT cookie value implicitly.
1055 		 */
1056 		err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
1057 		if (err)
1058 			goto err_out;
1059 
1060 		if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
1061 			err = -errno;
1062 			pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %s\n",
1063 				spec_id, usdt_provider, usdt_name, path, errstr(err));
1064 			goto err_out;
1065 		}
1066 		if (!man->has_bpf_cookie &&
1067 		    bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
1068 			err = -errno;
1069 			if (err == -EEXIST) {
1070 				pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
1071 				        spec_id, usdt_provider, usdt_name, path);
1072 			} else {
1073 				pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %s\n",
1074 					target->abs_ip, spec_id, usdt_provider, usdt_name,
1075 					path, errstr(err));
1076 			}
1077 			goto err_out;
1078 		}
1079 
1080 		if (man->has_uprobe_multi) {
1081 			offsets[i] = target->rel_ip;
1082 			ref_ctr_offsets[i] = target->sema_off;
1083 			cookies[i] = spec_id;
1084 		} else {
1085 			opts.ref_ctr_offset = target->sema_off;
1086 			opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
1087 			uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
1088 								      target->rel_ip, &opts);
1089 			err = libbpf_get_error(uprobe_link);
1090 			if (err) {
1091 				pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %s\n",
1092 					i, usdt_provider, usdt_name, path, errstr(err));
1093 				goto err_out;
1094 			}
1095 
1096 			link->uprobes[i].link = uprobe_link;
1097 			link->uprobes[i].abs_ip = target->abs_ip;
1098 			link->uprobe_cnt++;
1099 		}
1100 	}
1101 
1102 	if (man->has_uprobe_multi) {
1103 		LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi,
1104 			.ref_ctr_offsets = ref_ctr_offsets,
1105 			.offsets = offsets,
1106 			.cookies = cookies,
1107 			.cnt = target_cnt,
1108 		);
1109 
1110 		link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path,
1111 								    NULL, &opts_multi);
1112 		if (!link->multi_link) {
1113 			err = -errno;
1114 			pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %s\n",
1115 				usdt_provider, usdt_name, path, errstr(err));
1116 			goto err_out;
1117 		}
1118 
1119 		free(offsets);
1120 		free(ref_ctr_offsets);
1121 		free(cookies);
1122 	}
1123 
1124 	free(targets);
1125 	hashmap__free(specs_hash);
1126 	elf_close(&elf_fd);
1127 	return &link->link;
1128 
1129 err_out:
1130 	free(offsets);
1131 	free(ref_ctr_offsets);
1132 	free(cookies);
1133 
1134 	if (link)
1135 		bpf_link__destroy(&link->link);
1136 	free(targets);
1137 	hashmap__free(specs_hash);
1138 	elf_close(&elf_fd);
1139 	return libbpf_err_ptr(err);
1140 }
1141 
1142 /* Parse out USDT ELF note from '.note.stapsdt' section.
1143  * Logic inspired by perf's code.
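 *
 * The note's description payload consists of three addresses (location, base
 * and semaphore, in that order) followed by three zero-terminated strings:
 * provider, name and the arguments spec.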
1144  */
1145 static int parse_usdt_note(GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
1146 			   struct usdt_note *note)
1147 {
1148 	const char *provider, *name, *args;
1149 	long addrs[3];
1150 	size_t len;
1151 
1152 	/* sanity check USDT note name and type first */
1153 	if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
1154 		return -EINVAL;
1155 	if (nhdr->n_type != USDT_NOTE_TYPE)
1156 		return -EINVAL;
1157 
1158 	/* sanity check USDT note contents ("description" in ELF terminology) */
1159 	len = nhdr->n_descsz;
1160 	data = data + desc_off;
1161 
1162 	/* +3 is the very minimum required to store three empty strings */
1163 	if (len < sizeof(addrs) + 3)
1164 		return -EINVAL;
1165 
1166 	/* get location, base, and semaphore addrs */
1167 	memcpy(&addrs, data, sizeof(addrs));
1168 
1169 	/* parse string fields: provider, name, args */
1170 	provider = data + sizeof(addrs);
1171 
1172 	name = (const char *)memchr(provider, '\0', data + len - provider);
1173 	if (!name) /* non-zero-terminated provider */
1174 		return -EINVAL;
1175 	name++;
1176 	if (name >= data + len || *name == '\0') /* missing or empty name */
1177 		return -EINVAL;
1178 
1179 	args = memchr(name, '\0', data + len - name);
1180 	if (!args) /* non-zero-terminated name */
1181 		return -EINVAL;
1182 	++args;
1183 	if (args >= data + len) /* missing arguments spec */
1184 		return -EINVAL;
1185 
1186 	note->provider = provider;
1187 	note->name = name;
1188 	if (*args == '\0' || *args == ':')
1189 		note->args = "";
1190 	else
1191 		note->args = args;
1192 	note->loc_addr = addrs[0];
1193 	note->base_addr = addrs[1];
1194 	note->sema_addr = addrs[2];
1195 
1196 	return 0;
1197 }
1198 
1199 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
1200 
1201 static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
1202 {
1203 	struct usdt_arg_spec *arg;
1204 	const char *s;
1205 	int arg_sz, len;
1206 
1207 	spec->usdt_cookie = usdt_cookie;
1208 	spec->arg_cnt = 0;
1209 
1210 	s = note->args;
1211 	while (s[0]) {
1212 		if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
1213 			pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
1214 				USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
1215 			return -E2BIG;
1216 		}
1217 
1218 		arg = &spec->args[spec->arg_cnt];
1219 		len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
1220 		if (len < 0)
1221 			return len;
1222 
1223 		arg->arg_signed = arg_sz < 0;
1224 		if (arg_sz < 0)
1225 			arg_sz = -arg_sz;
1226 
1227 		switch (arg_sz) {
1228 		case 1: case 2: case 4: case 8:
1229 			arg->arg_bitshift = 64 - arg_sz * 8;
1230 			break;
1231 		default:
1232 			pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
1233 				spec->arg_cnt, s, arg_sz);
1234 			return -EINVAL;
1235 		}
1236 
1237 		s += len;
1238 		spec->arg_cnt++;
1239 	}
1240 
1241 	return 0;
1242 }
1243 
1244 /* Architecture-specific logic for parsing USDT argument location specs */
1245 
1246 #if defined(__x86_64__) || defined(__i386__)
1247 
1248 static int calc_pt_regs_off(const char *reg_name)
1249 {
1250 	static struct {
1251 		const char *names[4];
1252 		size_t pt_regs_off;
1253 	} reg_map[] = {
1254 #ifdef __x86_64__
1255 #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
1256 #else
1257 #define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
1258 #endif
1259 		{ {"rip", "eip", "", ""}, reg_off(rip, eip) },
1260 		{ {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
1261 		{ {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
1262 		{ {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
1263 		{ {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
1264 		{ {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
1265 		{ {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
1266 		{ {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
1267 		{ {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
1268 #undef reg_off
1269 #ifdef __x86_64__
1270 		{ {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
1271 		{ {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
1272 		{ {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
1273 		{ {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
1274 		{ {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
1275 		{ {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
1276 		{ {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
1277 		{ {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
1278 #endif
1279 	};
1280 	int i, j;
1281 
1282 	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1283 		for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
1284 			if (strcmp(reg_name, reg_map[i].names[j]) == 0)
1285 				return reg_map[i].pt_regs_off;
1286 		}
1287 	}
1288 
1289 	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1290 	return -ENOENT;
1291 }
1292 
1293 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1294 {
1295 	char reg_name[16] = {0}, idx_reg_name[16] = {0};
1296 	int len, reg_off, idx_reg_off, scale = 1;
1297 	long off = 0;
1298 
1299 	if (sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^,] , %d ) %n",
1300 		   arg_sz, &off, reg_name, idx_reg_name, &scale, &len) == 5 ||
1301 		sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^,] , %d ) %n",
1302 		       arg_sz, reg_name, idx_reg_name, &scale, &len) == 4 ||
1303 		sscanf(arg_str, " %d @ %ld ( %%%15[^,] , %%%15[^)] ) %n",
1304 		       arg_sz, &off, reg_name, idx_reg_name, &len) == 4 ||
1305 		sscanf(arg_str, " %d @ ( %%%15[^,] , %%%15[^)] ) %n",
1306 		       arg_sz, reg_name, idx_reg_name, &len) == 3
1307 		) {
1308 		/*
1309 		 * Scale Index Base case:
1310 		 * 1@-96(%rbp,%rax,8)
1311 		 * 1@(%rbp,%rax,8)
1312 		 * 1@-96(%rbp,%rax)
1313 		 * 1@(%rbp,%rax)
1314 		 */
1315 		arg->arg_type = USDT_ARG_SIB;
1316 		arg->val_off = off;
1317 
1318 		reg_off = calc_pt_regs_off(reg_name);
1319 		if (reg_off < 0)
1320 			return reg_off;
1321 		arg->reg_off = reg_off;
1322 
1323 		idx_reg_off = calc_pt_regs_off(idx_reg_name);
1324 		if (idx_reg_off < 0)
1325 			return idx_reg_off;
1326 		arg->idx_reg_off = idx_reg_off;
1327 
1328 		/* validate scale factor and set fields directly */
1329 		switch (scale) {
1330 		case 1: arg->scale_bitshift = 0; break;
1331 		case 2: arg->scale_bitshift = 1; break;
1332 		case 4: arg->scale_bitshift = 2; break;
1333 		case 8: arg->scale_bitshift = 3; break;
1334 		default:
1335 			pr_warn("usdt: invalid SIB scale %d, expected 1, 2, 4, 8\n", scale);
1336 			return -EINVAL;
1337 		}
1338 	} else if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n",
1339 				arg_sz, &off, reg_name, &len) == 3) {
1340 		/* Memory dereference case, e.g., -4@-20(%rbp) */
1341 		arg->arg_type = USDT_ARG_REG_DEREF;
1342 		arg->val_off = off;
1343 		reg_off = calc_pt_regs_off(reg_name);
1344 		if (reg_off < 0)
1345 			return reg_off;
1346 		arg->reg_off = reg_off;
1347 	} else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
1348 		/* Memory dereference case without offset, e.g., 8@(%rsp) */
1349 		arg->arg_type = USDT_ARG_REG_DEREF;
1350 		arg->val_off = 0;
1351 		reg_off = calc_pt_regs_off(reg_name);
1352 		if (reg_off < 0)
1353 			return reg_off;
1354 		arg->reg_off = reg_off;
1355 	} else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
1356 		/* Register read case, e.g., -4@%eax */
1357 		arg->arg_type = USDT_ARG_REG;
1358 		/* register read has no memory offset */
1359 		arg->val_off = 0;
1360 
1361 		reg_off = calc_pt_regs_off(reg_name);
1362 		if (reg_off < 0)
1363 			return reg_off;
1364 		arg->reg_off = reg_off;
1365 	} else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
1366 		/* Constant value case, e.g., 4@$71 */
1367 		arg->arg_type = USDT_ARG_CONST;
1368 		arg->val_off = off;
1369 		arg->reg_off = 0;
1370 	} else {
1371 		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1372 		return -EINVAL;
1373 	}
1374 
1375 	return len;
1376 }
1377 
1378 #elif defined(__s390x__)
1379 
1380 /* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
1381 
1382 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1383 {
1384 	unsigned int reg;
1385 	int len;
1386 	long off;
1387 
1388 	if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) {
1389 		/* Memory dereference case, e.g., -2@-28(%r15) */
1390 		arg->arg_type = USDT_ARG_REG_DEREF;
1391 		arg->val_off = off;
1392 		if (reg > 15) {
1393 			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1394 			return -EINVAL;
1395 		}
1396 		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1397 	} else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) {
1398 		/* Register read case, e.g., -8@%r0 */
1399 		arg->arg_type = USDT_ARG_REG;
1400 		arg->val_off = 0;
1401 		if (reg > 15) {
1402 			pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
1403 			return -EINVAL;
1404 		}
1405 		arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
1406 	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1407 		/* Constant value case, e.g., 4@71 */
1408 		arg->arg_type = USDT_ARG_CONST;
1409 		arg->val_off = off;
1410 		arg->reg_off = 0;
1411 	} else {
1412 		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1413 		return -EINVAL;
1414 	}
1415 
1416 	return len;
1417 }
1418 
1419 #elif defined(__aarch64__)
1420 
1421 static int calc_pt_regs_off(const char *reg_name)
1422 {
1423 	int reg_num;
1424 
1425 	if (sscanf(reg_name, "x%d", &reg_num) == 1) {
1426 		if (reg_num >= 0 && reg_num < 31)
1427 			return offsetof(struct user_pt_regs, regs[reg_num]);
1428 	} else if (strcmp(reg_name, "sp") == 0) {
1429 		return offsetof(struct user_pt_regs, sp);
1430 	}
1431 	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1432 	return -ENOENT;
1433 }
1434 
1435 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1436 {
1437 	char reg_name[16];
1438 	int len, reg_off;
1439 	long off;
1440 
1441 	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
1442 		/* Memory dereference case, e.g., -4@[sp, 96] */
1443 		arg->arg_type = USDT_ARG_REG_DEREF;
1444 		arg->val_off = off;
1445 		reg_off = calc_pt_regs_off(reg_name);
1446 		if (reg_off < 0)
1447 			return reg_off;
1448 		arg->reg_off = reg_off;
1449 	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
1450 		/* Memory dereference case, e.g., -4@[sp] */
1451 		arg->arg_type = USDT_ARG_REG_DEREF;
1452 		arg->val_off = 0;
1453 		reg_off = calc_pt_regs_off(reg_name);
1454 		if (reg_off < 0)
1455 			return reg_off;
1456 		arg->reg_off = reg_off;
1457 	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1458 		/* Constant value case, e.g., 4@5 */
1459 		arg->arg_type = USDT_ARG_CONST;
1460 		arg->val_off = off;
1461 		arg->reg_off = 0;
1462 	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1463 		/* Register read case, e.g., -8@x4 */
1464 		arg->arg_type = USDT_ARG_REG;
1465 		arg->val_off = 0;
1466 		reg_off = calc_pt_regs_off(reg_name);
1467 		if (reg_off < 0)
1468 			return reg_off;
1469 		arg->reg_off = reg_off;
1470 	} else {
1471 		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1472 		return -EINVAL;
1473 	}
1474 
1475 	return len;
1476 }
1477 
1478 #elif defined(__riscv)
1479 
1480 static int calc_pt_regs_off(const char *reg_name)
1481 {
1482 	static struct {
1483 		const char *name;
1484 		size_t pt_regs_off;
1485 	} reg_map[] = {
1486 		{ "ra", offsetof(struct user_regs_struct, ra) },
1487 		{ "sp", offsetof(struct user_regs_struct, sp) },
1488 		{ "gp", offsetof(struct user_regs_struct, gp) },
1489 		{ "tp", offsetof(struct user_regs_struct, tp) },
1490 		{ "a0", offsetof(struct user_regs_struct, a0) },
1491 		{ "a1", offsetof(struct user_regs_struct, a1) },
1492 		{ "a2", offsetof(struct user_regs_struct, a2) },
1493 		{ "a3", offsetof(struct user_regs_struct, a3) },
1494 		{ "a4", offsetof(struct user_regs_struct, a4) },
1495 		{ "a5", offsetof(struct user_regs_struct, a5) },
1496 		{ "a6", offsetof(struct user_regs_struct, a6) },
1497 		{ "a7", offsetof(struct user_regs_struct, a7) },
1498 		{ "s0", offsetof(struct user_regs_struct, s0) },
1499 		{ "s1", offsetof(struct user_regs_struct, s1) },
1500 		{ "s2", offsetof(struct user_regs_struct, s2) },
1501 		{ "s3", offsetof(struct user_regs_struct, s3) },
1502 		{ "s4", offsetof(struct user_regs_struct, s4) },
1503 		{ "s5", offsetof(struct user_regs_struct, s5) },
1504 		{ "s6", offsetof(struct user_regs_struct, s6) },
1505 		{ "s7", offsetof(struct user_regs_struct, s7) },
1506 		{ "s8", offsetof(struct user_regs_struct, rv_s8) },
1507 		{ "s9", offsetof(struct user_regs_struct, s9) },
1508 		{ "s10", offsetof(struct user_regs_struct, s10) },
1509 		{ "s11", offsetof(struct user_regs_struct, s11) },
1510 		{ "t0", offsetof(struct user_regs_struct, t0) },
1511 		{ "t1", offsetof(struct user_regs_struct, t1) },
1512 		{ "t2", offsetof(struct user_regs_struct, t2) },
1513 		{ "t3", offsetof(struct user_regs_struct, t3) },
1514 		{ "t4", offsetof(struct user_regs_struct, t4) },
1515 		{ "t5", offsetof(struct user_regs_struct, t5) },
1516 		{ "t6", offsetof(struct user_regs_struct, t6) },
1517 	};
1518 	int i;
1519 
1520 	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1521 		if (strcmp(reg_name, reg_map[i].name) == 0)
1522 			return reg_map[i].pt_regs_off;
1523 	}
1524 
1525 	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1526 	return -ENOENT;
1527 }
1528 
1529 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1530 {
1531 	char reg_name[16];
1532 	int len, reg_off;
1533 	long off;
1534 
1535 	if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
1536 		/* Memory dereference case, e.g., -8@-88(s0) */
1537 		arg->arg_type = USDT_ARG_REG_DEREF;
1538 		arg->val_off = off;
1539 		reg_off = calc_pt_regs_off(reg_name);
1540 		if (reg_off < 0)
1541 			return reg_off;
1542 		arg->reg_off = reg_off;
1543 	} else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
1544 		/* Constant value case, e.g., 4@5 */
1545 		arg->arg_type = USDT_ARG_CONST;
1546 		arg->val_off = off;
1547 		arg->reg_off = 0;
1548 	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1549 		/* Register read case, e.g., -8@a1 */
1550 		arg->arg_type = USDT_ARG_REG;
1551 		arg->val_off = 0;
1552 		reg_off = calc_pt_regs_off(reg_name);
1553 		if (reg_off < 0)
1554 			return reg_off;
1555 		arg->reg_off = reg_off;
1556 	} else {
1557 		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1558 		return -EINVAL;
1559 	}
1560 
1561 	return len;
1562 }
1563 
1564 #elif defined(__arm__)
1565 
1566 static int calc_pt_regs_off(const char *reg_name)
1567 {
1568 	static struct {
1569 		const char *name;
1570 		size_t pt_regs_off;
1571 	} reg_map[] = {
1572 		{ "r0", offsetof(struct pt_regs, uregs[0]) },
1573 		{ "r1", offsetof(struct pt_regs, uregs[1]) },
1574 		{ "r2", offsetof(struct pt_regs, uregs[2]) },
1575 		{ "r3", offsetof(struct pt_regs, uregs[3]) },
1576 		{ "r4", offsetof(struct pt_regs, uregs[4]) },
1577 		{ "r5", offsetof(struct pt_regs, uregs[5]) },
1578 		{ "r6", offsetof(struct pt_regs, uregs[6]) },
1579 		{ "r7", offsetof(struct pt_regs, uregs[7]) },
1580 		{ "r8", offsetof(struct pt_regs, uregs[8]) },
1581 		{ "r9", offsetof(struct pt_regs, uregs[9]) },
1582 		{ "r10", offsetof(struct pt_regs, uregs[10]) },
1583 		{ "fp", offsetof(struct pt_regs, uregs[11]) },
1584 		{ "ip", offsetof(struct pt_regs, uregs[12]) },
1585 		{ "sp", offsetof(struct pt_regs, uregs[13]) },
1586 		{ "lr", offsetof(struct pt_regs, uregs[14]) },
1587 		{ "pc", offsetof(struct pt_regs, uregs[15]) },
1588 	};
1589 	int i;
1590 
1591 	for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
1592 		if (strcmp(reg_name, reg_map[i].name) == 0)
1593 			return reg_map[i].pt_regs_off;
1594 	}
1595 
1596 	pr_warn("usdt: unrecognized register '%s'\n", reg_name);
1597 	return -ENOENT;
1598 }
1599 
1600 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1601 {
1602 	char reg_name[16];
1603 	int len, reg_off;
1604 	long off;
1605 
1606 	if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
1607 		   arg_sz, reg_name, &off, &len) == 3) {
1608 		/* Memory dereference case, e.g., -4@[fp, #96] */
1609 		arg->arg_type = USDT_ARG_REG_DEREF;
1610 		arg->val_off = off;
1611 		reg_off = calc_pt_regs_off(reg_name);
1612 		if (reg_off < 0)
1613 			return reg_off;
1614 		arg->reg_off = reg_off;
1615 	} else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
1616 		/* Memory dereference case, e.g., -4@[sp] */
1617 		arg->arg_type = USDT_ARG_REG_DEREF;
1618 		arg->val_off = 0;
1619 		reg_off = calc_pt_regs_off(reg_name);
1620 		if (reg_off < 0)
1621 			return reg_off;
1622 		arg->reg_off = reg_off;
1623 	} else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
1624 		/* Constant value case, e.g., 4@#5 */
1625 		arg->arg_type = USDT_ARG_CONST;
1626 		arg->val_off = off;
1627 		arg->reg_off = 0;
1628 	} else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
1629 		/* Register read case, e.g., -8@r4 */
1630 		arg->arg_type = USDT_ARG_REG;
1631 		arg->val_off = 0;
1632 		reg_off = calc_pt_regs_off(reg_name);
1633 		if (reg_off < 0)
1634 			return reg_off;
1635 		arg->reg_off = reg_off;
1636 	} else {
1637 		pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
1638 		return -EINVAL;
1639 	}
1640 
1641 	return len;
1642 }
1643 
1644 #else
1645 
1646 static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
1647 {
1648 	pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
1649 	return -ENOTSUP;
1650 }
1651 
1652 #endif
1653