1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces. 4 * 5 * Rewritten and vastly simplified by Rusty Russell for in-kernel 6 * module loader: 7 * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 8 * 9 * ChangeLog: 10 * 11 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com> 12 * Changed the compression method from stem compression to "table lookup" 13 * compression (see scripts/kallsyms.c for a more complete description) 14 */ 15 #include <linux/kallsyms.h> 16 #include <linux/init.h> 17 #include <linux/seq_file.h> 18 #include <linux/fs.h> 19 #include <linux/kdb.h> 20 #include <linux/err.h> 21 #include <linux/proc_fs.h> 22 #include <linux/sched.h> /* for cond_resched */ 23 #include <linux/ctype.h> 24 #include <linux/slab.h> 25 #include <linux/filter.h> 26 #include <linux/ftrace.h> 27 #include <linux/kprobes.h> 28 #include <linux/build_bug.h> 29 #include <linux/compiler.h> 30 #include <linux/module.h> 31 #include <linux/kernel.h> 32 #include <linux/bsearch.h> 33 #include <linux/btf_ids.h> 34 35 #include "kallsyms_internal.h" 36 37 /* 38 * Expand a compressed symbol data into the resulting uncompressed string, 39 * if uncompressed string is too long (>= maxlen), it will be truncated, 40 * given the offset to where the symbol is in the compressed stream. 41 */ 42 static unsigned int kallsyms_expand_symbol(unsigned int off, 43 char *result, size_t maxlen) 44 { 45 int len, skipped_first = 0; 46 const char *tptr; 47 const u8 *data; 48 49 /* Get the compressed symbol length from the first symbol byte. */ 50 data = &kallsyms_names[off]; 51 len = *data; 52 data++; 53 off++; 54 55 /* If MSB is 1, it is a "big" symbol, so needs an additional byte. */ 56 if ((len & 0x80) != 0) { 57 len = (len & 0x7F) | (*data << 7); 58 data++; 59 off++; 60 } 61 62 /* 63 * Update the offset to return the offset for the next symbol on 64 * the compressed stream. 65 */ 66 off += len; 67 68 /* 69 * For every byte on the compressed symbol data, copy the table 70 * entry for that byte. 71 */ 72 while (len) { 73 tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; 74 data++; 75 len--; 76 77 while (*tptr) { 78 if (skipped_first) { 79 if (maxlen <= 1) 80 goto tail; 81 *result = *tptr; 82 result++; 83 maxlen--; 84 } else 85 skipped_first = 1; 86 tptr++; 87 } 88 } 89 90 tail: 91 if (maxlen) 92 *result = '\0'; 93 94 /* Return to offset to the next symbol. */ 95 return off; 96 } 97 98 /* 99 * Get symbol type information. This is encoded as a single char at the 100 * beginning of the symbol name. 101 */ 102 static char kallsyms_get_symbol_type(unsigned int off) 103 { 104 /* 105 * Get just the first code, look it up in the token table, 106 * and return the first char from this token. 107 */ 108 return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; 109 } 110 111 112 /* 113 * Find the offset on the compressed stream given and index in the 114 * kallsyms array. 115 */ 116 static unsigned int get_symbol_offset(unsigned long pos) 117 { 118 const u8 *name; 119 int i, len; 120 121 /* 122 * Use the closest marker we have. We have markers every 256 positions, 123 * so that should be close enough. 124 */ 125 name = &kallsyms_names[kallsyms_markers[pos >> 8]]; 126 127 /* 128 * Sequentially scan all the symbols up to the point we're searching 129 * for. Every symbol is stored in a [<len>][<len> bytes of data] format, 130 * so we just need to add the len to the current pointer for every 131 * symbol we wish to skip. 132 */ 133 for (i = 0; i < (pos & 0xFF); i++) { 134 len = *name; 135 136 /* 137 * If MSB is 1, it is a "big" symbol, so we need to look into 138 * the next byte (and skip it, too). 139 */ 140 if ((len & 0x80) != 0) 141 len = ((len & 0x7F) | (name[1] << 7)) + 1; 142 143 name = name + len + 1; 144 } 145 146 return name - kallsyms_names; 147 } 148 149 unsigned long kallsyms_sym_address(int idx) 150 { 151 /* values are unsigned offsets if --absolute-percpu is not in effect */ 152 if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU)) 153 return kallsyms_relative_base + (u32)kallsyms_offsets[idx]; 154 155 /* ...otherwise, positive offsets are absolute values */ 156 if (kallsyms_offsets[idx] >= 0) 157 return kallsyms_offsets[idx]; 158 159 /* ...and negative offsets are relative to kallsyms_relative_base - 1 */ 160 return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; 161 } 162 163 static void cleanup_symbol_name(char *s) 164 { 165 char *res; 166 167 if (!IS_ENABLED(CONFIG_LTO_CLANG)) 168 return; 169 170 /* 171 * LLVM appends various suffixes for local functions and variables that 172 * must be promoted to global scope as part of LTO. This can break 173 * hooking of static functions with kprobes. '.' is not a valid 174 * character in an identifier in C. Suffixes only in LLVM LTO observed: 175 * - foo.llvm.[0-9a-f]+ 176 */ 177 res = strstr(s, ".llvm."); 178 if (res) 179 *res = '\0'; 180 181 return; 182 } 183 184 static int compare_symbol_name(const char *name, char *namebuf) 185 { 186 /* The kallsyms_seqs_of_names is sorted based on names after 187 * cleanup_symbol_name() (see scripts/kallsyms.c) if clang lto is enabled. 188 * To ensure correct bisection in kallsyms_lookup_names(), do 189 * cleanup_symbol_name(namebuf) before comparing name and namebuf. 190 */ 191 cleanup_symbol_name(namebuf); 192 return strcmp(name, namebuf); 193 } 194 195 static unsigned int get_symbol_seq(int index) 196 { 197 unsigned int i, seq = 0; 198 199 for (i = 0; i < 3; i++) 200 seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i]; 201 202 return seq; 203 } 204 205 static int kallsyms_lookup_names(const char *name, 206 unsigned int *start, 207 unsigned int *end) 208 { 209 int ret; 210 int low, mid, high; 211 unsigned int seq, off; 212 char namebuf[KSYM_NAME_LEN]; 213 214 low = 0; 215 high = kallsyms_num_syms - 1; 216 217 while (low <= high) { 218 mid = low + (high - low) / 2; 219 seq = get_symbol_seq(mid); 220 off = get_symbol_offset(seq); 221 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 222 ret = compare_symbol_name(name, namebuf); 223 if (ret > 0) 224 low = mid + 1; 225 else if (ret < 0) 226 high = mid - 1; 227 else 228 break; 229 } 230 231 if (low > high) 232 return -ESRCH; 233 234 low = mid; 235 while (low) { 236 seq = get_symbol_seq(low - 1); 237 off = get_symbol_offset(seq); 238 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 239 if (compare_symbol_name(name, namebuf)) 240 break; 241 low--; 242 } 243 *start = low; 244 245 if (end) { 246 high = mid; 247 while (high < kallsyms_num_syms - 1) { 248 seq = get_symbol_seq(high + 1); 249 off = get_symbol_offset(seq); 250 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 251 if (compare_symbol_name(name, namebuf)) 252 break; 253 high++; 254 } 255 *end = high; 256 } 257 258 return 0; 259 } 260 261 /* Lookup the address for this symbol. Returns 0 if not found. */ 262 unsigned long kallsyms_lookup_name(const char *name) 263 { 264 int ret; 265 unsigned int i; 266 267 /* Skip the search for empty string. */ 268 if (!*name) 269 return 0; 270 271 ret = kallsyms_lookup_names(name, &i, NULL); 272 if (!ret) 273 return kallsyms_sym_address(get_symbol_seq(i)); 274 275 return module_kallsyms_lookup_name(name); 276 } 277 278 /* 279 * Iterate over all symbols in vmlinux. For symbols from modules use 280 * module_kallsyms_on_each_symbol instead. 281 */ 282 int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long), 283 void *data) 284 { 285 char namebuf[KSYM_NAME_LEN]; 286 unsigned long i; 287 unsigned int off; 288 int ret; 289 290 for (i = 0, off = 0; i < kallsyms_num_syms; i++) { 291 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); 292 ret = fn(data, namebuf, kallsyms_sym_address(i)); 293 if (ret != 0) 294 return ret; 295 cond_resched(); 296 } 297 return 0; 298 } 299 300 int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), 301 const char *name, void *data) 302 { 303 int ret; 304 unsigned int i, start, end; 305 306 ret = kallsyms_lookup_names(name, &start, &end); 307 if (ret) 308 return 0; 309 310 for (i = start; !ret && i <= end; i++) { 311 ret = fn(data, kallsyms_sym_address(get_symbol_seq(i))); 312 cond_resched(); 313 } 314 315 return ret; 316 } 317 318 static unsigned long get_symbol_pos(unsigned long addr, 319 unsigned long *symbolsize, 320 unsigned long *offset) 321 { 322 unsigned long symbol_start = 0, symbol_end = 0; 323 unsigned long i, low, high, mid; 324 325 /* Do a binary search on the sorted kallsyms_offsets array. */ 326 low = 0; 327 high = kallsyms_num_syms; 328 329 while (high - low > 1) { 330 mid = low + (high - low) / 2; 331 if (kallsyms_sym_address(mid) <= addr) 332 low = mid; 333 else 334 high = mid; 335 } 336 337 /* 338 * Search for the first aliased symbol. Aliased 339 * symbols are symbols with the same address. 340 */ 341 while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low)) 342 --low; 343 344 symbol_start = kallsyms_sym_address(low); 345 346 /* Search for next non-aliased symbol. */ 347 for (i = low + 1; i < kallsyms_num_syms; i++) { 348 if (kallsyms_sym_address(i) > symbol_start) { 349 symbol_end = kallsyms_sym_address(i); 350 break; 351 } 352 } 353 354 /* If we found no next symbol, we use the end of the section. */ 355 if (!symbol_end) { 356 if (is_kernel_inittext(addr)) 357 symbol_end = (unsigned long)_einittext; 358 else if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) 359 symbol_end = (unsigned long)_end; 360 else 361 symbol_end = (unsigned long)_etext; 362 } 363 364 if (symbolsize) 365 *symbolsize = symbol_end - symbol_start; 366 if (offset) 367 *offset = addr - symbol_start; 368 369 return low; 370 } 371 372 /* 373 * Lookup an address but don't bother to find any names. 374 */ 375 int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, 376 unsigned long *offset) 377 { 378 char namebuf[KSYM_NAME_LEN]; 379 380 if (is_ksym_addr(addr)) { 381 get_symbol_pos(addr, symbolsize, offset); 382 return 1; 383 } 384 return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || 385 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 386 } 387 388 static int kallsyms_lookup_buildid(unsigned long addr, 389 unsigned long *symbolsize, 390 unsigned long *offset, char **modname, 391 const unsigned char **modbuildid, char *namebuf) 392 { 393 int ret; 394 395 namebuf[KSYM_NAME_LEN - 1] = 0; 396 namebuf[0] = 0; 397 398 if (is_ksym_addr(addr)) { 399 unsigned long pos; 400 401 pos = get_symbol_pos(addr, symbolsize, offset); 402 /* Grab name */ 403 kallsyms_expand_symbol(get_symbol_offset(pos), 404 namebuf, KSYM_NAME_LEN); 405 if (modname) 406 *modname = NULL; 407 if (modbuildid) 408 *modbuildid = NULL; 409 410 ret = strlen(namebuf); 411 goto found; 412 } 413 414 /* See if it's in a module or a BPF JITed image. */ 415 ret = module_address_lookup(addr, symbolsize, offset, 416 modname, modbuildid, namebuf); 417 if (!ret) 418 ret = bpf_address_lookup(addr, symbolsize, 419 offset, modname, namebuf); 420 421 if (!ret) 422 ret = ftrace_mod_address_lookup(addr, symbolsize, 423 offset, modname, namebuf); 424 425 found: 426 cleanup_symbol_name(namebuf); 427 return ret; 428 } 429 430 /* 431 * Lookup an address 432 * - modname is set to NULL if it's in the kernel. 433 * - We guarantee that the returned name is valid until we reschedule even if. 434 * It resides in a module. 435 * - We also guarantee that modname will be valid until rescheduled. 436 */ 437 const char *kallsyms_lookup(unsigned long addr, 438 unsigned long *symbolsize, 439 unsigned long *offset, 440 char **modname, char *namebuf) 441 { 442 int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname, 443 NULL, namebuf); 444 445 if (!ret) 446 return NULL; 447 448 return namebuf; 449 } 450 451 int lookup_symbol_name(unsigned long addr, char *symname) 452 { 453 int res; 454 455 symname[0] = '\0'; 456 symname[KSYM_NAME_LEN - 1] = '\0'; 457 458 if (is_ksym_addr(addr)) { 459 unsigned long pos; 460 461 pos = get_symbol_pos(addr, NULL, NULL); 462 /* Grab name */ 463 kallsyms_expand_symbol(get_symbol_offset(pos), 464 symname, KSYM_NAME_LEN); 465 goto found; 466 } 467 /* See if it's in a module. */ 468 res = lookup_module_symbol_name(addr, symname); 469 if (res) 470 return res; 471 472 found: 473 cleanup_symbol_name(symname); 474 return 0; 475 } 476 477 /* Look up a kernel symbol and return it in a text buffer. */ 478 static int __sprint_symbol(char *buffer, unsigned long address, 479 int symbol_offset, int add_offset, int add_buildid) 480 { 481 char *modname; 482 const unsigned char *buildid; 483 unsigned long offset, size; 484 int len; 485 486 address += symbol_offset; 487 len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid, 488 buffer); 489 if (!len) 490 return sprintf(buffer, "0x%lx", address - symbol_offset); 491 492 offset -= symbol_offset; 493 494 if (add_offset) 495 len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); 496 497 if (modname) { 498 len += sprintf(buffer + len, " [%s", modname); 499 #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) 500 if (add_buildid && buildid) { 501 /* build ID should match length of sprintf */ 502 #if IS_ENABLED(CONFIG_MODULES) 503 static_assert(sizeof(typeof_member(struct module, build_id)) == 20); 504 #endif 505 len += sprintf(buffer + len, " %20phN", buildid); 506 } 507 #endif 508 len += sprintf(buffer + len, "]"); 509 } 510 511 return len; 512 } 513 514 /** 515 * sprint_symbol - Look up a kernel symbol and return it in a text buffer 516 * @buffer: buffer to be stored 517 * @address: address to lookup 518 * 519 * This function looks up a kernel symbol with @address and stores its name, 520 * offset, size and module name to @buffer if possible. If no symbol was found, 521 * just saves its @address as is. 522 * 523 * This function returns the number of bytes stored in @buffer. 524 */ 525 int sprint_symbol(char *buffer, unsigned long address) 526 { 527 return __sprint_symbol(buffer, address, 0, 1, 0); 528 } 529 EXPORT_SYMBOL_GPL(sprint_symbol); 530 531 /** 532 * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer 533 * @buffer: buffer to be stored 534 * @address: address to lookup 535 * 536 * This function looks up a kernel symbol with @address and stores its name, 537 * offset, size, module name and module build ID to @buffer if possible. If no 538 * symbol was found, just saves its @address as is. 539 * 540 * This function returns the number of bytes stored in @buffer. 541 */ 542 int sprint_symbol_build_id(char *buffer, unsigned long address) 543 { 544 return __sprint_symbol(buffer, address, 0, 1, 1); 545 } 546 EXPORT_SYMBOL_GPL(sprint_symbol_build_id); 547 548 /** 549 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer 550 * @buffer: buffer to be stored 551 * @address: address to lookup 552 * 553 * This function looks up a kernel symbol with @address and stores its name 554 * and module name to @buffer if possible. If no symbol was found, just saves 555 * its @address as is. 556 * 557 * This function returns the number of bytes stored in @buffer. 558 */ 559 int sprint_symbol_no_offset(char *buffer, unsigned long address) 560 { 561 return __sprint_symbol(buffer, address, 0, 0, 0); 562 } 563 EXPORT_SYMBOL_GPL(sprint_symbol_no_offset); 564 565 /** 566 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer 567 * @buffer: buffer to be stored 568 * @address: address to lookup 569 * 570 * This function is for stack backtrace and does the same thing as 571 * sprint_symbol() but with modified/decreased @address. If there is a 572 * tail-call to the function marked "noreturn", gcc optimized out code after 573 * the call so that the stack-saved return address could point outside of the 574 * caller. This function ensures that kallsyms will find the original caller 575 * by decreasing @address. 576 * 577 * This function returns the number of bytes stored in @buffer. 578 */ 579 int sprint_backtrace(char *buffer, unsigned long address) 580 { 581 return __sprint_symbol(buffer, address, -1, 1, 0); 582 } 583 584 /** 585 * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer 586 * @buffer: buffer to be stored 587 * @address: address to lookup 588 * 589 * This function is for stack backtrace and does the same thing as 590 * sprint_symbol() but with modified/decreased @address. If there is a 591 * tail-call to the function marked "noreturn", gcc optimized out code after 592 * the call so that the stack-saved return address could point outside of the 593 * caller. This function ensures that kallsyms will find the original caller 594 * by decreasing @address. This function also appends the module build ID to 595 * the @buffer if @address is within a kernel module. 596 * 597 * This function returns the number of bytes stored in @buffer. 598 */ 599 int sprint_backtrace_build_id(char *buffer, unsigned long address) 600 { 601 return __sprint_symbol(buffer, address, -1, 1, 1); 602 } 603 604 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ 605 struct kallsym_iter { 606 loff_t pos; 607 loff_t pos_mod_end; 608 loff_t pos_ftrace_mod_end; 609 loff_t pos_bpf_end; 610 unsigned long value; 611 unsigned int nameoff; /* If iterating in core kernel symbols. */ 612 char type; 613 char name[KSYM_NAME_LEN]; 614 char module_name[MODULE_NAME_LEN]; 615 int exported; 616 int show_value; 617 }; 618 619 static int get_ksymbol_mod(struct kallsym_iter *iter) 620 { 621 int ret = module_get_kallsym(iter->pos - kallsyms_num_syms, 622 &iter->value, &iter->type, 623 iter->name, iter->module_name, 624 &iter->exported); 625 if (ret < 0) { 626 iter->pos_mod_end = iter->pos; 627 return 0; 628 } 629 630 return 1; 631 } 632 633 /* 634 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace 635 * purposes. In that case "__builtin__ftrace" is used as a module name, even 636 * though "__builtin__ftrace" is not a module. 637 */ 638 static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) 639 { 640 int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, 641 &iter->value, &iter->type, 642 iter->name, iter->module_name, 643 &iter->exported); 644 if (ret < 0) { 645 iter->pos_ftrace_mod_end = iter->pos; 646 return 0; 647 } 648 649 return 1; 650 } 651 652 static int get_ksymbol_bpf(struct kallsym_iter *iter) 653 { 654 int ret; 655 656 strscpy(iter->module_name, "bpf", MODULE_NAME_LEN); 657 iter->exported = 0; 658 ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, 659 &iter->value, &iter->type, 660 iter->name); 661 if (ret < 0) { 662 iter->pos_bpf_end = iter->pos; 663 return 0; 664 } 665 666 return 1; 667 } 668 669 /* 670 * This uses "__builtin__kprobes" as a module name for symbols for pages 671 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a 672 * module. 673 */ 674 static int get_ksymbol_kprobe(struct kallsym_iter *iter) 675 { 676 strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); 677 iter->exported = 0; 678 return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, 679 &iter->value, &iter->type, 680 iter->name) < 0 ? 0 : 1; 681 } 682 683 /* Returns space to next name. */ 684 static unsigned long get_ksymbol_core(struct kallsym_iter *iter) 685 { 686 unsigned off = iter->nameoff; 687 688 iter->module_name[0] = '\0'; 689 iter->value = kallsyms_sym_address(iter->pos); 690 691 iter->type = kallsyms_get_symbol_type(off); 692 693 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); 694 695 return off - iter->nameoff; 696 } 697 698 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) 699 { 700 iter->name[0] = '\0'; 701 iter->nameoff = get_symbol_offset(new_pos); 702 iter->pos = new_pos; 703 if (new_pos == 0) { 704 iter->pos_mod_end = 0; 705 iter->pos_ftrace_mod_end = 0; 706 iter->pos_bpf_end = 0; 707 } 708 } 709 710 /* 711 * The end position (last + 1) of each additional kallsyms section is recorded 712 * in iter->pos_..._end as each section is added, and so can be used to 713 * determine which get_ksymbol_...() function to call next. 714 */ 715 static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) 716 { 717 iter->pos = pos; 718 719 if ((!iter->pos_mod_end || iter->pos_mod_end > pos) && 720 get_ksymbol_mod(iter)) 721 return 1; 722 723 if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) && 724 get_ksymbol_ftrace_mod(iter)) 725 return 1; 726 727 if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && 728 get_ksymbol_bpf(iter)) 729 return 1; 730 731 return get_ksymbol_kprobe(iter); 732 } 733 734 /* Returns false if pos at or past end of file. */ 735 static int update_iter(struct kallsym_iter *iter, loff_t pos) 736 { 737 /* Module symbols can be accessed randomly. */ 738 if (pos >= kallsyms_num_syms) 739 return update_iter_mod(iter, pos); 740 741 /* If we're not on the desired position, reset to new position. */ 742 if (pos != iter->pos) 743 reset_iter(iter, pos); 744 745 iter->nameoff += get_ksymbol_core(iter); 746 iter->pos++; 747 748 return 1; 749 } 750 751 static void *s_next(struct seq_file *m, void *p, loff_t *pos) 752 { 753 (*pos)++; 754 755 if (!update_iter(m->private, *pos)) 756 return NULL; 757 return p; 758 } 759 760 static void *s_start(struct seq_file *m, loff_t *pos) 761 { 762 if (!update_iter(m->private, *pos)) 763 return NULL; 764 return m->private; 765 } 766 767 static void s_stop(struct seq_file *m, void *p) 768 { 769 } 770 771 static int s_show(struct seq_file *m, void *p) 772 { 773 void *value; 774 struct kallsym_iter *iter = m->private; 775 776 /* Some debugging symbols have no name. Ignore them. */ 777 if (!iter->name[0]) 778 return 0; 779 780 value = iter->show_value ? (void *)iter->value : NULL; 781 782 if (iter->module_name[0]) { 783 char type; 784 785 /* 786 * Label it "global" if it is exported, 787 * "local" if not exported. 788 */ 789 type = iter->exported ? toupper(iter->type) : 790 tolower(iter->type); 791 seq_printf(m, "%px %c %s\t[%s]\n", value, 792 type, iter->name, iter->module_name); 793 } else 794 seq_printf(m, "%px %c %s\n", value, 795 iter->type, iter->name); 796 return 0; 797 } 798 799 static const struct seq_operations kallsyms_op = { 800 .start = s_start, 801 .next = s_next, 802 .stop = s_stop, 803 .show = s_show 804 }; 805 806 #ifdef CONFIG_BPF_SYSCALL 807 808 struct bpf_iter__ksym { 809 __bpf_md_ptr(struct bpf_iter_meta *, meta); 810 __bpf_md_ptr(struct kallsym_iter *, ksym); 811 }; 812 813 static int ksym_prog_seq_show(struct seq_file *m, bool in_stop) 814 { 815 struct bpf_iter__ksym ctx; 816 struct bpf_iter_meta meta; 817 struct bpf_prog *prog; 818 819 meta.seq = m; 820 prog = bpf_iter_get_info(&meta, in_stop); 821 if (!prog) 822 return 0; 823 824 ctx.meta = &meta; 825 ctx.ksym = m ? m->private : NULL; 826 return bpf_iter_run_prog(prog, &ctx); 827 } 828 829 static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p) 830 { 831 return ksym_prog_seq_show(m, false); 832 } 833 834 static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p) 835 { 836 if (!p) 837 (void) ksym_prog_seq_show(m, true); 838 else 839 s_stop(m, p); 840 } 841 842 static const struct seq_operations bpf_iter_ksym_ops = { 843 .start = s_start, 844 .next = s_next, 845 .stop = bpf_iter_ksym_seq_stop, 846 .show = bpf_iter_ksym_seq_show, 847 }; 848 849 static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux) 850 { 851 struct kallsym_iter *iter = priv_data; 852 853 reset_iter(iter, 0); 854 855 /* cache here as in kallsyms_open() case; use current process 856 * credentials to tell BPF iterators if values should be shown. 857 */ 858 iter->show_value = kallsyms_show_value(current_cred()); 859 860 return 0; 861 } 862 863 DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym) 864 865 static const struct bpf_iter_seq_info ksym_iter_seq_info = { 866 .seq_ops = &bpf_iter_ksym_ops, 867 .init_seq_private = bpf_iter_ksym_init, 868 .fini_seq_private = NULL, 869 .seq_priv_size = sizeof(struct kallsym_iter), 870 }; 871 872 static struct bpf_iter_reg ksym_iter_reg_info = { 873 .target = "ksym", 874 .feature = BPF_ITER_RESCHED, 875 .ctx_arg_info_size = 1, 876 .ctx_arg_info = { 877 { offsetof(struct bpf_iter__ksym, ksym), 878 PTR_TO_BTF_ID_OR_NULL }, 879 }, 880 .seq_info = &ksym_iter_seq_info, 881 }; 882 883 BTF_ID_LIST(btf_ksym_iter_id) 884 BTF_ID(struct, kallsym_iter) 885 886 static int __init bpf_ksym_iter_register(void) 887 { 888 ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id; 889 return bpf_iter_reg_target(&ksym_iter_reg_info); 890 } 891 892 late_initcall(bpf_ksym_iter_register); 893 894 #endif /* CONFIG_BPF_SYSCALL */ 895 896 static int kallsyms_open(struct inode *inode, struct file *file) 897 { 898 /* 899 * We keep iterator in m->private, since normal case is to 900 * s_start from where we left off, so we avoid doing 901 * using get_symbol_offset for every symbol. 902 */ 903 struct kallsym_iter *iter; 904 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter)); 905 if (!iter) 906 return -ENOMEM; 907 reset_iter(iter, 0); 908 909 /* 910 * Instead of checking this on every s_show() call, cache 911 * the result here at open time. 912 */ 913 iter->show_value = kallsyms_show_value(file->f_cred); 914 return 0; 915 } 916 917 #ifdef CONFIG_KGDB_KDB 918 const char *kdb_walk_kallsyms(loff_t *pos) 919 { 920 static struct kallsym_iter kdb_walk_kallsyms_iter; 921 if (*pos == 0) { 922 memset(&kdb_walk_kallsyms_iter, 0, 923 sizeof(kdb_walk_kallsyms_iter)); 924 reset_iter(&kdb_walk_kallsyms_iter, 0); 925 } 926 while (1) { 927 if (!update_iter(&kdb_walk_kallsyms_iter, *pos)) 928 return NULL; 929 ++*pos; 930 /* Some debugging symbols have no name. Ignore them. */ 931 if (kdb_walk_kallsyms_iter.name[0]) 932 return kdb_walk_kallsyms_iter.name; 933 } 934 } 935 #endif /* CONFIG_KGDB_KDB */ 936 937 static const struct proc_ops kallsyms_proc_ops = { 938 .proc_open = kallsyms_open, 939 .proc_read = seq_read, 940 .proc_lseek = seq_lseek, 941 .proc_release = seq_release_private, 942 }; 943 944 static int __init kallsyms_init(void) 945 { 946 proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops); 947 return 0; 948 } 949 device_initcall(kallsyms_init); 950