1 // SPDX-License-Identifier: GPL-2.0-or-later 2 #define _GNU_SOURCE /* memmem() */ 3 #include <subcmd/parse-options.h> 4 #include <stdlib.h> 5 #include <string.h> 6 #include <libgen.h> 7 #include <stdio.h> 8 #include <ctype.h> 9 10 #include <objtool/objtool.h> 11 #include <objtool/warn.h> 12 #include <objtool/arch.h> 13 #include <objtool/klp.h> 14 #include <objtool/util.h> 15 #include <arch/special.h> 16 17 #include <linux/objtool_types.h> 18 #include <linux/livepatch_external.h> 19 #include <linux/stringify.h> 20 #include <linux/string.h> 21 #include <linux/jhash.h> 22 23 #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) 24 25 struct elfs { 26 struct elf *orig, *patched, *out; 27 const char *modname; 28 }; 29 30 struct export { 31 struct hlist_node hash; 32 char *mod, *sym; 33 }; 34 35 static const char * const klp_diff_usage[] = { 36 "objtool klp diff [<options>] <in1.o> <in2.o> <out.o>", 37 NULL, 38 }; 39 40 static const struct option klp_diff_options[] = { 41 OPT_END(), 42 }; 43 44 static DEFINE_HASHTABLE(exports, 15); 45 46 static inline u32 str_hash(const char *str) 47 { 48 return jhash(str, strlen(str), 0); 49 } 50 51 static int read_exports(void) 52 { 53 const char *symvers = "Module.symvers"; 54 char line[1024], *path = NULL; 55 unsigned int line_num = 1; 56 FILE *file; 57 58 file = fopen(symvers, "r"); 59 if (!file) { 60 path = top_level_dir(symvers); 61 if (!path) { 62 ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers); 63 return -1; 64 } 65 66 file = fopen(path, "r"); 67 if (!file) { 68 ERROR_GLIBC("fopen"); 69 return -1; 70 } 71 } 72 73 while (fgets(line, 1024, file)) { 74 char *sym, *mod, *type; 75 struct export *export; 76 77 sym = strchr(line, '\t'); 78 if (!sym) { 79 ERROR("malformed Module.symvers (sym) at line %d", line_num); 80 return -1; 81 } 82 83 *sym++ = '\0'; 84 85 mod = strchr(sym, '\t'); 86 if (!mod) { 87 ERROR("malformed Module.symvers (mod) at line %d", line_num); 88 return -1; 89 } 90 
91 *mod++ = '\0'; 92 93 type = strchr(mod, '\t'); 94 if (!type) { 95 ERROR("malformed Module.symvers (type) at line %d", line_num); 96 return -1; 97 } 98 99 *type++ = '\0'; 100 101 if (*sym == '\0' || *mod == '\0') { 102 ERROR("malformed Module.symvers at line %d", line_num); 103 return -1; 104 } 105 106 export = calloc(1, sizeof(*export)); 107 if (!export) { 108 ERROR_GLIBC("calloc"); 109 return -1; 110 } 111 112 export->mod = strdup(mod); 113 if (!export->mod) { 114 ERROR_GLIBC("strdup"); 115 return -1; 116 } 117 118 export->sym = strdup(sym); 119 if (!export->sym) { 120 ERROR_GLIBC("strdup"); 121 return -1; 122 } 123 124 hash_add(exports, &export->hash, str_hash(sym)); 125 } 126 127 free(path); 128 fclose(file); 129 130 return 0; 131 } 132 133 static int read_sym_checksums(struct elf *elf) 134 { 135 struct section *sec; 136 137 sec = find_section_by_name(elf, ".discard.sym_checksum"); 138 if (!sec) { 139 ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?", 140 elf->name); 141 return -1; 142 } 143 144 if (!sec->rsec) { 145 ERROR("missing reloc section for .discard.sym_checksum"); 146 return -1; 147 } 148 149 if (sec_size(sec) % sizeof(struct sym_checksum)) { 150 ERROR("struct sym_checksum size mismatch"); 151 return -1; 152 } 153 154 for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) { 155 struct sym_checksum *sym_checksum; 156 struct reloc *reloc; 157 struct symbol *sym; 158 159 sym_checksum = (struct sym_checksum *)sec->data->d_buf + i; 160 161 reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum)); 162 if (!reloc) { 163 ERROR("can't find reloc for sym_checksum[%d]", i); 164 return -1; 165 } 166 167 sym = reloc->sym; 168 169 if (is_sec_sym(sym)) { 170 ERROR("not sure how to handle section %s", sym->name); 171 return -1; 172 } 173 174 if (is_func_sym(sym)) 175 sym->csum.checksum = sym_checksum->checksum; 176 } 177 178 return 0; 179 } 180 181 static struct symbol *first_file_symbol(struct elf 
*elf) 182 { 183 struct symbol *sym; 184 185 for_each_sym(elf, sym) { 186 if (is_file_sym(sym)) 187 return sym; 188 } 189 190 return NULL; 191 } 192 193 static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym) 194 { 195 for_each_sym_continue(elf, sym) { 196 if (is_file_sym(sym)) 197 return sym; 198 } 199 200 return NULL; 201 } 202 203 /* 204 * Certain static local variables should never be correlated. They will be 205 * used in place rather than referencing the originals. 206 */ 207 static bool is_uncorrelated_static_local(struct symbol *sym) 208 { 209 static const char * const vars[] = { 210 "__key.", 211 "__warned.", 212 "__already_done.", 213 "__func__.", 214 "_rs.", 215 "descriptor.", 216 "CSWTCH.", 217 }; 218 219 if (!is_object_sym(sym) || !is_local_sym(sym)) 220 return false; 221 222 if (!strcmp(sym->sec->name, ".data.once")) 223 return true; 224 225 for (int i = 0; i < ARRAY_SIZE(vars); i++) { 226 if (strstarts(sym->name, vars[i])) 227 return true; 228 } 229 230 return false; 231 } 232 233 /* 234 * Clang emits several useless .Ltmp_* code labels. 235 */ 236 static bool is_clang_tmp_label(struct symbol *sym) 237 { 238 return sym->type == STT_NOTYPE && 239 is_text_sec(sym->sec) && 240 strstarts(sym->name, ".Ltmp") && 241 isdigit(sym->name[5]); 242 } 243 244 static bool is_special_section(struct section *sec) 245 { 246 static const char * const specials[] = { 247 ".altinstructions", 248 ".smp_locks", 249 "__bug_table", 250 "__ex_table", 251 "__jump_table", 252 "__mcount_loc", 253 254 /* 255 * Extract .static_call_sites here to inherit non-module 256 * preferential treatment. The later static call processing 257 * during klp module build will be skipped when it sees this 258 * section already exists. 
259 */ 260 ".static_call_sites", 261 }; 262 263 static const char * const non_special_discards[] = { 264 ".discard.addressable", 265 ".discard.sym_checksum", 266 }; 267 268 if (is_text_sec(sec)) 269 return false; 270 271 for (int i = 0; i < ARRAY_SIZE(specials); i++) { 272 if (!strcmp(sec->name, specials[i])) 273 return true; 274 } 275 276 /* Most .discard data sections are special */ 277 for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) { 278 if (!strcmp(sec->name, non_special_discards[i])) 279 return false; 280 } 281 282 return strstarts(sec->name, ".discard."); 283 } 284 285 /* 286 * These sections are referenced by special sections but aren't considered 287 * special sections themselves. 288 */ 289 static bool is_special_section_aux(struct section *sec) 290 { 291 static const char * const specials_aux[] = { 292 ".altinstr_replacement", 293 ".altinstr_aux", 294 }; 295 296 for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) { 297 if (!strcmp(sec->name, specials_aux[i])) 298 return true; 299 } 300 301 return false; 302 } 303 304 /* 305 * These symbols should never be correlated, so their local patched versions 306 * are used instead of linking to the originals. 307 */ 308 static bool dont_correlate(struct symbol *sym) 309 { 310 return is_file_sym(sym) || 311 is_null_sym(sym) || 312 is_sec_sym(sym) || 313 is_prefix_func(sym) || 314 is_uncorrelated_static_local(sym) || 315 is_clang_tmp_label(sym) || 316 is_string_sec(sym->sec) || 317 is_special_section(sym->sec) || 318 is_special_section_aux(sym->sec) || 319 strstarts(sym->name, "__initcall__"); 320 } 321 322 /* 323 * For each symbol in the original kernel, find its corresponding "twin" in the 324 * patched kernel. 
325 */ 326 static int correlate_symbols(struct elfs *e) 327 { 328 struct symbol *file1_sym, *file2_sym; 329 struct symbol *sym1, *sym2; 330 331 /* Correlate locals */ 332 for (file1_sym = first_file_symbol(e->orig), 333 file2_sym = first_file_symbol(e->patched); ; 334 file1_sym = next_file_symbol(e->orig, file1_sym), 335 file2_sym = next_file_symbol(e->patched, file2_sym)) { 336 337 if (!file1_sym && file2_sym) { 338 ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name); 339 return -1; 340 } 341 342 if (file1_sym && !file2_sym) { 343 ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name); 344 return -1; 345 } 346 347 if (!file1_sym) 348 break; 349 350 if (strcmp(file1_sym->name, file2_sym->name)) { 351 ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name); 352 return -1; 353 } 354 355 file1_sym->twin = file2_sym; 356 file2_sym->twin = file1_sym; 357 358 sym1 = file1_sym; 359 360 for_each_sym_continue(e->orig, sym1) { 361 if (is_file_sym(sym1) || !is_local_sym(sym1)) 362 break; 363 364 if (dont_correlate(sym1)) 365 continue; 366 367 sym2 = file2_sym; 368 for_each_sym_continue(e->patched, sym2) { 369 if (is_file_sym(sym2) || !is_local_sym(sym2)) 370 break; 371 372 if (sym2->twin || dont_correlate(sym2)) 373 continue; 374 375 if (strcmp(sym1->demangled_name, sym2->demangled_name)) 376 continue; 377 378 sym1->twin = sym2; 379 sym2->twin = sym1; 380 break; 381 } 382 } 383 } 384 385 /* Correlate globals */ 386 for_each_sym(e->orig, sym1) { 387 if (sym1->bind == STB_LOCAL) 388 continue; 389 390 sym2 = find_global_symbol_by_name(e->patched, sym1->name); 391 392 if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) { 393 sym1->twin = sym2; 394 sym2->twin = sym1; 395 } 396 } 397 398 for_each_sym(e->orig, sym1) { 399 if (sym1->twin || dont_correlate(sym1)) 400 continue; 401 WARN("no correlation: %s", sym1->name); 402 } 403 404 return 0; 405 } 406 407 /* "sympos" is used by livepatch to disambiguate duplicate symbol names */ 408 static 
unsigned long find_sympos(struct elf *elf, struct symbol *sym) 409 { 410 bool vmlinux = str_ends_with(objname, "vmlinux.o"); 411 unsigned long sympos = 0, nr_matches = 0; 412 bool has_dup = false; 413 struct symbol *s; 414 415 if (sym->bind != STB_LOCAL) 416 return 0; 417 418 if (vmlinux && sym->type == STT_FUNC) { 419 /* 420 * HACK: Unfortunately, symbol ordering can differ between 421 * vmlinux.o and vmlinux due to the linker script emitting 422 * .text.unlikely* before .text*. Count .text.unlikely* first. 423 * 424 * TODO: Disambiguate symbols more reliably (checksums?) 425 */ 426 for_each_sym(elf, s) { 427 if (strstarts(s->sec->name, ".text.unlikely") && 428 !strcmp(s->name, sym->name)) { 429 nr_matches++; 430 if (s == sym) 431 sympos = nr_matches; 432 else 433 has_dup = true; 434 } 435 } 436 for_each_sym(elf, s) { 437 if (!strstarts(s->sec->name, ".text.unlikely") && 438 !strcmp(s->name, sym->name)) { 439 nr_matches++; 440 if (s == sym) 441 sympos = nr_matches; 442 else 443 has_dup = true; 444 } 445 } 446 } else { 447 for_each_sym(elf, s) { 448 if (!strcmp(s->name, sym->name)) { 449 nr_matches++; 450 if (s == sym) 451 sympos = nr_matches; 452 else 453 has_dup = true; 454 } 455 } 456 } 457 458 if (!sympos) { 459 ERROR("can't find sympos for %s", sym->name); 460 return ULONG_MAX; 461 } 462 463 return has_dup ? 
sympos : 0; 464 } 465 466 static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym); 467 468 static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym, 469 bool data_too) 470 { 471 struct section *out_sec = NULL; 472 unsigned long offset = 0; 473 struct symbol *out_sym; 474 475 if (data_too && !is_undef_sym(patched_sym)) { 476 struct section *patched_sec = patched_sym->sec; 477 478 out_sec = find_section_by_name(elf, patched_sec->name); 479 if (!out_sec) { 480 out_sec = elf_create_section(elf, patched_sec->name, 0, 481 patched_sec->sh.sh_entsize, 482 patched_sec->sh.sh_type, 483 patched_sec->sh.sh_addralign, 484 patched_sec->sh.sh_flags); 485 if (!out_sec) 486 return NULL; 487 } 488 489 if (is_string_sec(patched_sym->sec)) { 490 out_sym = elf_create_section_symbol(elf, out_sec); 491 if (!out_sym) 492 return NULL; 493 494 goto sym_created; 495 } 496 497 if (!is_sec_sym(patched_sym)) 498 offset = sec_size(out_sec); 499 500 if (patched_sym->len || is_sec_sym(patched_sym)) { 501 void *data = NULL; 502 size_t size; 503 504 /* bss doesn't have data */ 505 if (patched_sym->sec->data->d_buf) 506 data = patched_sym->sec->data->d_buf + patched_sym->offset; 507 508 if (is_sec_sym(patched_sym)) 509 size = sec_size(patched_sym->sec); 510 else 511 size = patched_sym->len; 512 513 if (!elf_add_data(elf, out_sec, data, size)) 514 return NULL; 515 } 516 } 517 518 out_sym = elf_create_symbol(elf, patched_sym->name, out_sec, 519 patched_sym->bind, patched_sym->type, 520 offset, patched_sym->len); 521 if (!out_sym) 522 return NULL; 523 524 sym_created: 525 patched_sym->clone = out_sym; 526 out_sym->clone = patched_sym; 527 528 return out_sym; 529 } 530 531 /* 532 * Copy a symbol to the output object, optionally including its data and 533 * relocations. 
534 */ 535 static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym, 536 bool data_too) 537 { 538 struct symbol *pfx; 539 540 if (patched_sym->clone) 541 return patched_sym->clone; 542 543 /* Make sure the prefix gets cloned first */ 544 if (is_func_sym(patched_sym) && data_too) { 545 pfx = get_func_prefix(patched_sym); 546 if (pfx) 547 clone_symbol(e, pfx, true); 548 } 549 550 if (!__clone_symbol(e->out, patched_sym, data_too)) 551 return NULL; 552 553 if (data_too && clone_sym_relocs(e, patched_sym)) 554 return NULL; 555 556 return patched_sym->clone; 557 } 558 559 static void mark_included_function(struct symbol *func) 560 { 561 struct symbol *pfx; 562 563 func->included = 1; 564 565 /* Include prefix function */ 566 pfx = get_func_prefix(func); 567 if (pfx) 568 pfx->included = 1; 569 570 /* Make sure .cold parent+child always stay together */ 571 if (func->cfunc && func->cfunc != func) 572 func->cfunc->included = 1; 573 if (func->pfunc && func->pfunc != func) 574 func->pfunc->included = 1; 575 } 576 577 /* 578 * Copy all changed functions (and their dependencies) from the patched object 579 * to the output object. 
580 */ 581 static int mark_changed_functions(struct elfs *e) 582 { 583 struct symbol *sym_orig, *patched_sym; 584 bool changed = false; 585 586 /* Find changed functions */ 587 for_each_sym(e->orig, sym_orig) { 588 if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig)) 589 continue; 590 591 patched_sym = sym_orig->twin; 592 if (!patched_sym) 593 continue; 594 595 if (sym_orig->csum.checksum != patched_sym->csum.checksum) { 596 patched_sym->changed = 1; 597 mark_included_function(patched_sym); 598 changed = true; 599 } 600 } 601 602 /* Find added functions and print them */ 603 for_each_sym(e->patched, patched_sym) { 604 if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym)) 605 continue; 606 607 if (!patched_sym->twin) { 608 printf("%s: new function: %s\n", objname, patched_sym->name); 609 mark_included_function(patched_sym); 610 changed = true; 611 } 612 } 613 614 /* Print changed functions */ 615 for_each_sym(e->patched, patched_sym) { 616 if (patched_sym->changed) 617 printf("%s: changed function: %s\n", objname, patched_sym->name); 618 } 619 620 return !changed ? -1 : 0; 621 } 622 623 static int clone_included_functions(struct elfs *e) 624 { 625 struct symbol *patched_sym; 626 627 for_each_sym(e->patched, patched_sym) { 628 if (patched_sym->included) { 629 if (!clone_symbol(e, patched_sym, true)) 630 return -1; 631 } 632 } 633 634 return 0; 635 } 636 637 /* 638 * Determine whether a relocation should reference the section rather than the 639 * underlying symbol. 640 */ 641 static bool section_reference_needed(struct section *sec) 642 { 643 /* 644 * String symbols are zero-length and uncorrelated. It's easier to 645 * deal with them as section symbols. 646 */ 647 if (is_string_sec(sec)) 648 return true; 649 650 /* 651 * .rodata has mostly anonymous data so there's no way to determine the 652 * length of a needed reference. just copy the whole section if needed. 
653 */ 654 if (strstarts(sec->name, ".rodata")) 655 return true; 656 657 /* UBSAN anonymous data */ 658 if (strstarts(sec->name, ".data..Lubsan") || /* GCC */ 659 strstarts(sec->name, ".data..L__unnamed_")) /* Clang */ 660 return true; 661 662 return false; 663 } 664 665 static bool is_reloc_allowed(struct reloc *reloc) 666 { 667 return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym); 668 } 669 670 static struct export *find_export(struct symbol *sym) 671 { 672 struct export *export; 673 674 hash_for_each_possible(exports, export, hash, str_hash(sym->name)) { 675 if (!strcmp(export->sym, sym->name)) 676 return export; 677 } 678 679 return NULL; 680 } 681 682 static const char *__find_modname(struct elfs *e) 683 { 684 struct section *sec; 685 char *name; 686 687 sec = find_section_by_name(e->orig, ".modinfo"); 688 if (!sec) { 689 ERROR("missing .modinfo section"); 690 return NULL; 691 } 692 693 name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6); 694 if (name) 695 return name + 6; 696 697 name = strdup(e->orig->name); 698 if (!name) { 699 ERROR_GLIBC("strdup"); 700 return NULL; 701 } 702 703 for (char *c = name; *c; c++) { 704 if (*c == '/') 705 name = c + 1; 706 else if (*c == '-') 707 *c = '_'; 708 else if (*c == '.') { 709 *c = '\0'; 710 break; 711 } 712 } 713 714 return name; 715 } 716 717 /* Get the object's module name as defined by the kernel (and klp_object) */ 718 static const char *find_modname(struct elfs *e) 719 { 720 const char *modname; 721 722 if (e->modname) 723 return e->modname; 724 725 modname = __find_modname(e); 726 e->modname = modname; 727 return modname; 728 } 729 730 /* 731 * Copying a function from its native compiled environment to a kernel module 732 * removes its natural access to local functions/variables and unexported 733 * globals. References to such symbols need to be converted to KLP relocs so 734 * the kernel arch relocation code knows to apply them and where to find the 735 * symbols. 
Particularly, duplicate static symbols need to be disambiguated. 736 */ 737 static bool klp_reloc_needed(struct reloc *patched_reloc) 738 { 739 struct symbol *patched_sym = patched_reloc->sym; 740 struct export *export; 741 742 /* no external symbol to reference */ 743 if (dont_correlate(patched_sym)) 744 return false; 745 746 /* For included functions, a regular reloc will do. */ 747 if (patched_sym->included) 748 return false; 749 750 /* 751 * If exported by a module, it has to be a klp reloc. Thanks to the 752 * clusterfunk that is late module patching, the patch module is 753 * allowed to be loaded before any modules it depends on. 754 * 755 * If exported by vmlinux, a normal reloc will do. 756 */ 757 export = find_export(patched_sym); 758 if (export) 759 return strcmp(export->mod, "vmlinux"); 760 761 if (!patched_sym->twin) { 762 /* 763 * Presumably the symbol and its reference were added by the 764 * patch. The symbol could be defined in this .o or in another 765 * .o in the patch module. 766 * 767 * This check needs to be *after* the export check due to the 768 * possibility of the patch adding a new UNDEF reference to an 769 * exported symbol. 770 */ 771 return false; 772 } 773 774 /* Unexported symbol which lives in the original vmlinux or module. 
*/ 775 return true; 776 } 777 778 static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc) 779 { 780 struct symbol *sym = reloc->sym; 781 struct section *sec = sym->sec; 782 783 if (!sec->sym && !elf_create_section_symbol(elf, sec)) 784 return -1; 785 786 reloc->sym = sec->sym; 787 set_reloc_sym(elf, reloc, sym->idx); 788 set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc)); 789 return 0; 790 } 791 792 static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc) 793 { 794 struct symbol *sym = reloc->sym; 795 struct section *sec = sym->sec; 796 797 /* If the symbol has a dedicated section, it's easy to find */ 798 sym = find_symbol_by_offset(sec, 0); 799 if (sym && sym->len == sec_size(sec)) 800 goto found_sym; 801 802 /* No dedicated section; find the symbol manually */ 803 sym = find_symbol_containing(sec, arch_adjusted_addend(reloc)); 804 if (!sym) { 805 /* 806 * This can happen for special section references to weak code 807 * whose symbol has been stripped by the linker. 808 */ 809 return -1; 810 } 811 812 found_sym: 813 reloc->sym = sym; 814 set_reloc_sym(elf, reloc, sym->idx); 815 set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset); 816 return 0; 817 } 818 819 /* 820 * Convert a relocation symbol reference to the needed format: either a section 821 * symbol or the underlying symbol itself. 822 */ 823 static int convert_reloc_sym(struct elf *elf, struct reloc *reloc) 824 { 825 if (is_reloc_allowed(reloc)) 826 return 0; 827 828 if (section_reference_needed(reloc->sym->sec)) 829 return convert_reloc_sym_to_secsym(elf, reloc); 830 else 831 return convert_reloc_secsym_to_sym(elf, reloc); 832 } 833 834 /* 835 * Convert a regular relocation to a klp relocation (sort of). 
836 */ 837 static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc, 838 struct section *sec, unsigned long offset, 839 struct export *export) 840 { 841 struct symbol *patched_sym = patched_reloc->sym; 842 s64 addend = reloc_addend(patched_reloc); 843 const char *sym_modname, *sym_orig_name; 844 static struct section *klp_relocs; 845 struct symbol *sym, *klp_sym; 846 unsigned long klp_reloc_off; 847 char sym_name[SYM_NAME_LEN]; 848 struct klp_reloc klp_reloc; 849 unsigned long sympos; 850 851 if (!patched_sym->twin) { 852 ERROR("unexpected klp reloc for new symbol %s", patched_sym->name); 853 return -1; 854 } 855 856 /* 857 * Keep the original reloc intact for now to avoid breaking objtool run 858 * which relies on proper relocations for many of its features. This 859 * will be disabled later by "objtool klp post-link". 860 * 861 * Convert it to UNDEF (and WEAK to avoid modpost warnings). 862 */ 863 864 sym = patched_sym->clone; 865 if (!sym) { 866 /* STB_WEAK: avoid modpost undefined symbol warnings */ 867 sym = elf_create_symbol(e->out, patched_sym->name, NULL, 868 STB_WEAK, patched_sym->type, 0, 0); 869 if (!sym) 870 return -1; 871 872 patched_sym->clone = sym; 873 sym->clone = patched_sym; 874 } 875 876 if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc))) 877 return -1; 878 879 /* 880 * Create the KLP symbol. 
881 */ 882 883 if (export) { 884 sym_modname = export->mod; 885 sym_orig_name = export->sym; 886 sympos = 0; 887 } else { 888 sym_modname = find_modname(e); 889 if (!sym_modname) 890 return -1; 891 892 sym_orig_name = patched_sym->twin->name; 893 sympos = find_sympos(e->orig, patched_sym->twin); 894 if (sympos == ULONG_MAX) 895 return -1; 896 } 897 898 /* symbol format: .klp.sym.modname.sym_name,sympos */ 899 if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%ld", 900 sym_modname, sym_orig_name, sympos)) 901 return -1; 902 903 klp_sym = find_symbol_by_name(e->out, sym_name); 904 if (!klp_sym) { 905 /* STB_WEAK: avoid modpost undefined symbol warnings */ 906 klp_sym = elf_create_symbol(e->out, sym_name, NULL, 907 STB_WEAK, patched_sym->type, 0, 0); 908 if (!klp_sym) 909 return -1; 910 } 911 912 /* 913 * Create the __klp_relocs entry. This will be converted to an actual 914 * KLP rela by "objtool klp post-link". 915 * 916 * This intermediate step is necessary to prevent corruption by the 917 * linker, which doesn't know how to properly handle two rela sections 918 * applying to the same base section. 
919 */ 920 921 if (!klp_relocs) { 922 klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0, 923 0, SHT_PROGBITS, 8, SHF_ALLOC); 924 if (!klp_relocs) 925 return -1; 926 } 927 928 klp_reloc_off = sec_size(klp_relocs); 929 memset(&klp_reloc, 0, sizeof(klp_reloc)); 930 931 klp_reloc.type = reloc_type(patched_reloc); 932 if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc))) 933 return -1; 934 935 /* klp_reloc.offset */ 936 if (!sec->sym && !elf_create_section_symbol(e->out, sec)) 937 return -1; 938 939 if (!elf_create_reloc(e->out, klp_relocs, 940 klp_reloc_off + offsetof(struct klp_reloc, offset), 941 sec->sym, offset, R_ABS64)) 942 return -1; 943 944 /* klp_reloc.sym */ 945 if (!elf_create_reloc(e->out, klp_relocs, 946 klp_reloc_off + offsetof(struct klp_reloc, sym), 947 klp_sym, addend, R_ABS64)) 948 return -1; 949 950 return 0; 951 } 952 953 /* Copy a reloc and its symbol to the output object */ 954 static int clone_reloc(struct elfs *e, struct reloc *patched_reloc, 955 struct section *sec, unsigned long offset) 956 { 957 struct symbol *patched_sym = patched_reloc->sym; 958 struct export *export = find_export(patched_sym); 959 long addend = reloc_addend(patched_reloc); 960 struct symbol *out_sym; 961 bool klp; 962 963 if (!is_reloc_allowed(patched_reloc)) { 964 ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc), 965 "missing symbol for reference to %s+%ld", 966 patched_sym->name, addend); 967 return -1; 968 } 969 970 klp = klp_reloc_needed(patched_reloc); 971 972 if (klp) { 973 if (clone_reloc_klp(e, patched_reloc, sec, offset, export)) 974 return -1; 975 976 return 0; 977 } 978 979 /* 980 * Why !export sets 'data_too': 981 * 982 * Unexported non-klp symbols need to live in the patch module, 983 * otherwise there will be unresolved symbols. 
Notably, this includes: 984 * 985 * - New functions/data 986 * - String sections 987 * - Special section entries 988 * - Uncorrelated static local variables 989 * - UBSAN sections 990 */ 991 out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export); 992 if (!out_sym) 993 return -1; 994 995 /* 996 * For strings, all references use section symbols, thanks to 997 * section_reference_needed(). clone_symbol() has cloned an empty 998 * version of the string section. Now copy the string itself. 999 */ 1000 if (is_string_sec(patched_sym->sec)) { 1001 const char *str = patched_sym->sec->data->d_buf + addend; 1002 1003 addend = elf_add_string(e->out, out_sym->sec, str); 1004 if (addend == -1) 1005 return -1; 1006 } 1007 1008 if (!elf_create_reloc(e->out, sec, offset, out_sym, addend, 1009 reloc_type(patched_reloc))) 1010 return -1; 1011 1012 return 0; 1013 } 1014 1015 /* Copy all relocs needed for a symbol's contents */ 1016 static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym) 1017 { 1018 struct section *patched_rsec = patched_sym->sec->rsec; 1019 struct reloc *patched_reloc; 1020 unsigned long start, end; 1021 struct symbol *out_sym; 1022 1023 out_sym = patched_sym->clone; 1024 if (!out_sym) { 1025 ERROR("no clone for %s", patched_sym->name); 1026 return -1; 1027 } 1028 1029 if (!patched_rsec) 1030 return 0; 1031 1032 if (!is_sec_sym(patched_sym) && !patched_sym->len) 1033 return 0; 1034 1035 if (is_string_sec(patched_sym->sec)) 1036 return 0; 1037 1038 if (is_sec_sym(patched_sym)) { 1039 start = 0; 1040 end = sec_size(patched_sym->sec); 1041 } else { 1042 start = patched_sym->offset; 1043 end = start + patched_sym->len; 1044 } 1045 1046 for_each_reloc(patched_rsec, patched_reloc) { 1047 unsigned long offset; 1048 1049 if (reloc_offset(patched_reloc) < start || 1050 reloc_offset(patched_reloc) >= end) 1051 continue; 1052 1053 /* 1054 * Skip any reloc referencing .altinstr_aux. Its code is 1055 * always patched by alternatives. 
See ALTERNATIVE_TERNARY(). 1056 */ 1057 if (patched_reloc->sym->sec && 1058 !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux")) 1059 continue; 1060 1061 if (convert_reloc_sym(e->patched, patched_reloc)) { 1062 ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc), 1063 "failed to convert reloc sym '%s' to its proper format", 1064 patched_reloc->sym->name); 1065 return -1; 1066 } 1067 1068 offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset); 1069 1070 if (clone_reloc(e, patched_reloc, out_sym->sec, offset)) 1071 return -1; 1072 } 1073 return 0; 1074 1075 } 1076 1077 static int create_fake_symbol(struct elf *elf, struct section *sec, 1078 unsigned long offset, size_t size) 1079 { 1080 char name[SYM_NAME_LEN]; 1081 unsigned int type; 1082 static int ctr; 1083 char *c; 1084 1085 if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++)) 1086 return -1; 1087 1088 for (c = name; *c; c++) 1089 if (*c == '.') 1090 *c = '_'; 1091 1092 /* 1093 * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement 1094 * while still allowing objdump to disassemble it. 1095 */ 1096 type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT; 1097 return elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size) ? 0 : -1; 1098 } 1099 1100 /* 1101 * Special sections (alternatives, etc) are basically arrays of structs. 1102 * For all the special sections, create a symbol for each struct entry. This 1103 * is a bit cumbersome, but it makes the extracting of the individual entries 1104 * much more straightforward. 1105 * 1106 * There are three ways to identify the entry sizes for a special section: 1107 * 1108 * 1) ELF section header sh_entsize: Ideally this would be used almost 1109 * everywhere. But unfortunately the toolchains make it difficult. The 1110 * assembler .[push]section directive syntax only takes entsize when 1111 * combined with SHF_MERGE. But Clang disallows combining SHF_MERGE with 1112 * SHF_WRITE. 
And some special sections do need to be writable. 1113 * 1114 * Another place this wouldn't work is .altinstr_replacement, whose entries 1115 * don't have a fixed size. 1116 * 1117 * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which 1118 * points to the beginning of each entry. The size of the entry is then 1119 * inferred by the location of the subsequent annotation (or end of 1120 * section). 1121 * 1122 * 3) Simple array of pointers: If the special section is just a basic array of 1123 * pointers, the entry size can be inferred by the number of relocations. 1124 * No annotations needed. 1125 * 1126 * Note I also tried to create per-entry symbols at the time of creation, in 1127 * the original [inline] asm. Unfortunately, creating uniquely named symbols 1128 * is trickier than one might think, especially with Clang inline asm. I 1129 * eventually just gave up trying to make that work, in favor of using 1130 * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact. 
1131 */ 1132 static int create_fake_symbols(struct elf *elf) 1133 { 1134 struct section *sec; 1135 struct reloc *reloc; 1136 1137 /* 1138 * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries: 1139 */ 1140 1141 sec = find_section_by_name(elf, ".discard.annotate_data"); 1142 if (!sec || !sec->rsec) 1143 return 0; 1144 1145 for_each_reloc(sec->rsec, reloc) { 1146 unsigned long offset, size; 1147 struct reloc *next_reloc; 1148 1149 if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL) 1150 continue; 1151 1152 offset = reloc_addend(reloc); 1153 1154 size = 0; 1155 next_reloc = reloc; 1156 for_each_reloc_continue(sec->rsec, next_reloc) { 1157 if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL || 1158 next_reloc->sym->sec != reloc->sym->sec) 1159 continue; 1160 1161 size = reloc_addend(next_reloc) - offset; 1162 break; 1163 } 1164 1165 if (!size) 1166 size = sec_size(reloc->sym->sec) - offset; 1167 1168 if (create_fake_symbol(elf, reloc->sym->sec, offset, size)) 1169 return -1; 1170 } 1171 1172 /* 1173 * 2) Make symbols for sh_entsize, and simple arrays of pointers: 1174 */ 1175 1176 for_each_sec(elf, sec) { 1177 unsigned int entry_size; 1178 unsigned long offset; 1179 1180 if (!is_special_section(sec) || find_symbol_by_offset(sec, 0)) 1181 continue; 1182 1183 if (!sec->rsec) { 1184 ERROR("%s: missing special section relocations", sec->name); 1185 return -1; 1186 } 1187 1188 entry_size = sec->sh.sh_entsize; 1189 if (!entry_size) { 1190 entry_size = arch_reloc_size(sec->rsec->relocs); 1191 if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) { 1192 ERROR("%s: missing special section entsize or annotations", sec->name); 1193 return -1; 1194 } 1195 } 1196 1197 for (offset = 0; offset < sec_size(sec); offset += entry_size) { 1198 if (create_fake_symbol(elf, sec, offset, entry_size)) 1199 return -1; 1200 } 1201 } 1202 1203 return 0; 1204 } 1205 1206 /* Keep a special section entry if it references an included function */ 1207 static bool 
should_keep_special_sym(struct elf *elf, struct symbol *sym) 1208 { 1209 struct reloc *reloc; 1210 1211 if (is_sec_sym(sym) || !sym->sec->rsec) 1212 return false; 1213 1214 sym_for_each_reloc(elf, sym, reloc) { 1215 if (convert_reloc_sym(elf, reloc)) 1216 continue; 1217 1218 if (is_func_sym(reloc->sym) && reloc->sym->included) 1219 return true; 1220 } 1221 1222 return false; 1223 } 1224 1225 /* 1226 * Klp relocations aren't allowed for __jump_table and .static_call_sites if 1227 * the referenced symbol lives in a kernel module, because such klp relocs may 1228 * be applied after static branch/call init, resulting in code corruption. 1229 * 1230 * Validate a special section entry to avoid that. Note that an inert 1231 * tracepoint is harmless enough, in that case just skip the entry and print a 1232 * warning. Otherwise, return an error. 1233 * 1234 * This is only a temporary limitation which will be fixed when livepatch adds 1235 * support for submodules: fully self-contained modules which are embedded in 1236 * the top-level livepatch module's data and which can be loaded on demand when 1237 * their corresponding to-be-patched module gets loaded. Then klp relocs can 1238 * be retired. 
 *
 * Return:
 *   -1: error: validation failed
 *    1: warning: tracepoint skipped
 *    0: success
 */
static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
{
	bool static_branch = !strcmp(sym->sec->name, "__jump_table");
	bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
	struct symbol *code_sym = NULL;
	unsigned long code_offset = 0;
	struct reloc *reloc;
	int ret = 0;

	/* Only __jump_table and .static_call_sites need this validation */
	if (!static_branch && !static_call)
		return 0;

	sym_for_each_reloc(e->patched, sym, reloc) {
		const char *sym_modname;
		struct export *export;

		/* Static branch/call keys are always STT_OBJECT */
		if (reloc->sym->type != STT_OBJECT) {

			/* Save code location which can be printed below */
			if (reloc->sym->type == STT_FUNC && !code_sym) {
				code_sym = reloc->sym;
				code_offset = reloc_addend(reloc);
			}

			continue;
		}

		if (!klp_reloc_needed(reloc))
			continue;

		/*
		 * Determine the module the key lives in: exported symbols
		 * carry their module name from Module.symvers; otherwise
		 * presumably the key belongs to the patch target itself
		 * (find_modname()).
		 */
		export = find_export(reloc->sym);
		if (export) {
			sym_modname = export->mod;
		} else {
			sym_modname = find_modname(e);
			if (!sym_modname)
				return -1;
		}

		/* vmlinux keys are ok */
		if (!strcmp(sym_modname, "vmlinux"))
			continue;

		if (static_branch) {
			if (strstarts(reloc->sym->name, "__tracepoint_")) {
				/* +13 skips the "__tracepoint_" prefix */
				WARN("%s: disabling unsupported tracepoint %s",
				     code_sym->name, reloc->sym->name + 13);
				ret = 1;
				continue;
			}

			/*
			 * NOTE(review): code_sym may still be NULL here if no
			 * STT_FUNC reloc preceded this key -- confirm entries
			 * always pair the key with a function reloc.
			 */
			ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
			      code_sym->name, code_offset, reloc->sym->name);
			return -1;
		}

		/* static call */
		if (strstarts(reloc->sym->name, "__SCK__tp_func_")) {
			/* Inert tracepoint static call: harmless, skip it */
			ret = 1;
			continue;
		}

		ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
		      code_sym->name, code_offset, reloc->sym->name);
		return -1;
	}

	return ret;
}

/*
 * Clone the entries of one special section which are still relevant for the
 * generated livepatch object.
 */
static int clone_special_section(struct elfs *e, struct section *patched_sec)
{
	struct symbol *patched_sym;

	/*
	 * Extract all special section symbols (and their dependencies) which
	 * reference included functions.
	 */
	sec_for_each_sym(patched_sec, patched_sym) {
		int ret;

		if (!is_object_sym(patched_sym))
			continue;

		if (!should_keep_special_sym(e->patched, patched_sym))
			continue;

		ret = validate_special_section_klp_reloc(e, patched_sym);
		if (ret < 0)
			return -1;
		if (ret > 0)
			continue;	/* disabled tracepoint: skip entry */

		if (!clone_symbol(e, patched_sym, true))
			return -1;
	}

	return 0;
}

/* Extract only the needed bits from special sections */
static int clone_special_sections(struct elfs *e)
{
	struct section *patched_sec;

	if (create_fake_symbols(e->patched))
		return -1;

	for_each_sec(e->patched, patched_sec) {
		if (is_special_section(patched_sec)) {
			if (clone_special_section(e, patched_sec))
				return -1;
		}
	}

	return 0;
}

/*
 * Create __klp_objects and __klp_funcs sections which are intermediate
 * sections provided as input to the patch module's init code for building the
 * klp_patch, klp_object and klp_func structs for the livepatch API.
1368 */ 1369 static int create_klp_sections(struct elfs *e) 1370 { 1371 size_t obj_size = sizeof(struct klp_object_ext); 1372 size_t func_size = sizeof(struct klp_func_ext); 1373 struct section *obj_sec, *funcs_sec, *str_sec; 1374 struct symbol *funcs_sym, *str_sym, *sym; 1375 char sym_name[SYM_NAME_LEN]; 1376 unsigned int nr_funcs = 0; 1377 const char *modname; 1378 void *obj_data; 1379 s64 addend; 1380 1381 obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0); 1382 if (!obj_sec) 1383 return -1; 1384 1385 funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0); 1386 if (!funcs_sec) 1387 return -1; 1388 1389 funcs_sym = elf_create_section_symbol(e->out, funcs_sec); 1390 if (!funcs_sym) 1391 return -1; 1392 1393 str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0, 1394 SHT_PROGBITS, 1, 1395 SHF_ALLOC | SHF_STRINGS | SHF_MERGE); 1396 if (!str_sec) 1397 return -1; 1398 1399 if (elf_add_string(e->out, str_sec, "") == -1) 1400 return -1; 1401 1402 str_sym = elf_create_section_symbol(e->out, str_sec); 1403 if (!str_sym) 1404 return -1; 1405 1406 /* allocate klp_object_ext */ 1407 obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size); 1408 if (!obj_data) 1409 return -1; 1410 1411 modname = find_modname(e); 1412 if (!modname) 1413 return -1; 1414 1415 /* klp_object_ext.name */ 1416 if (strcmp(modname, "vmlinux")) { 1417 addend = elf_add_string(e->out, str_sec, modname); 1418 if (addend == -1) 1419 return -1; 1420 1421 if (!elf_create_reloc(e->out, obj_sec, 1422 offsetof(struct klp_object_ext, name), 1423 str_sym, addend, R_ABS64)) 1424 return -1; 1425 } 1426 1427 /* klp_object_ext.funcs */ 1428 if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs), 1429 funcs_sym, 0, R_ABS64)) 1430 return -1; 1431 1432 for_each_sym(e->out, sym) { 1433 unsigned long offset = nr_funcs * func_size; 1434 unsigned long sympos; 1435 void *func_data; 1436 1437 if (!is_func_sym(sym) || sym->cold || !sym->clone || 
!sym->clone->changed) 1438 continue; 1439 1440 /* allocate klp_func_ext */ 1441 func_data = elf_add_data(e->out, funcs_sec, NULL, func_size); 1442 if (!func_data) 1443 return -1; 1444 1445 /* klp_func_ext.old_name */ 1446 addend = elf_add_string(e->out, str_sec, sym->clone->twin->name); 1447 if (addend == -1) 1448 return -1; 1449 1450 if (!elf_create_reloc(e->out, funcs_sec, 1451 offset + offsetof(struct klp_func_ext, old_name), 1452 str_sym, addend, R_ABS64)) 1453 return -1; 1454 1455 /* klp_func_ext.new_func */ 1456 if (!elf_create_reloc(e->out, funcs_sec, 1457 offset + offsetof(struct klp_func_ext, new_func), 1458 sym, 0, R_ABS64)) 1459 return -1; 1460 1461 /* klp_func_ext.sympos */ 1462 BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos)); 1463 sympos = find_sympos(e->orig, sym->clone->twin); 1464 if (sympos == ULONG_MAX) 1465 return -1; 1466 memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos, 1467 sizeof_field(struct klp_func_ext, sympos)); 1468 1469 nr_funcs++; 1470 } 1471 1472 /* klp_object_ext.nr_funcs */ 1473 BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs)); 1474 memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs, 1475 sizeof_field(struct klp_object_ext, nr_funcs)); 1476 1477 /* 1478 * Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and 1479 * friends, and add them to the klp object. 
1480 */ 1481 1482 if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname)) 1483 return -1; 1484 1485 sym = find_symbol_by_name(e->out, sym_name); 1486 if (sym) { 1487 struct reloc *reloc; 1488 1489 reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1490 1491 if (!elf_create_reloc(e->out, obj_sec, 1492 offsetof(struct klp_object_ext, callbacks) + 1493 offsetof(struct klp_callbacks, pre_patch), 1494 reloc->sym, reloc_addend(reloc), R_ABS64)) 1495 return -1; 1496 } 1497 1498 if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname)) 1499 return -1; 1500 1501 sym = find_symbol_by_name(e->out, sym_name); 1502 if (sym) { 1503 struct reloc *reloc; 1504 1505 reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1506 1507 if (!elf_create_reloc(e->out, obj_sec, 1508 offsetof(struct klp_object_ext, callbacks) + 1509 offsetof(struct klp_callbacks, post_patch), 1510 reloc->sym, reloc_addend(reloc), R_ABS64)) 1511 return -1; 1512 } 1513 1514 if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname)) 1515 return -1; 1516 1517 sym = find_symbol_by_name(e->out, sym_name); 1518 if (sym) { 1519 struct reloc *reloc; 1520 1521 reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1522 1523 if (!elf_create_reloc(e->out, obj_sec, 1524 offsetof(struct klp_object_ext, callbacks) + 1525 offsetof(struct klp_callbacks, pre_unpatch), 1526 reloc->sym, reloc_addend(reloc), R_ABS64)) 1527 return -1; 1528 } 1529 1530 if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname)) 1531 return -1; 1532 1533 sym = find_symbol_by_name(e->out, sym_name); 1534 if (sym) { 1535 struct reloc *reloc; 1536 1537 reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1538 1539 if (!elf_create_reloc(e->out, obj_sec, 1540 offsetof(struct klp_object_ext, callbacks) + 1541 offsetof(struct klp_callbacks, post_unpatch), 1542 reloc->sym, reloc_addend(reloc), R_ABS64)) 1543 return -1; 1544 } 1545 1546 return 0; 
1547 } 1548 1549 /* 1550 * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols 1551 * can be accessed via normal relocs. 1552 */ 1553 static int copy_import_ns(struct elfs *e) 1554 { 1555 struct section *patched_sec, *out_sec = NULL; 1556 char *import_ns, *data_end; 1557 1558 patched_sec = find_section_by_name(e->patched, ".modinfo"); 1559 if (!patched_sec) 1560 return 0; 1561 1562 import_ns = patched_sec->data->d_buf; 1563 if (!import_ns) 1564 return 0; 1565 1566 for (data_end = import_ns + sec_size(patched_sec); 1567 import_ns < data_end; 1568 import_ns += strlen(import_ns) + 1) { 1569 1570 import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10); 1571 if (!import_ns) 1572 return 0; 1573 1574 if (!out_sec) { 1575 out_sec = find_section_by_name(e->out, ".modinfo"); 1576 if (!out_sec) { 1577 out_sec = elf_create_section(e->out, ".modinfo", 0, 1578 patched_sec->sh.sh_entsize, 1579 patched_sec->sh.sh_type, 1580 patched_sec->sh.sh_addralign, 1581 patched_sec->sh.sh_flags); 1582 if (!out_sec) 1583 return -1; 1584 } 1585 } 1586 1587 if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1)) 1588 return -1; 1589 } 1590 1591 return 0; 1592 } 1593 1594 int cmd_klp_diff(int argc, const char **argv) 1595 { 1596 struct elfs e = {0}; 1597 1598 argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0); 1599 if (argc != 3) 1600 usage_with_options(klp_diff_usage, klp_diff_options); 1601 1602 objname = argv[0]; 1603 1604 e.orig = elf_open_read(argv[0], O_RDONLY); 1605 e.patched = elf_open_read(argv[1], O_RDONLY); 1606 e.out = NULL; 1607 1608 if (!e.orig || !e.patched) 1609 return -1; 1610 1611 if (read_exports()) 1612 return -1; 1613 1614 if (read_sym_checksums(e.orig)) 1615 return -1; 1616 1617 if (read_sym_checksums(e.patched)) 1618 return -1; 1619 1620 if (correlate_symbols(&e)) 1621 return -1; 1622 1623 if (mark_changed_functions(&e)) 1624 return 0; 1625 1626 e.out = elf_create_file(&e.orig->ehdr, argv[2]); 1627 if 
(!e.out) 1628 return -1; 1629 1630 if (clone_included_functions(&e)) 1631 return -1; 1632 1633 if (clone_special_sections(&e)) 1634 return -1; 1635 1636 if (create_klp_sections(&e)) 1637 return -1; 1638 1639 if (copy_import_ns(&e)) 1640 return -1; 1641 1642 if (elf_write(e.out)) 1643 return -1; 1644 1645 return elf_close(e.out); 1646 } 1647