// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:
 * 1) List of modules (also safely readable within RCU read section),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

struct mod_tree_root mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const u32 *crcs;
	enum mod_license license;
};

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
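 *
 * A rough sketch of how these bounds are intended to be used (illustrative
 * only, not the actual lookup code): an address-to-module lookup such as
 * __module_address() can reject addresses that cannot belong to any module
 * before doing any heavier per-module search, e.g.
 *
 *	if (addr < mod_tree.addr_min || addr >= mod_tree.addr_max)
 *		return NULL;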
96 */ 97 static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base, 98 unsigned int size, struct mod_tree_root *tree) 99 { 100 unsigned long min = (unsigned long)base; 101 unsigned long max = min + size; 102 103 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 104 if (mod_mem_type_is_core_data(type)) { 105 if (min < tree->data_addr_min) 106 tree->data_addr_min = min; 107 if (max > tree->data_addr_max) 108 tree->data_addr_max = max; 109 return; 110 } 111 #endif 112 if (min < tree->addr_min) 113 tree->addr_min = min; 114 if (max > tree->addr_max) 115 tree->addr_max = max; 116 } 117 118 static void mod_update_bounds(struct module *mod) 119 { 120 for_each_mod_mem_type(type) { 121 struct module_memory *mod_mem = &mod->mem[type]; 122 123 if (mod_mem->size) 124 __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree); 125 } 126 } 127 128 /* Block module loading/unloading? */ 129 static int modules_disabled; 130 core_param(nomodule, modules_disabled, bint, 0); 131 132 static const struct ctl_table module_sysctl_table[] = { 133 { 134 .procname = "modprobe", 135 .data = &modprobe_path, 136 .maxlen = KMOD_PATH_LEN, 137 .mode = 0644, 138 .proc_handler = proc_dostring, 139 }, 140 { 141 .procname = "modules_disabled", 142 .data = &modules_disabled, 143 .maxlen = sizeof(int), 144 .mode = 0644, 145 /* only handle a transition from default "0" to "1" */ 146 .proc_handler = proc_dointvec_minmax, 147 .extra1 = SYSCTL_ONE, 148 .extra2 = SYSCTL_ONE, 149 }, 150 }; 151 152 static int __init init_module_sysctl(void) 153 { 154 register_sysctl_init("kernel", module_sysctl_table); 155 return 0; 156 } 157 158 subsys_initcall(init_module_sysctl); 159 160 /* Waiting for a module to finish initializing? */ 161 static DECLARE_WAIT_QUEUE_HEAD(module_wq); 162 163 static BLOCKING_NOTIFIER_HEAD(module_notify_list); 164 165 int register_module_notifier(struct notifier_block *nb) 166 { 167 return blocking_notifier_chain_register(&module_notify_list, nb); 168 } 169 EXPORT_SYMBOL(register_module_notifier); 170 171 int unregister_module_notifier(struct notifier_block *nb) 172 { 173 return blocking_notifier_chain_unregister(&module_notify_list, nb); 174 } 175 EXPORT_SYMBOL(unregister_module_notifier); 176 177 /* 178 * We require a truly strong try_module_get(): 0 means success. 179 * Otherwise an error is returned due to ongoing or failed 180 * initialization etc. 181 */ 182 static inline int strong_try_module_get(struct module *mod) 183 { 184 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); 185 if (mod && mod->state == MODULE_STATE_COMING) 186 return -EBUSY; 187 if (try_module_get(mod)) 188 return 0; 189 else 190 return -ENOENT; 191 } 192 193 static inline void add_taint_module(struct module *mod, unsigned flag, 194 enum lockdep_ok lockdep_ok) 195 { 196 add_taint(flag, lockdep_ok); 197 set_bit(flag, &mod->taints); 198 } 199 200 /* 201 * Like strncmp(), except s/-/_/g as per scripts/Makefile.lib:name-fix-token rule. 202 */ 203 static int mod_strncmp(const char *str_a, const char *str_b, size_t n) 204 { 205 for (int i = 0; i < n; i++) { 206 char a = str_a[i]; 207 char b = str_b[i]; 208 int d; 209 210 if (a == '-') a = '_'; 211 if (b == '-') b = '_'; 212 213 d = a - b; 214 if (d) 215 return d; 216 217 if (!a) 218 break; 219 } 220 221 return 0; 222 } 223 224 /* 225 * A thread that wants to hold a reference to a module only while it 226 * is running can call this to safely exit. 
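 *
 * A minimal usage sketch (the worker and its helper are hypothetical; the
 * module_put_and_kthread_exit() wrapper passes THIS_MODULE to the function
 * below). It assumes the module took a reference on itself, e.g. with
 * __module_get(THIS_MODULE), before starting the thread:
 *
 *	static int one_shot_worker(void *data)
 *	{
 *		do_one_shot_work(data);
 *		module_put_and_kthread_exit(0);
 *	}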
227 */ 228 void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) 229 { 230 module_put(mod); 231 kthread_exit(code); 232 } 233 EXPORT_SYMBOL(__module_put_and_kthread_exit); 234 235 /* Find a module section: 0 means not found. */ 236 static unsigned int find_sec(const struct load_info *info, const char *name) 237 { 238 unsigned int i; 239 240 for (i = 1; i < info->hdr->e_shnum; i++) { 241 Elf_Shdr *shdr = &info->sechdrs[i]; 242 /* Alloc bit cleared means "ignore it." */ 243 if ((shdr->sh_flags & SHF_ALLOC) 244 && strcmp(info->secstrings + shdr->sh_name, name) == 0) 245 return i; 246 } 247 return 0; 248 } 249 250 /** 251 * find_any_unique_sec() - Find a unique section index by name 252 * @info: Load info for the module to scan 253 * @name: Name of the section we're looking for 254 * 255 * Locates a unique section by name. Ignores SHF_ALLOC. 256 * 257 * Return: Section index if found uniquely, zero if absent, negative count 258 * of total instances if multiple were found. 259 */ 260 static int find_any_unique_sec(const struct load_info *info, const char *name) 261 { 262 unsigned int idx; 263 unsigned int count = 0; 264 int i; 265 266 for (i = 1; i < info->hdr->e_shnum; i++) { 267 if (strcmp(info->secstrings + info->sechdrs[i].sh_name, 268 name) == 0) { 269 count++; 270 idx = i; 271 } 272 } 273 if (count == 1) { 274 return idx; 275 } else if (count == 0) { 276 return 0; 277 } else { 278 return -count; 279 } 280 } 281 282 /* Find a module section, or NULL. */ 283 static void *section_addr(const struct load_info *info, const char *name) 284 { 285 /* Section 0 has sh_addr 0. */ 286 return (void *)info->sechdrs[find_sec(info, name)].sh_addr; 287 } 288 289 /* Find a module section, or NULL. Fill in number of "objects" in section. */ 290 static void *section_objs(const struct load_info *info, 291 const char *name, 292 size_t object_size, 293 unsigned int *num) 294 { 295 unsigned int sec = find_sec(info, name); 296 297 /* Section 0 has sh_addr 0 and sh_size 0. */ 298 *num = info->sechdrs[sec].sh_size / object_size; 299 return (void *)info->sechdrs[sec].sh_addr; 300 } 301 302 /* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ 303 static unsigned int find_any_sec(const struct load_info *info, const char *name) 304 { 305 unsigned int i; 306 307 for (i = 1; i < info->hdr->e_shnum; i++) { 308 Elf_Shdr *shdr = &info->sechdrs[i]; 309 if (strcmp(info->secstrings + shdr->sh_name, name) == 0) 310 return i; 311 } 312 return 0; 313 } 314 315 /* 316 * Find a module section, or NULL. Fill in number of "objects" in section. 317 * Ignores SHF_ALLOC flag. 318 */ 319 static __maybe_unused void *any_section_objs(const struct load_info *info, 320 const char *name, 321 size_t object_size, 322 unsigned int *num) 323 { 324 unsigned int sec = find_any_sec(info, name); 325 326 /* Section 0 has sh_addr 0 and sh_size 0. */ 327 *num = info->sechdrs[sec].sh_size / object_size; 328 return (void *)info->sechdrs[sec].sh_addr; 329 } 330 331 #ifndef CONFIG_MODVERSIONS 332 #define symversion(base, idx) NULL 333 #else 334 #define symversion(base, idx) ((base != NULL) ? 
((base) + (idx)) : NULL) 335 #endif 336 337 static const char *kernel_symbol_name(const struct kernel_symbol *sym) 338 { 339 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 340 return offset_to_ptr(&sym->name_offset); 341 #else 342 return sym->name; 343 #endif 344 } 345 346 static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) 347 { 348 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 349 if (!sym->namespace_offset) 350 return NULL; 351 return offset_to_ptr(&sym->namespace_offset); 352 #else 353 return sym->namespace; 354 #endif 355 } 356 357 int cmp_name(const void *name, const void *sym) 358 { 359 return strcmp(name, kernel_symbol_name(sym)); 360 } 361 362 static bool find_exported_symbol_in_section(const struct symsearch *syms, 363 struct module *owner, 364 struct find_symbol_arg *fsa) 365 { 366 struct kernel_symbol *sym; 367 368 if (!fsa->gplok && syms->license == GPL_ONLY) 369 return false; 370 371 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, 372 sizeof(struct kernel_symbol), cmp_name); 373 if (!sym) 374 return false; 375 376 fsa->owner = owner; 377 fsa->crc = symversion(syms->crcs, sym - syms->start); 378 fsa->sym = sym; 379 fsa->license = syms->license; 380 381 return true; 382 } 383 384 /* 385 * Find an exported symbol and return it, along with, (optional) crc and 386 * (optional) module which owns it. Needs RCU or module_mutex. 387 */ 388 bool find_symbol(struct find_symbol_arg *fsa) 389 { 390 static const struct symsearch arr[] = { 391 { __start___ksymtab, __stop___ksymtab, __start___kcrctab, 392 NOT_GPL_ONLY }, 393 { __start___ksymtab_gpl, __stop___ksymtab_gpl, 394 __start___kcrctab_gpl, 395 GPL_ONLY }, 396 }; 397 struct module *mod; 398 unsigned int i; 399 400 for (i = 0; i < ARRAY_SIZE(arr); i++) 401 if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) 402 return true; 403 404 list_for_each_entry_rcu(mod, &modules, list, 405 lockdep_is_held(&module_mutex)) { 406 struct symsearch arr[] = { 407 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 408 NOT_GPL_ONLY }, 409 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, 410 mod->gpl_crcs, 411 GPL_ONLY }, 412 }; 413 414 if (mod->state == MODULE_STATE_UNFORMED) 415 continue; 416 417 for (i = 0; i < ARRAY_SIZE(arr); i++) 418 if (find_exported_symbol_in_section(&arr[i], mod, fsa)) 419 return true; 420 } 421 422 pr_debug("Failed to find symbol %s\n", fsa->name); 423 return false; 424 } 425 426 /* 427 * Search for module by name: must hold module_mutex (or RCU for read-only 428 * access). 
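 *
 * A minimal read-only lookup sketch (illustrative; "snd_foo" is a made-up
 * module name), using the RCU variant:
 *
 *	scoped_guard(rcu) {
 *		struct module *mod = find_module("snd_foo");
 *
 *		if (mod && mod->state == MODULE_STATE_LIVE)
 *			pr_info("%s is loaded\n", mod->name);
 *	}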
429 */ 430 struct module *find_module_all(const char *name, size_t len, 431 bool even_unformed) 432 { 433 struct module *mod; 434 435 list_for_each_entry_rcu(mod, &modules, list, 436 lockdep_is_held(&module_mutex)) { 437 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 438 continue; 439 if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) 440 return mod; 441 } 442 return NULL; 443 } 444 445 struct module *find_module(const char *name) 446 { 447 return find_module_all(name, strlen(name), false); 448 } 449 450 #ifdef CONFIG_SMP 451 452 static inline void __percpu *mod_percpu(struct module *mod) 453 { 454 return mod->percpu; 455 } 456 457 static int percpu_modalloc(struct module *mod, struct load_info *info) 458 { 459 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; 460 unsigned long align = pcpusec->sh_addralign; 461 462 if (!pcpusec->sh_size) 463 return 0; 464 465 if (align > PAGE_SIZE) { 466 pr_warn("%s: per-cpu alignment %li > %li\n", 467 mod->name, align, PAGE_SIZE); 468 align = PAGE_SIZE; 469 } 470 471 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); 472 if (!mod->percpu) { 473 pr_warn("%s: Could not allocate %lu bytes percpu data\n", 474 mod->name, (unsigned long)pcpusec->sh_size); 475 return -ENOMEM; 476 } 477 mod->percpu_size = pcpusec->sh_size; 478 return 0; 479 } 480 481 static void percpu_modfree(struct module *mod) 482 { 483 free_percpu(mod->percpu); 484 } 485 486 static unsigned int find_pcpusec(struct load_info *info) 487 { 488 return find_sec(info, ".data..percpu"); 489 } 490 491 static void percpu_modcopy(struct module *mod, 492 const void *from, unsigned long size) 493 { 494 int cpu; 495 496 for_each_possible_cpu(cpu) 497 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); 498 } 499 500 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 501 { 502 struct module *mod; 503 unsigned int cpu; 504 505 guard(rcu)(); 506 list_for_each_entry_rcu(mod, &modules, list) { 507 if (mod->state == MODULE_STATE_UNFORMED) 508 continue; 509 if (!mod->percpu_size) 510 continue; 511 for_each_possible_cpu(cpu) { 512 void *start = per_cpu_ptr(mod->percpu, cpu); 513 void *va = (void *)addr; 514 515 if (va >= start && va < start + mod->percpu_size) { 516 if (can_addr) { 517 *can_addr = (unsigned long) (va - start); 518 *can_addr += (unsigned long) 519 per_cpu_ptr(mod->percpu, 520 get_boot_cpu_id()); 521 } 522 return true; 523 } 524 } 525 } 526 return false; 527 } 528 529 /** 530 * is_module_percpu_address() - test whether address is from module static percpu 531 * @addr: address to test 532 * 533 * Test whether @addr belongs to module static percpu area. 534 * 535 * Return: %true if @addr is from module static percpu area 536 */ 537 bool is_module_percpu_address(unsigned long addr) 538 { 539 return __is_module_percpu_address(addr, NULL); 540 } 541 542 #else /* ... 
!CONFIG_SMP */ 543 544 static inline void __percpu *mod_percpu(struct module *mod) 545 { 546 return NULL; 547 } 548 static int percpu_modalloc(struct module *mod, struct load_info *info) 549 { 550 /* UP modules shouldn't have this section: ENOMEM isn't quite right */ 551 if (info->sechdrs[info->index.pcpu].sh_size != 0) 552 return -ENOMEM; 553 return 0; 554 } 555 static inline void percpu_modfree(struct module *mod) 556 { 557 } 558 static unsigned int find_pcpusec(struct load_info *info) 559 { 560 return 0; 561 } 562 static inline void percpu_modcopy(struct module *mod, 563 const void *from, unsigned long size) 564 { 565 /* pcpusec should be 0, and size of that section should be 0. */ 566 BUG_ON(size != 0); 567 } 568 bool is_module_percpu_address(unsigned long addr) 569 { 570 return false; 571 } 572 573 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 574 { 575 return false; 576 } 577 578 #endif /* CONFIG_SMP */ 579 580 #define MODINFO_ATTR(field) \ 581 static void setup_modinfo_##field(struct module *mod, const char *s) \ 582 { \ 583 mod->field = kstrdup(s, GFP_KERNEL); \ 584 } \ 585 static ssize_t show_modinfo_##field(const struct module_attribute *mattr, \ 586 struct module_kobject *mk, char *buffer) \ 587 { \ 588 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ 589 } \ 590 static int modinfo_##field##_exists(struct module *mod) \ 591 { \ 592 return mod->field != NULL; \ 593 } \ 594 static void free_modinfo_##field(struct module *mod) \ 595 { \ 596 kfree(mod->field); \ 597 mod->field = NULL; \ 598 } \ 599 static const struct module_attribute modinfo_##field = { \ 600 .attr = { .name = __stringify(field), .mode = 0444 }, \ 601 .show = show_modinfo_##field, \ 602 .setup = setup_modinfo_##field, \ 603 .test = modinfo_##field##_exists, \ 604 .free = free_modinfo_##field, \ 605 }; 606 607 MODINFO_ATTR(version); 608 MODINFO_ATTR(srcversion); 609 610 static struct { 611 char name[MODULE_NAME_LEN + 1]; 612 char taints[MODULE_FLAGS_BUF_SIZE]; 613 } last_unloaded_module; 614 615 #ifdef CONFIG_MODULE_UNLOAD 616 617 EXPORT_TRACEPOINT_SYMBOL(module_get); 618 619 /* MODULE_REF_BASE is the base reference count by kmodule loader. */ 620 #define MODULE_REF_BASE 1 621 622 /* Init the unload section of the module. */ 623 static int module_unload_init(struct module *mod) 624 { 625 /* 626 * Initialize reference counter to MODULE_REF_BASE. 627 * refcnt == 0 means module is going. 628 */ 629 atomic_set(&mod->refcnt, MODULE_REF_BASE); 630 631 INIT_LIST_HEAD(&mod->source_list); 632 INIT_LIST_HEAD(&mod->target_list); 633 634 /* Hold reference count during initialization. */ 635 atomic_inc(&mod->refcnt); 636 637 return 0; 638 } 639 640 /* Does a already use b? */ 641 static int already_uses(struct module *a, struct module *b) 642 { 643 struct module_use *use; 644 645 list_for_each_entry(use, &b->source_list, source_list) { 646 if (use->source == a) 647 return 1; 648 } 649 pr_debug("%s does not use %s!\n", a->name, b->name); 650 return 0; 651 } 652 653 /* 654 * Module a uses b 655 * - we add 'a' as a "source", 'b' as a "target" of module use 656 * - the module_use is added to the list of 'b' sources (so 657 * 'b' can walk the list to see who sourced them), and of 'a' 658 * targets (so 'a' can see what modules it targets). 
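 *
 * A small illustration of the resulting bookkeeping (module pointers 'a' and
 * 'b' are placeholders): after a successful ref_module(a, b),
 *
 *	already_uses(a, b) == 1
 *
 * because one struct module_use now sits on both b->source_list and
 * a->target_list, and b's reference count was raised via
 * strong_try_module_get(b).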
659 */ 660 static int add_module_usage(struct module *a, struct module *b) 661 { 662 struct module_use *use; 663 664 pr_debug("Allocating new usage for %s.\n", a->name); 665 use = kmalloc(sizeof(*use), GFP_ATOMIC); 666 if (!use) 667 return -ENOMEM; 668 669 use->source = a; 670 use->target = b; 671 list_add(&use->source_list, &b->source_list); 672 list_add(&use->target_list, &a->target_list); 673 return 0; 674 } 675 676 /* Module a uses b: caller needs module_mutex() */ 677 static int ref_module(struct module *a, struct module *b) 678 { 679 int err; 680 681 if (b == NULL || already_uses(a, b)) 682 return 0; 683 684 /* If module isn't available, we fail. */ 685 err = strong_try_module_get(b); 686 if (err) 687 return err; 688 689 err = add_module_usage(a, b); 690 if (err) { 691 module_put(b); 692 return err; 693 } 694 return 0; 695 } 696 697 /* Clear the unload stuff of the module. */ 698 static void module_unload_free(struct module *mod) 699 { 700 struct module_use *use, *tmp; 701 702 mutex_lock(&module_mutex); 703 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { 704 struct module *i = use->target; 705 pr_debug("%s unusing %s\n", mod->name, i->name); 706 module_put(i); 707 list_del(&use->source_list); 708 list_del(&use->target_list); 709 kfree(use); 710 } 711 mutex_unlock(&module_mutex); 712 } 713 714 #ifdef CONFIG_MODULE_FORCE_UNLOAD 715 static inline int try_force_unload(unsigned int flags) 716 { 717 int ret = (flags & O_TRUNC); 718 if (ret) 719 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); 720 return ret; 721 } 722 #else 723 static inline int try_force_unload(unsigned int flags) 724 { 725 return 0; 726 } 727 #endif /* CONFIG_MODULE_FORCE_UNLOAD */ 728 729 /* Try to release refcount of module, 0 means success. */ 730 static int try_release_module_ref(struct module *mod) 731 { 732 int ret; 733 734 /* Try to decrement refcnt which we set at loading */ 735 ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); 736 BUG_ON(ret < 0); 737 if (ret) 738 /* Someone can put this right now, recover with checking */ 739 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); 740 741 return ret; 742 } 743 744 static int try_stop_module(struct module *mod, int flags, int *forced) 745 { 746 /* If it's not unused, quit unless we're forcing. */ 747 if (try_release_module_ref(mod) != 0) { 748 *forced = try_force_unload(flags); 749 if (!(*forced)) 750 return -EWOULDBLOCK; 751 } 752 753 /* Mark it as dying. 
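	 * Once the state is MODULE_STATE_GOING, module_is_live() is false, so
	 * try_module_get() refuses new references while we tear things down.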
*/ 754 mod->state = MODULE_STATE_GOING; 755 756 return 0; 757 } 758 759 /** 760 * module_refcount() - return the refcount or -1 if unloading 761 * @mod: the module we're checking 762 * 763 * Return: 764 * -1 if the module is in the process of unloading 765 * otherwise the number of references in the kernel to the module 766 */ 767 int module_refcount(struct module *mod) 768 { 769 return atomic_read(&mod->refcnt) - MODULE_REF_BASE; 770 } 771 EXPORT_SYMBOL(module_refcount); 772 773 /* This exists whether we can unload or not */ 774 static void free_module(struct module *mod); 775 776 SYSCALL_DEFINE2(delete_module, const char __user *, name_user, 777 unsigned int, flags) 778 { 779 struct module *mod; 780 char name[MODULE_NAME_LEN]; 781 char buf[MODULE_FLAGS_BUF_SIZE]; 782 int ret, forced = 0; 783 784 if (!capable(CAP_SYS_MODULE) || modules_disabled) 785 return -EPERM; 786 787 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) 788 return -EFAULT; 789 name[MODULE_NAME_LEN-1] = '\0'; 790 791 audit_log_kern_module(name); 792 793 if (mutex_lock_interruptible(&module_mutex) != 0) 794 return -EINTR; 795 796 mod = find_module(name); 797 if (!mod) { 798 ret = -ENOENT; 799 goto out; 800 } 801 802 if (!list_empty(&mod->source_list)) { 803 /* Other modules depend on us: get rid of them first. */ 804 ret = -EWOULDBLOCK; 805 goto out; 806 } 807 808 /* Doing init or already dying? */ 809 if (mod->state != MODULE_STATE_LIVE) { 810 /* FIXME: if (force), slam module count damn the torpedoes */ 811 pr_debug("%s already dying\n", mod->name); 812 ret = -EBUSY; 813 goto out; 814 } 815 816 /* If it has an init func, it must have an exit func to unload */ 817 if (mod->init && !mod->exit) { 818 forced = try_force_unload(flags); 819 if (!forced) { 820 /* This module can't be removed */ 821 ret = -EBUSY; 822 goto out; 823 } 824 } 825 826 ret = try_stop_module(mod, flags, &forced); 827 if (ret != 0) 828 goto out; 829 830 mutex_unlock(&module_mutex); 831 /* Final destruction now no one is using it. */ 832 if (mod->exit != NULL) 833 mod->exit(); 834 blocking_notifier_call_chain(&module_notify_list, 835 MODULE_STATE_GOING, mod); 836 klp_module_going(mod); 837 ftrace_release_mod(mod); 838 839 async_synchronize_full(); 840 841 /* Store the name and taints of the last unloaded module for diagnostic purposes */ 842 strscpy(last_unloaded_module.name, mod->name); 843 strscpy(last_unloaded_module.taints, module_flags(mod, buf, false)); 844 845 free_module(mod); 846 /* someone could wait for the module in add_unformed_module() */ 847 wake_up_all(&module_wq); 848 return 0; 849 out: 850 mutex_unlock(&module_mutex); 851 return ret; 852 } 853 854 void __symbol_put(const char *symbol) 855 { 856 struct find_symbol_arg fsa = { 857 .name = symbol, 858 .gplok = true, 859 }; 860 861 guard(rcu)(); 862 BUG_ON(!find_symbol(&fsa)); 863 module_put(fsa.owner); 864 } 865 EXPORT_SYMBOL(__symbol_put); 866 867 /* Note this assumes addr is a function, which it currently always is. */ 868 void symbol_put_addr(void *addr) 869 { 870 struct module *modaddr; 871 unsigned long a = (unsigned long)dereference_function_descriptor(addr); 872 873 if (core_kernel_text(a)) 874 return; 875 876 /* 877 * Even though we hold a reference on the module; we still need to 878 * RCU read section in order to safely traverse the data structure. 
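	 * (The reference pins the module that owns addr, but unrelated modules
	 * can still be unloaded concurrently, so the address lookup below has
	 * to run under RCU.)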
879 */ 880 guard(rcu)(); 881 modaddr = __module_text_address(a); 882 BUG_ON(!modaddr); 883 module_put(modaddr); 884 } 885 EXPORT_SYMBOL_GPL(symbol_put_addr); 886 887 static ssize_t show_refcnt(const struct module_attribute *mattr, 888 struct module_kobject *mk, char *buffer) 889 { 890 return sprintf(buffer, "%i\n", module_refcount(mk->mod)); 891 } 892 893 static const struct module_attribute modinfo_refcnt = 894 __ATTR(refcnt, 0444, show_refcnt, NULL); 895 896 void __module_get(struct module *module) 897 { 898 if (module) { 899 atomic_inc(&module->refcnt); 900 trace_module_get(module, _RET_IP_); 901 } 902 } 903 EXPORT_SYMBOL(__module_get); 904 905 bool try_module_get(struct module *module) 906 { 907 bool ret = true; 908 909 if (module) { 910 /* Note: here, we can fail to get a reference */ 911 if (likely(module_is_live(module) && 912 atomic_inc_not_zero(&module->refcnt) != 0)) 913 trace_module_get(module, _RET_IP_); 914 else 915 ret = false; 916 } 917 return ret; 918 } 919 EXPORT_SYMBOL(try_module_get); 920 921 void module_put(struct module *module) 922 { 923 int ret; 924 925 if (module) { 926 ret = atomic_dec_if_positive(&module->refcnt); 927 WARN_ON(ret < 0); /* Failed to put refcount */ 928 trace_module_put(module, _RET_IP_); 929 } 930 } 931 EXPORT_SYMBOL(module_put); 932 933 #else /* !CONFIG_MODULE_UNLOAD */ 934 static inline void module_unload_free(struct module *mod) 935 { 936 } 937 938 static int ref_module(struct module *a, struct module *b) 939 { 940 return strong_try_module_get(b); 941 } 942 943 static inline int module_unload_init(struct module *mod) 944 { 945 return 0; 946 } 947 #endif /* CONFIG_MODULE_UNLOAD */ 948 949 size_t module_flags_taint(unsigned long taints, char *buf) 950 { 951 size_t l = 0; 952 int i; 953 954 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 955 if (taint_flags[i].module && test_bit(i, &taints)) 956 buf[l++] = taint_flags[i].c_true; 957 } 958 959 return l; 960 } 961 962 static ssize_t show_initstate(const struct module_attribute *mattr, 963 struct module_kobject *mk, char *buffer) 964 { 965 const char *state = "unknown"; 966 967 switch (mk->mod->state) { 968 case MODULE_STATE_LIVE: 969 state = "live"; 970 break; 971 case MODULE_STATE_COMING: 972 state = "coming"; 973 break; 974 case MODULE_STATE_GOING: 975 state = "going"; 976 break; 977 default: 978 BUG(); 979 } 980 return sprintf(buffer, "%s\n", state); 981 } 982 983 static const struct module_attribute modinfo_initstate = 984 __ATTR(initstate, 0444, show_initstate, NULL); 985 986 static ssize_t store_uevent(const struct module_attribute *mattr, 987 struct module_kobject *mk, 988 const char *buffer, size_t count) 989 { 990 int rc; 991 992 rc = kobject_synth_uevent(&mk->kobj, buffer, count); 993 return rc ? 
rc : count; 994 } 995 996 const struct module_attribute module_uevent = 997 __ATTR(uevent, 0200, NULL, store_uevent); 998 999 static ssize_t show_coresize(const struct module_attribute *mattr, 1000 struct module_kobject *mk, char *buffer) 1001 { 1002 unsigned int size = mk->mod->mem[MOD_TEXT].size; 1003 1004 if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) { 1005 for_class_mod_mem_type(type, core_data) 1006 size += mk->mod->mem[type].size; 1007 } 1008 return sprintf(buffer, "%u\n", size); 1009 } 1010 1011 static const struct module_attribute modinfo_coresize = 1012 __ATTR(coresize, 0444, show_coresize, NULL); 1013 1014 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 1015 static ssize_t show_datasize(const struct module_attribute *mattr, 1016 struct module_kobject *mk, char *buffer) 1017 { 1018 unsigned int size = 0; 1019 1020 for_class_mod_mem_type(type, core_data) 1021 size += mk->mod->mem[type].size; 1022 return sprintf(buffer, "%u\n", size); 1023 } 1024 1025 static const struct module_attribute modinfo_datasize = 1026 __ATTR(datasize, 0444, show_datasize, NULL); 1027 #endif 1028 1029 static ssize_t show_initsize(const struct module_attribute *mattr, 1030 struct module_kobject *mk, char *buffer) 1031 { 1032 unsigned int size = 0; 1033 1034 for_class_mod_mem_type(type, init) 1035 size += mk->mod->mem[type].size; 1036 return sprintf(buffer, "%u\n", size); 1037 } 1038 1039 static const struct module_attribute modinfo_initsize = 1040 __ATTR(initsize, 0444, show_initsize, NULL); 1041 1042 static ssize_t show_taint(const struct module_attribute *mattr, 1043 struct module_kobject *mk, char *buffer) 1044 { 1045 size_t l; 1046 1047 l = module_flags_taint(mk->mod->taints, buffer); 1048 buffer[l++] = '\n'; 1049 return l; 1050 } 1051 1052 static const struct module_attribute modinfo_taint = 1053 __ATTR(taint, 0444, show_taint, NULL); 1054 1055 const struct module_attribute *const modinfo_attrs[] = { 1056 &module_uevent, 1057 &modinfo_version, 1058 &modinfo_srcversion, 1059 &modinfo_initstate, 1060 &modinfo_coresize, 1061 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 1062 &modinfo_datasize, 1063 #endif 1064 &modinfo_initsize, 1065 &modinfo_taint, 1066 #ifdef CONFIG_MODULE_UNLOAD 1067 &modinfo_refcnt, 1068 #endif 1069 NULL, 1070 }; 1071 1072 const size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); 1073 1074 static const char vermagic[] = VERMAGIC_STRING; 1075 1076 int try_to_force_load(struct module *mod, const char *reason) 1077 { 1078 #ifdef CONFIG_MODULE_FORCE_LOAD 1079 if (!test_taint(TAINT_FORCED_MODULE)) 1080 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); 1081 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); 1082 return 0; 1083 #else 1084 return -ENOEXEC; 1085 #endif 1086 } 1087 1088 /* Parse tag=value strings from .modinfo section */ 1089 char *module_next_tag_pair(char *string, unsigned long *secsize) 1090 { 1091 /* Skip non-zero chars */ 1092 while (string[0]) { 1093 string++; 1094 if ((*secsize)-- <= 1) 1095 return NULL; 1096 } 1097 1098 /* Skip any zero padding. 
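	 * (.modinfo is a sequence of NUL-terminated "tag=value" strings, e.g.
	 * "license=GPL", so after the current string we may see one or more
	 * padding NUL bytes before the next pair starts.)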
*/ 1099 while (!string[0]) { 1100 string++; 1101 if ((*secsize)-- <= 1) 1102 return NULL; 1103 } 1104 return string; 1105 } 1106 1107 static char *get_next_modinfo(const struct load_info *info, const char *tag, 1108 char *prev) 1109 { 1110 char *p; 1111 unsigned int taglen = strlen(tag); 1112 Elf_Shdr *infosec = &info->sechdrs[info->index.info]; 1113 unsigned long size = infosec->sh_size; 1114 1115 /* 1116 * get_modinfo() calls made before rewrite_section_headers() 1117 * must use sh_offset, as sh_addr isn't set! 1118 */ 1119 char *modinfo = (char *)info->hdr + infosec->sh_offset; 1120 1121 if (prev) { 1122 size -= prev - modinfo; 1123 modinfo = module_next_tag_pair(prev, &size); 1124 } 1125 1126 for (p = modinfo; p; p = module_next_tag_pair(p, &size)) { 1127 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 1128 return p + taglen + 1; 1129 } 1130 return NULL; 1131 } 1132 1133 static char *get_modinfo(const struct load_info *info, const char *tag) 1134 { 1135 return get_next_modinfo(info, tag, NULL); 1136 } 1137 1138 /** 1139 * verify_module_namespace() - does @modname have access to this symbol's @namespace 1140 * @namespace: export symbol namespace 1141 * @modname: module name 1142 * 1143 * If @namespace is prefixed with "module:" to indicate it is a module namespace 1144 * then test if @modname matches any of the comma separated patterns. 1145 * 1146 * The patterns only support tail-glob. 1147 */ 1148 static bool verify_module_namespace(const char *namespace, const char *modname) 1149 { 1150 size_t len, modlen = strlen(modname); 1151 const char *prefix = "module:"; 1152 const char *sep; 1153 bool glob; 1154 1155 if (!strstarts(namespace, prefix)) 1156 return false; 1157 1158 for (namespace += strlen(prefix); *namespace; namespace = sep) { 1159 sep = strchrnul(namespace, ','); 1160 len = sep - namespace; 1161 1162 glob = false; 1163 if (sep[-1] == '*') { 1164 len--; 1165 glob = true; 1166 } 1167 1168 if (*sep) 1169 sep++; 1170 1171 if (mod_strncmp(namespace, modname, len) == 0 && (glob || len == modlen)) 1172 return true; 1173 } 1174 1175 return false; 1176 } 1177 1178 static int verify_namespace_is_imported(const struct load_info *info, 1179 const struct kernel_symbol *sym, 1180 struct module *mod) 1181 { 1182 const char *namespace; 1183 char *imported_namespace; 1184 1185 namespace = kernel_symbol_namespace(sym); 1186 if (namespace && namespace[0]) { 1187 1188 if (verify_module_namespace(namespace, mod->name)) 1189 return 0; 1190 1191 for_each_modinfo_entry(imported_namespace, info, "import_ns") { 1192 if (strcmp(namespace, imported_namespace) == 0) 1193 return 0; 1194 } 1195 #ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS 1196 pr_warn( 1197 #else 1198 pr_err( 1199 #endif 1200 "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", 1201 mod->name, kernel_symbol_name(sym), namespace); 1202 #ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS 1203 return -EINVAL; 1204 #endif 1205 } 1206 return 0; 1207 } 1208 1209 static bool inherit_taint(struct module *mod, struct module *owner, const char *name) 1210 { 1211 if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) 1212 return true; 1213 1214 if (mod->using_gplonly_symbols) { 1215 pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", 1216 mod->name, name, owner->name); 1217 return false; 1218 } 1219 1220 if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { 1221 pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", 1222 
mod->name, name, owner->name); 1223 set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); 1224 } 1225 return true; 1226 } 1227 1228 /* Resolve a symbol for this module. I.e. if we find one, record usage. */ 1229 static const struct kernel_symbol *resolve_symbol(struct module *mod, 1230 const struct load_info *info, 1231 const char *name, 1232 char ownername[]) 1233 { 1234 struct find_symbol_arg fsa = { 1235 .name = name, 1236 .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), 1237 .warn = true, 1238 }; 1239 int err; 1240 1241 /* 1242 * The module_mutex should not be a heavily contended lock; 1243 * if we get the occasional sleep here, we'll go an extra iteration 1244 * in the wait_event_interruptible(), which is harmless. 1245 */ 1246 sched_annotate_sleep(); 1247 mutex_lock(&module_mutex); 1248 if (!find_symbol(&fsa)) 1249 goto unlock; 1250 1251 if (fsa.license == GPL_ONLY) 1252 mod->using_gplonly_symbols = true; 1253 1254 if (!inherit_taint(mod, fsa.owner, name)) { 1255 fsa.sym = NULL; 1256 goto getname; 1257 } 1258 1259 if (!check_version(info, name, mod, fsa.crc)) { 1260 fsa.sym = ERR_PTR(-EINVAL); 1261 goto getname; 1262 } 1263 1264 err = verify_namespace_is_imported(info, fsa.sym, mod); 1265 if (err) { 1266 fsa.sym = ERR_PTR(err); 1267 goto getname; 1268 } 1269 1270 err = ref_module(mod, fsa.owner); 1271 if (err) { 1272 fsa.sym = ERR_PTR(err); 1273 goto getname; 1274 } 1275 1276 getname: 1277 /* We must make copy under the lock if we failed to get ref. */ 1278 strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); 1279 unlock: 1280 mutex_unlock(&module_mutex); 1281 return fsa.sym; 1282 } 1283 1284 static const struct kernel_symbol * 1285 resolve_symbol_wait(struct module *mod, 1286 const struct load_info *info, 1287 const char *name) 1288 { 1289 const struct kernel_symbol *ksym; 1290 char owner[MODULE_NAME_LEN]; 1291 1292 if (wait_event_interruptible_timeout(module_wq, 1293 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) 1294 || PTR_ERR(ksym) != -EBUSY, 1295 30 * HZ) <= 0) { 1296 pr_warn("%s: gave up waiting for init of module %s.\n", 1297 mod->name, owner); 1298 } 1299 return ksym; 1300 } 1301 1302 void __weak module_arch_cleanup(struct module *mod) 1303 { 1304 } 1305 1306 void __weak module_arch_freeing_init(struct module *mod) 1307 { 1308 } 1309 1310 static int module_memory_alloc(struct module *mod, enum mod_mem_type type) 1311 { 1312 unsigned int size = PAGE_ALIGN(mod->mem[type].size); 1313 enum execmem_type execmem_type; 1314 void *ptr; 1315 1316 mod->mem[type].size = size; 1317 1318 if (mod_mem_type_is_data(type)) 1319 execmem_type = EXECMEM_MODULE_DATA; 1320 else 1321 execmem_type = EXECMEM_MODULE_TEXT; 1322 1323 ptr = execmem_alloc(execmem_type, size); 1324 if (!ptr) 1325 return -ENOMEM; 1326 1327 if (execmem_is_rox(execmem_type)) { 1328 int err = execmem_make_temp_rw(ptr, size); 1329 1330 if (err) { 1331 execmem_free(ptr); 1332 return -ENOMEM; 1333 } 1334 1335 mod->mem[type].is_rox = true; 1336 } 1337 1338 /* 1339 * The pointer to these blocks of memory are stored on the module 1340 * structure and we keep that around so long as the module is 1341 * around. We only free that memory when we unload the module. 1342 * Just mark them as not being a leak then. 
	 * The .init* ELF sections *do* get freed after boot so we *could* treat them
	 * slightly differently with kmemleak_ignore() and only grey
	 * them out as they work as typical memory allocations which
	 * *do* eventually get freed, but let's just keep things simple
	 * and avoid *any* false positives.
	 */
	if (!mod->mem[type].is_rox)
		kmemleak_not_leak(ptr);

	memset(ptr, 0, size);
	mod->mem[type].base = ptr;

	return 0;
}

static void module_memory_restore_rox(struct module *mod)
{
	for_class_mod_mem_type(type, text) {
		struct module_memory *mem = &mod->mem[type];

		if (mem->is_rox)
			execmem_restore_rox(mem->base, mem->size);
	}
}

static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
	struct module_memory *mem = &mod->mem[type];

	execmem_free(mem->base);
}

static void free_mod_mem(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (type == MOD_DATA)
			continue;

		/* Free lock-classes; relies on the preceding sync_rcu(). */
		lockdep_free_key_range(mod_mem->base, mod_mem->size);
		if (mod_mem->size)
			module_memory_free(mod, type);
	}

	/* MOD_DATA hosts mod, so free it at last */
	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
	module_memory_free(mod, MOD_DATA);
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	codetag_unload_module(mod);

	mod_sysfs_teardown(mod);

	/*
	 * We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed.
	 */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	if (is_livepatch_module(mod))
		free_module_elf(mod);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list; this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU synchronizing before releasing mod->list and buglist.
*/ 1431 synchronize_rcu(); 1432 if (try_add_tainted_module(mod)) 1433 pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", 1434 mod->name); 1435 mutex_unlock(&module_mutex); 1436 1437 /* This may be empty, but that's OK */ 1438 module_arch_freeing_init(mod); 1439 kfree(mod->args); 1440 percpu_modfree(mod); 1441 1442 free_mod_mem(mod); 1443 } 1444 1445 void *__symbol_get(const char *symbol) 1446 { 1447 struct find_symbol_arg fsa = { 1448 .name = symbol, 1449 .gplok = true, 1450 .warn = true, 1451 }; 1452 1453 scoped_guard(rcu) { 1454 if (!find_symbol(&fsa)) 1455 return NULL; 1456 if (fsa.license != GPL_ONLY) { 1457 pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", 1458 symbol); 1459 return NULL; 1460 } 1461 if (strong_try_module_get(fsa.owner)) 1462 return NULL; 1463 } 1464 return (void *)kernel_symbol_value(fsa.sym); 1465 } 1466 EXPORT_SYMBOL_GPL(__symbol_get); 1467 1468 /* 1469 * Ensure that an exported symbol [global namespace] does not already exist 1470 * in the kernel or in some other module's exported symbol table. 1471 * 1472 * You must hold the module_mutex. 1473 */ 1474 static int verify_exported_symbols(struct module *mod) 1475 { 1476 unsigned int i; 1477 const struct kernel_symbol *s; 1478 struct { 1479 const struct kernel_symbol *sym; 1480 unsigned int num; 1481 } arr[] = { 1482 { mod->syms, mod->num_syms }, 1483 { mod->gpl_syms, mod->num_gpl_syms }, 1484 }; 1485 1486 for (i = 0; i < ARRAY_SIZE(arr); i++) { 1487 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { 1488 struct find_symbol_arg fsa = { 1489 .name = kernel_symbol_name(s), 1490 .gplok = true, 1491 }; 1492 if (find_symbol(&fsa)) { 1493 pr_err("%s: exports duplicate symbol %s" 1494 " (owned by %s)\n", 1495 mod->name, kernel_symbol_name(s), 1496 module_name(fsa.owner)); 1497 return -ENOEXEC; 1498 } 1499 } 1500 } 1501 return 0; 1502 } 1503 1504 static bool ignore_undef_symbol(Elf_Half emachine, const char *name) 1505 { 1506 /* 1507 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as 1508 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. 1509 * i386 has a similar problem but may not deserve a fix. 1510 * 1511 * If we ever have to ignore many symbols, consider refactoring the code to 1512 * only warn if referenced by a relocation. 1513 */ 1514 if (emachine == EM_386 || emachine == EM_X86_64) 1515 return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); 1516 return false; 1517 } 1518 1519 /* Change all symbols so that st_value encodes the pointer directly. */ 1520 static int simplify_symbols(struct module *mod, const struct load_info *info) 1521 { 1522 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 1523 Elf_Sym *sym = (void *)symsec->sh_addr; 1524 unsigned long secbase; 1525 unsigned int i; 1526 int ret = 0; 1527 const struct kernel_symbol *ksym; 1528 1529 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { 1530 const char *name = info->strtab + sym[i].st_name; 1531 1532 switch (sym[i].st_shndx) { 1533 case SHN_COMMON: 1534 /* Ignore common symbols */ 1535 if (!strncmp(name, "__gnu_lto", 9)) 1536 break; 1537 1538 /* 1539 * We compiled with -fno-common. These are not 1540 * supposed to happen. 
1541 */ 1542 pr_debug("Common symbol: %s\n", name); 1543 pr_warn("%s: please compile with -fno-common\n", 1544 mod->name); 1545 ret = -ENOEXEC; 1546 break; 1547 1548 case SHN_ABS: 1549 /* Don't need to do anything */ 1550 pr_debug("Absolute symbol: 0x%08lx %s\n", 1551 (long)sym[i].st_value, name); 1552 break; 1553 1554 case SHN_LIVEPATCH: 1555 /* Livepatch symbols are resolved by livepatch */ 1556 break; 1557 1558 case SHN_UNDEF: 1559 ksym = resolve_symbol_wait(mod, info, name); 1560 /* Ok if resolved. */ 1561 if (ksym && !IS_ERR(ksym)) { 1562 sym[i].st_value = kernel_symbol_value(ksym); 1563 break; 1564 } 1565 1566 /* Ok if weak or ignored. */ 1567 if (!ksym && 1568 (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || 1569 ignore_undef_symbol(info->hdr->e_machine, name))) 1570 break; 1571 1572 ret = PTR_ERR(ksym) ?: -ENOENT; 1573 pr_warn("%s: Unknown symbol %s (err %d)\n", 1574 mod->name, name, ret); 1575 break; 1576 1577 default: 1578 /* Divert to percpu allocation if a percpu var. */ 1579 if (sym[i].st_shndx == info->index.pcpu) 1580 secbase = (unsigned long)mod_percpu(mod); 1581 else 1582 secbase = info->sechdrs[sym[i].st_shndx].sh_addr; 1583 sym[i].st_value += secbase; 1584 break; 1585 } 1586 } 1587 1588 return ret; 1589 } 1590 1591 static int apply_relocations(struct module *mod, const struct load_info *info) 1592 { 1593 unsigned int i; 1594 int err = 0; 1595 1596 /* Now do relocations. */ 1597 for (i = 1; i < info->hdr->e_shnum; i++) { 1598 unsigned int infosec = info->sechdrs[i].sh_info; 1599 1600 /* Not a valid relocation section? */ 1601 if (infosec >= info->hdr->e_shnum) 1602 continue; 1603 1604 /* 1605 * Don't bother with non-allocated sections. 1606 * An exception is the percpu section, which has separate allocations 1607 * for individual CPUs. We relocate the percpu section in the initial 1608 * ELF template and subsequently copy it to the per-CPU destinations. 
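		 * (That later copy is percpu_modcopy(), which memcpy()s the
		 * relocated template to per_cpu_ptr(mod->percpu, cpu) for each
		 * possible CPU.)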
1609 */ 1610 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC) && 1611 (!infosec || infosec != info->index.pcpu)) 1612 continue; 1613 1614 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) 1615 err = klp_apply_section_relocs(mod, info->sechdrs, 1616 info->secstrings, 1617 info->strtab, 1618 info->index.sym, i, 1619 NULL); 1620 else if (info->sechdrs[i].sh_type == SHT_REL) 1621 err = apply_relocate(info->sechdrs, info->strtab, 1622 info->index.sym, i, mod); 1623 else if (info->sechdrs[i].sh_type == SHT_RELA) 1624 err = apply_relocate_add(info->sechdrs, info->strtab, 1625 info->index.sym, i, mod); 1626 if (err < 0) 1627 break; 1628 } 1629 return err; 1630 } 1631 1632 /* Additional bytes needed by arch in front of individual sections */ 1633 unsigned int __weak arch_mod_section_prepend(struct module *mod, 1634 unsigned int section) 1635 { 1636 /* default implementation just returns zero */ 1637 return 0; 1638 } 1639 1640 long module_get_offset_and_type(struct module *mod, enum mod_mem_type type, 1641 Elf_Shdr *sechdr, unsigned int section) 1642 { 1643 long offset; 1644 long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT; 1645 1646 mod->mem[type].size += arch_mod_section_prepend(mod, section); 1647 offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1); 1648 mod->mem[type].size = offset + sechdr->sh_size; 1649 1650 WARN_ON_ONCE(offset & mask); 1651 return offset | mask; 1652 } 1653 1654 bool module_init_layout_section(const char *sname) 1655 { 1656 #ifndef CONFIG_MODULE_UNLOAD 1657 if (module_exit_section(sname)) 1658 return true; 1659 #endif 1660 return module_init_section(sname); 1661 } 1662 1663 static void __layout_sections(struct module *mod, struct load_info *info, bool is_init) 1664 { 1665 unsigned int m, i; 1666 1667 /* 1668 * { Mask of required section header flags, 1669 * Mask of excluded section header flags } 1670 */ 1671 static const unsigned long masks[][2] = { 1672 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, 1673 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, 1674 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, 1675 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, 1676 { ARCH_SHF_SMALL | SHF_ALLOC, 0 } 1677 }; 1678 static const int core_m_to_mem_type[] = { 1679 MOD_TEXT, 1680 MOD_RODATA, 1681 MOD_RO_AFTER_INIT, 1682 MOD_DATA, 1683 MOD_DATA, 1684 }; 1685 static const int init_m_to_mem_type[] = { 1686 MOD_INIT_TEXT, 1687 MOD_INIT_RODATA, 1688 MOD_INVALID, 1689 MOD_INIT_DATA, 1690 MOD_INIT_DATA, 1691 }; 1692 1693 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 1694 enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m]; 1695 1696 for (i = 0; i < info->hdr->e_shnum; ++i) { 1697 Elf_Shdr *s = &info->sechdrs[i]; 1698 const char *sname = info->secstrings + s->sh_name; 1699 1700 if ((s->sh_flags & masks[m][0]) != masks[m][0] 1701 || (s->sh_flags & masks[m][1]) 1702 || s->sh_entsize != ~0UL 1703 || is_init != module_init_layout_section(sname)) 1704 continue; 1705 1706 if (WARN_ON_ONCE(type == MOD_INVALID)) 1707 continue; 1708 1709 /* 1710 * Do not allocate codetag memory as we load it into 1711 * preallocated contiguous memory. 1712 */ 1713 if (codetag_needs_module_section(mod, sname, s->sh_size)) { 1714 /* 1715 * s->sh_entsize won't be used but populate the 1716 * type field to avoid confusion. 
1717 */ 1718 s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) 1719 << SH_ENTSIZE_TYPE_SHIFT; 1720 continue; 1721 } 1722 1723 s->sh_entsize = module_get_offset_and_type(mod, type, s, i); 1724 pr_debug("\t%s\n", sname); 1725 } 1726 } 1727 } 1728 1729 /* 1730 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 1731 * might -- code, read-only data, read-write data, small data. Tally 1732 * sizes, and place the offsets into sh_entsize fields: high bit means it 1733 * belongs in init. 1734 */ 1735 static void layout_sections(struct module *mod, struct load_info *info) 1736 { 1737 unsigned int i; 1738 1739 for (i = 0; i < info->hdr->e_shnum; i++) 1740 info->sechdrs[i].sh_entsize = ~0UL; 1741 1742 pr_debug("Core section allocation order for %s:\n", mod->name); 1743 __layout_sections(mod, info, false); 1744 1745 pr_debug("Init section allocation order for %s:\n", mod->name); 1746 __layout_sections(mod, info, true); 1747 } 1748 1749 static void module_license_taint_check(struct module *mod, const char *license) 1750 { 1751 if (!license) 1752 license = "unspecified"; 1753 1754 if (!license_is_gpl_compatible(license)) { 1755 if (!test_taint(TAINT_PROPRIETARY_MODULE)) 1756 pr_warn("%s: module license '%s' taints kernel.\n", 1757 mod->name, license); 1758 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 1759 LOCKDEP_NOW_UNRELIABLE); 1760 } 1761 } 1762 1763 static int setup_modinfo(struct module *mod, struct load_info *info) 1764 { 1765 const struct module_attribute *attr; 1766 char *imported_namespace; 1767 int i; 1768 1769 for (i = 0; (attr = modinfo_attrs[i]); i++) { 1770 if (attr->setup) 1771 attr->setup(mod, get_modinfo(info, attr->attr.name)); 1772 } 1773 1774 for_each_modinfo_entry(imported_namespace, info, "import_ns") { 1775 /* 1776 * 'module:' prefixed namespaces are implicit, disallow 1777 * explicit imports. 1778 */ 1779 if (strstarts(imported_namespace, "module:")) { 1780 pr_err("%s: module tries to import module namespace: %s\n", 1781 mod->name, imported_namespace); 1782 return -EPERM; 1783 } 1784 } 1785 1786 return 0; 1787 } 1788 1789 static void free_modinfo(struct module *mod) 1790 { 1791 const struct module_attribute *attr; 1792 int i; 1793 1794 for (i = 0; (attr = modinfo_attrs[i]); i++) { 1795 if (attr->free) 1796 attr->free(mod); 1797 } 1798 } 1799 1800 bool __weak module_init_section(const char *name) 1801 { 1802 return strstarts(name, ".init"); 1803 } 1804 1805 bool __weak module_exit_section(const char *name) 1806 { 1807 return strstarts(name, ".exit"); 1808 } 1809 1810 static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr) 1811 { 1812 #if defined(CONFIG_64BIT) 1813 unsigned long long secend; 1814 #else 1815 unsigned long secend; 1816 #endif 1817 1818 /* 1819 * Check for both overflow and offset/size being 1820 * too large. 1821 */ 1822 secend = shdr->sh_offset + shdr->sh_size; 1823 if (secend < shdr->sh_offset || secend > info->len) 1824 return -ENOEXEC; 1825 1826 return 0; 1827 } 1828 1829 /** 1830 * elf_validity_ehdr() - Checks an ELF header for module validity 1831 * @info: Load info containing the ELF header to check 1832 * 1833 * Checks whether an ELF header could belong to a valid module. Checks: 1834 * 1835 * * ELF header is within the data the user provided 1836 * * ELF magic is present 1837 * * It is relocatable (not final linked, not core file, etc.) 1838 * * The header's machine type matches what the architecture expects. 
1839 * * Optional arch-specific hook for other properties 1840 * - module_elf_check_arch() is currently only used by PPC to check 1841 * ELF ABI version, but may be used by others in the future. 1842 * 1843 * Return: %0 if valid, %-ENOEXEC on failure. 1844 */ 1845 static int elf_validity_ehdr(const struct load_info *info) 1846 { 1847 if (info->len < sizeof(*(info->hdr))) { 1848 pr_err("Invalid ELF header len %lu\n", info->len); 1849 return -ENOEXEC; 1850 } 1851 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { 1852 pr_err("Invalid ELF header magic: != %s\n", ELFMAG); 1853 return -ENOEXEC; 1854 } 1855 if (info->hdr->e_type != ET_REL) { 1856 pr_err("Invalid ELF header type: %u != %u\n", 1857 info->hdr->e_type, ET_REL); 1858 return -ENOEXEC; 1859 } 1860 if (!elf_check_arch(info->hdr)) { 1861 pr_err("Invalid architecture in ELF header: %u\n", 1862 info->hdr->e_machine); 1863 return -ENOEXEC; 1864 } 1865 if (!module_elf_check_arch(info->hdr)) { 1866 pr_err("Invalid module architecture in ELF header: %u\n", 1867 info->hdr->e_machine); 1868 return -ENOEXEC; 1869 } 1870 return 0; 1871 } 1872 1873 /** 1874 * elf_validity_cache_sechdrs() - Cache section headers if valid 1875 * @info: Load info to compute section headers from 1876 * 1877 * Checks: 1878 * 1879 * * ELF header is valid (see elf_validity_ehdr()) 1880 * * Section headers are the size we expect 1881 * * Section array fits in the user provided data 1882 * * Section index 0 is NULL 1883 * * Section contents are inbounds 1884 * 1885 * Then updates @info with a &load_info->sechdrs pointer if valid. 1886 * 1887 * Return: %0 if valid, negative error code if validation failed. 1888 */ 1889 static int elf_validity_cache_sechdrs(struct load_info *info) 1890 { 1891 Elf_Shdr *sechdrs; 1892 Elf_Shdr *shdr; 1893 int i; 1894 int err; 1895 1896 err = elf_validity_ehdr(info); 1897 if (err < 0) 1898 return err; 1899 1900 if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { 1901 pr_err("Invalid ELF section header size\n"); 1902 return -ENOEXEC; 1903 } 1904 1905 /* 1906 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is 1907 * known and small. So e_shnum * sizeof(Elf_Shdr) 1908 * will not overflow unsigned long on any platform. 1909 */ 1910 if (info->hdr->e_shoff >= info->len 1911 || (info->hdr->e_shnum * sizeof(Elf_Shdr) > 1912 info->len - info->hdr->e_shoff)) { 1913 pr_err("Invalid ELF section header overflow\n"); 1914 return -ENOEXEC; 1915 } 1916 1917 sechdrs = (void *)info->hdr + info->hdr->e_shoff; 1918 1919 /* 1920 * The code assumes that section 0 has a length of zero and 1921 * an addr of zero, so check for it. 1922 */ 1923 if (sechdrs[0].sh_type != SHT_NULL 1924 || sechdrs[0].sh_size != 0 1925 || sechdrs[0].sh_addr != 0) { 1926 pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", 1927 sechdrs[0].sh_type); 1928 return -ENOEXEC; 1929 } 1930 1931 /* Validate contents are inbounds */ 1932 for (i = 1; i < info->hdr->e_shnum; i++) { 1933 shdr = &sechdrs[i]; 1934 switch (shdr->sh_type) { 1935 case SHT_NULL: 1936 case SHT_NOBITS: 1937 /* No contents, offset/size don't mean anything */ 1938 continue; 1939 default: 1940 err = validate_section_offset(info, shdr); 1941 if (err < 0) { 1942 pr_err("Invalid ELF section in module (section %u type %u)\n", 1943 i, shdr->sh_type); 1944 return err; 1945 } 1946 } 1947 } 1948 1949 info->sechdrs = sechdrs; 1950 1951 return 0; 1952 } 1953 1954 /** 1955 * elf_validity_cache_secstrings() - Caches section names if valid 1956 * @info: Load info to cache section names from. Must have valid sechdrs. 
1957 * 1958 * Specifically checks: 1959 * 1960 * * Section name table index is inbounds of section headers 1961 * * Section name table is not empty 1962 * * Section name table is NUL terminated 1963 * * All section name offsets are inbounds of the section 1964 * 1965 * Then updates @info with a &load_info->secstrings pointer if valid. 1966 * 1967 * Return: %0 if valid, negative error code if validation failed. 1968 */ 1969 static int elf_validity_cache_secstrings(struct load_info *info) 1970 { 1971 Elf_Shdr *strhdr, *shdr; 1972 char *secstrings; 1973 int i; 1974 1975 /* 1976 * Verify if the section name table index is valid. 1977 */ 1978 if (info->hdr->e_shstrndx == SHN_UNDEF 1979 || info->hdr->e_shstrndx >= info->hdr->e_shnum) { 1980 pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", 1981 info->hdr->e_shstrndx, info->hdr->e_shstrndx, 1982 info->hdr->e_shnum); 1983 return -ENOEXEC; 1984 } 1985 1986 strhdr = &info->sechdrs[info->hdr->e_shstrndx]; 1987 1988 /* 1989 * The section name table must be NUL-terminated, as required 1990 * by the spec. This makes strcmp and pr_* calls that access 1991 * strings in the section safe. 1992 */ 1993 secstrings = (void *)info->hdr + strhdr->sh_offset; 1994 if (strhdr->sh_size == 0) { 1995 pr_err("empty section name table\n"); 1996 return -ENOEXEC; 1997 } 1998 if (secstrings[strhdr->sh_size - 1] != '\0') { 1999 pr_err("ELF Spec violation: section name table isn't null terminated\n"); 2000 return -ENOEXEC; 2001 } 2002 2003 for (i = 0; i < info->hdr->e_shnum; i++) { 2004 shdr = &info->sechdrs[i]; 2005 /* SHT_NULL means sh_name has an undefined value */ 2006 if (shdr->sh_type == SHT_NULL) 2007 continue; 2008 if (shdr->sh_name >= strhdr->sh_size) { 2009 pr_err("Invalid ELF section name in module (section %u type %u)\n", 2010 i, shdr->sh_type); 2011 return -ENOEXEC; 2012 } 2013 } 2014 2015 info->secstrings = secstrings; 2016 return 0; 2017 } 2018 2019 /** 2020 * elf_validity_cache_index_info() - Validate and cache modinfo section 2021 * @info: Load info to populate the modinfo index on. 2022 * Must have &load_info->sechdrs and &load_info->secstrings populated 2023 * 2024 * Checks that if there is a .modinfo section, it is unique. 2025 * Then, it caches its index in &load_info->index.info. 2026 * Finally, it tries to populate the name to improve error messages. 2027 * 2028 * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found. 2029 */ 2030 static int elf_validity_cache_index_info(struct load_info *info) 2031 { 2032 int info_idx; 2033 2034 info_idx = find_any_unique_sec(info, ".modinfo"); 2035 2036 if (info_idx == 0) 2037 /* Early return, no .modinfo */ 2038 return 0; 2039 2040 if (info_idx < 0) { 2041 pr_err("Only one .modinfo section must exist.\n"); 2042 return -ENOEXEC; 2043 } 2044 2045 info->index.info = info_idx; 2046 /* Try to find a name early so we can log errors with a module name */ 2047 info->name = get_modinfo(info, "name"); 2048 2049 return 0; 2050 } 2051 2052 /** 2053 * elf_validity_cache_index_mod() - Validates and caches this_module section 2054 * @info: Load info to cache this_module on. 2055 * Must have &load_info->sechdrs and &load_info->secstrings populated 2056 * 2057 * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost 2058 * uses to refer to __this_module and let's use rely on THIS_MODULE to point 2059 * to &__this_module properly. The kernel's modpost declares it on each 2060 * modules's *.mod.c file. 
If the struct module of the kernel changes a full 2061 * kernel rebuild is required. 2062 * 2063 * We have a few expectations for this special section, this function 2064 * validates all this for us: 2065 * 2066 * * The section has contents 2067 * * The section is unique 2068 * * We expect the kernel to always have to allocate it: SHF_ALLOC 2069 * * The section size must match the kernel's run time's struct module 2070 * size 2071 * 2072 * If all checks pass, the index will be cached in &load_info->index.mod 2073 * 2074 * Return: %0 on validation success, %-ENOEXEC on failure 2075 */ 2076 static int elf_validity_cache_index_mod(struct load_info *info) 2077 { 2078 Elf_Shdr *shdr; 2079 int mod_idx; 2080 2081 mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module"); 2082 if (mod_idx <= 0) { 2083 pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n", 2084 info->name ?: "(missing .modinfo section or name field)"); 2085 return -ENOEXEC; 2086 } 2087 2088 shdr = &info->sechdrs[mod_idx]; 2089 2090 if (shdr->sh_type == SHT_NOBITS) { 2091 pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n", 2092 info->name ?: "(missing .modinfo section or name field)"); 2093 return -ENOEXEC; 2094 } 2095 2096 if (!(shdr->sh_flags & SHF_ALLOC)) { 2097 pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n", 2098 info->name ?: "(missing .modinfo section or name field)"); 2099 return -ENOEXEC; 2100 } 2101 2102 if (shdr->sh_size != sizeof(struct module)) { 2103 pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n", 2104 info->name ?: "(missing .modinfo section or name field)"); 2105 return -ENOEXEC; 2106 } 2107 2108 info->index.mod = mod_idx; 2109 2110 return 0; 2111 } 2112 2113 /** 2114 * elf_validity_cache_index_sym() - Validate and cache symtab index 2115 * @info: Load info to cache symtab index in. 2116 * Must have &load_info->sechdrs and &load_info->secstrings populated. 2117 * 2118 * Checks that there is exactly one symbol table, then caches its index in 2119 * &load_info->index.sym. 2120 * 2121 * Return: %0 if valid, %-ENOEXEC on failure. 2122 */ 2123 static int elf_validity_cache_index_sym(struct load_info *info) 2124 { 2125 unsigned int sym_idx; 2126 unsigned int num_sym_secs = 0; 2127 int i; 2128 2129 for (i = 1; i < info->hdr->e_shnum; i++) { 2130 if (info->sechdrs[i].sh_type == SHT_SYMTAB) { 2131 num_sym_secs++; 2132 sym_idx = i; 2133 } 2134 } 2135 2136 if (num_sym_secs != 1) { 2137 pr_warn("%s: module has no symbols (stripped?)\n", 2138 info->name ?: "(missing .modinfo section or name field)"); 2139 return -ENOEXEC; 2140 } 2141 2142 info->index.sym = sym_idx; 2143 2144 return 0; 2145 } 2146 2147 /** 2148 * elf_validity_cache_index_str() - Validate and cache strtab index 2149 * @info: Load info to cache strtab index in. 2150 * Must have &load_info->sechdrs and &load_info->secstrings populated. 2151 * Must have &load_info->index.sym populated. 2152 * 2153 * Looks at the symbol table's associated string table, makes sure it is 2154 * in-bounds, and caches it. 2155 * 2156 * Return: %0 if valid, %-ENOEXEC on failure. 
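 *
 * For example, a module's single SHT_SYMTAB section normally carries in
 * sh_link the section index of its string table (typically ".strtab");
 * that index is what gets range-checked and cached here before any
 * symbol name is looked up.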
 */
static int elf_validity_cache_index_str(struct load_info *info)
{
	unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;

	if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d))\n",
		       str_idx, str_idx, info->hdr->e_shnum);
		return -ENOEXEC;
	}

	info->index.str = str_idx;
	return 0;
}

/**
 * elf_validity_cache_index_versions() - Validate and cache version indices
 * @info:  Load info to cache version indices in.
 *	   Must have &load_info->sechdrs and &load_info->secstrings populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *	   uapi/linux/module.h
 *
 * If we're ignoring modversions based on @flags, zero all version indices
 * and return success. Otherwise check:
 *
 * * If "__version_ext_crcs" is present, "__version_ext_names" is present
 * * There is a name present for every crc
 *
 * Then populate:
 *
 * * &load_info->index.vers
 * * &load_info->index.vers_ext_crc
 * * &load_info->index.vers_ext_name
 *
 * if present.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_versions(struct load_info *info, int flags)
{
	unsigned int vers_ext_crc;
	unsigned int vers_ext_name;
	size_t crc_count;
	size_t remaining_len;
	size_t name_size;
	char *name;

	/* If modversions were suppressed, pretend we didn't find any */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS) {
		info->index.vers = 0;
		info->index.vers_ext_crc = 0;
		info->index.vers_ext_name = 0;
		return 0;
	}

	vers_ext_crc = find_sec(info, "__version_ext_crcs");
	vers_ext_name = find_sec(info, "__version_ext_names");

	/* If we have one field, we must have the other */
	if (!!vers_ext_crc != !!vers_ext_name) {
		pr_err("extended version crc+name presence does not match\n");
		return -ENOEXEC;
	}

	/*
	 * If we have extended version information, we should have the same
	 * number of entries in every section.
	 */
	if (vers_ext_crc) {
		crc_count = info->sechdrs[vers_ext_crc].sh_size / sizeof(u32);
		name = (void *)info->hdr +
			info->sechdrs[vers_ext_name].sh_offset;
		remaining_len = info->sechdrs[vers_ext_name].sh_size;

		while (crc_count--) {
			name_size = strnlen(name, remaining_len) + 1;
			if (name_size > remaining_len) {
				pr_err("more extended version crcs than names\n");
				return -ENOEXEC;
			}
			remaining_len -= name_size;
			name += name_size;
		}
	}

	info->index.vers = find_sec(info, "__versions");
	info->index.vers_ext_crc = vers_ext_crc;
	info->index.vers_ext_name = vers_ext_name;
	return 0;
}

/**
 * elf_validity_cache_index() - Resolve, validate, cache section indices
 * @info:  Load info to read from and update.
 *	   &load_info->sechdrs and &load_info->secstrings must be populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *	   uapi/linux/module.h
 *
 * Populates &load_info->index, validating as it goes.
2256 * See child functions for per-field validation: 2257 * 2258 * * elf_validity_cache_index_info() 2259 * * elf_validity_cache_index_mod() 2260 * * elf_validity_cache_index_sym() 2261 * * elf_validity_cache_index_str() 2262 * * elf_validity_cache_index_versions() 2263 * 2264 * If CONFIG_SMP is enabled, load the percpu section by name with no 2265 * validation. 2266 * 2267 * Return: 0 on success, negative error code if an index failed validation. 2268 */ 2269 static int elf_validity_cache_index(struct load_info *info, int flags) 2270 { 2271 int err; 2272 2273 err = elf_validity_cache_index_info(info); 2274 if (err < 0) 2275 return err; 2276 err = elf_validity_cache_index_mod(info); 2277 if (err < 0) 2278 return err; 2279 err = elf_validity_cache_index_sym(info); 2280 if (err < 0) 2281 return err; 2282 err = elf_validity_cache_index_str(info); 2283 if (err < 0) 2284 return err; 2285 err = elf_validity_cache_index_versions(info, flags); 2286 if (err < 0) 2287 return err; 2288 2289 info->index.pcpu = find_pcpusec(info); 2290 2291 return 0; 2292 } 2293 2294 /** 2295 * elf_validity_cache_strtab() - Validate and cache symbol string table 2296 * @info: Load info to read from and update. 2297 * Must have &load_info->sechdrs and &load_info->secstrings populated. 2298 * Must have &load_info->index populated. 2299 * 2300 * Checks: 2301 * 2302 * * The string table is not empty. 2303 * * The string table starts and ends with NUL (required by ELF spec). 2304 * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the 2305 * string table. 2306 * 2307 * And caches the pointer as &load_info->strtab in @info. 2308 * 2309 * Return: 0 on success, negative error code if a check failed. 2310 */ 2311 static int elf_validity_cache_strtab(struct load_info *info) 2312 { 2313 Elf_Shdr *str_shdr = &info->sechdrs[info->index.str]; 2314 Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym]; 2315 char *strtab = (char *)info->hdr + str_shdr->sh_offset; 2316 Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset; 2317 int i; 2318 2319 if (str_shdr->sh_size == 0) { 2320 pr_err("empty symbol string table\n"); 2321 return -ENOEXEC; 2322 } 2323 if (strtab[0] != '\0') { 2324 pr_err("symbol string table missing leading NUL\n"); 2325 return -ENOEXEC; 2326 } 2327 if (strtab[str_shdr->sh_size - 1] != '\0') { 2328 pr_err("symbol string table isn't NUL terminated\n"); 2329 return -ENOEXEC; 2330 } 2331 2332 /* 2333 * Now that we know strtab is correctly structured, check symbol 2334 * starts are inbounds before they're used later. 2335 */ 2336 for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) { 2337 if (syms[i].st_name >= str_shdr->sh_size) { 2338 pr_err("symbol name out of bounds in string table"); 2339 return -ENOEXEC; 2340 } 2341 } 2342 2343 info->strtab = strtab; 2344 return 0; 2345 } 2346 2347 /* 2348 * Check userspace passed ELF module against our expectations, and cache 2349 * useful variables for further processing as we go. 2350 * 2351 * This does basic validity checks against section offsets and sizes, the 2352 * section name string table, and the indices used for it (sh_name). 2353 * 2354 * As a last step, since we're already checking the ELF sections we cache 2355 * useful variables which will be used later for our convenience: 2356 * 2357 * o pointers to section headers 2358 * o cache the modinfo symbol section 2359 * o cache the string symbol section 2360 * o cache the module section 2361 * 2362 * As a last step we set info->mod to the temporary copy of the module in 2363 * info->hdr. 
The final one will be allocated in move_module(). Any 2364 * modifications we make to our copy of the module will be carried over 2365 * to the final minted module. 2366 */ 2367 static int elf_validity_cache_copy(struct load_info *info, int flags) 2368 { 2369 int err; 2370 2371 err = elf_validity_cache_sechdrs(info); 2372 if (err < 0) 2373 return err; 2374 err = elf_validity_cache_secstrings(info); 2375 if (err < 0) 2376 return err; 2377 err = elf_validity_cache_index(info, flags); 2378 if (err < 0) 2379 return err; 2380 err = elf_validity_cache_strtab(info); 2381 if (err < 0) 2382 return err; 2383 2384 /* This is temporary: point mod into copy of data. */ 2385 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 2386 2387 /* 2388 * If we didn't load the .modinfo 'name' field earlier, fall back to 2389 * on-disk struct mod 'name' field. 2390 */ 2391 if (!info->name) 2392 info->name = info->mod->name; 2393 2394 return 0; 2395 } 2396 2397 #define COPY_CHUNK_SIZE (16*PAGE_SIZE) 2398 2399 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 2400 { 2401 do { 2402 unsigned long n = min(len, COPY_CHUNK_SIZE); 2403 2404 if (copy_from_user(dst, usrc, n) != 0) 2405 return -EFAULT; 2406 cond_resched(); 2407 dst += n; 2408 usrc += n; 2409 len -= n; 2410 } while (len); 2411 return 0; 2412 } 2413 2414 static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 2415 { 2416 if (!get_modinfo(info, "livepatch")) 2417 /* Nothing more to do */ 2418 return 0; 2419 2420 if (set_livepatch_module(mod)) 2421 return 0; 2422 2423 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 2424 mod->name); 2425 return -ENOEXEC; 2426 } 2427 2428 static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 2429 { 2430 if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 2431 return; 2432 2433 pr_warn("%s: loading module not compiled with retpoline compiler.\n", 2434 mod->name); 2435 } 2436 2437 /* Sets info->hdr and info->len. */ 2438 static int copy_module_from_user(const void __user *umod, unsigned long len, 2439 struct load_info *info) 2440 { 2441 int err; 2442 2443 info->len = len; 2444 if (info->len < sizeof(*(info->hdr))) 2445 return -ENOEXEC; 2446 2447 err = security_kernel_load_data(LOADING_MODULE, true); 2448 if (err) 2449 return err; 2450 2451 /* Suck in entire file: we'll want most of it. */ 2452 info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 2453 if (!info->hdr) 2454 return -ENOMEM; 2455 2456 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 2457 err = -EFAULT; 2458 goto out; 2459 } 2460 2461 err = security_kernel_post_load_data((char *)info->hdr, info->len, 2462 LOADING_MODULE, "init_module"); 2463 out: 2464 if (err) 2465 vfree(info->hdr); 2466 2467 return err; 2468 } 2469 2470 static void free_copy(struct load_info *info, int flags) 2471 { 2472 if (flags & MODULE_INIT_COMPRESSED_FILE) 2473 module_decompress_cleanup(info); 2474 else 2475 vfree(info->hdr); 2476 } 2477 2478 static int rewrite_section_headers(struct load_info *info, int flags) 2479 { 2480 unsigned int i; 2481 2482 /* This should always be true, but let's be sure. */ 2483 info->sechdrs[0].sh_addr = 0; 2484 2485 for (i = 1; i < info->hdr->e_shnum; i++) { 2486 Elf_Shdr *shdr = &info->sechdrs[i]; 2487 2488 /* 2489 * Mark all sections sh_addr with their address in the 2490 * temporary image. 
2491 */ 2492 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; 2493 2494 } 2495 2496 /* Track but don't keep modinfo and version sections. */ 2497 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 2498 info->sechdrs[info->index.vers_ext_crc].sh_flags &= 2499 ~(unsigned long)SHF_ALLOC; 2500 info->sechdrs[info->index.vers_ext_name].sh_flags &= 2501 ~(unsigned long)SHF_ALLOC; 2502 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 2503 2504 return 0; 2505 } 2506 2507 static const char *const module_license_offenders[] = { 2508 /* driverloader was caught wrongly pretending to be under GPL */ 2509 "driverloader", 2510 2511 /* lve claims to be GPL but upstream won't provide source */ 2512 "lve", 2513 }; 2514 2515 /* 2516 * These calls taint the kernel depending certain module circumstances */ 2517 static void module_augment_kernel_taints(struct module *mod, struct load_info *info) 2518 { 2519 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); 2520 size_t i; 2521 2522 if (!get_modinfo(info, "intree")) { 2523 if (!test_taint(TAINT_OOT_MODULE)) 2524 pr_warn("%s: loading out-of-tree module taints kernel.\n", 2525 mod->name); 2526 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 2527 } 2528 2529 check_modinfo_retpoline(mod, info); 2530 2531 if (get_modinfo(info, "staging")) { 2532 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 2533 pr_warn("%s: module is from the staging directory, the quality " 2534 "is unknown, you have been warned.\n", mod->name); 2535 } 2536 2537 if (is_livepatch_module(mod)) { 2538 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); 2539 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", 2540 mod->name); 2541 } 2542 2543 module_license_taint_check(mod, get_modinfo(info, "license")); 2544 2545 if (get_modinfo(info, "test")) { 2546 if (!test_taint(TAINT_TEST)) 2547 pr_warn("%s: loading test module taints kernel.\n", 2548 mod->name); 2549 add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); 2550 } 2551 #ifdef CONFIG_MODULE_SIG 2552 mod->sig_ok = info->sig_ok; 2553 if (!mod->sig_ok) { 2554 pr_notice_once("%s: module verification failed: signature " 2555 "and/or required key missing - tainting " 2556 "kernel\n", mod->name); 2557 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); 2558 } 2559 #endif 2560 2561 /* 2562 * ndiswrapper is under GPL by itself, but loads proprietary modules. 2563 * Don't use add_taint_module(), as it would prevent ndiswrapper from 2564 * using GPL-only symbols it needs. 2565 */ 2566 if (strcmp(mod->name, "ndiswrapper") == 0) 2567 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 2568 2569 for (i = 0; i < ARRAY_SIZE(module_license_offenders); ++i) { 2570 if (strcmp(mod->name, module_license_offenders[i]) == 0) 2571 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2572 LOCKDEP_NOW_UNRELIABLE); 2573 } 2574 2575 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 2576 pr_warn("%s: module license taints kernel.\n", mod->name); 2577 2578 } 2579 2580 static int check_modinfo(struct module *mod, struct load_info *info, int flags) 2581 { 2582 const char *modmagic = get_modinfo(info, "vermagic"); 2583 int err; 2584 2585 if (flags & MODULE_INIT_IGNORE_VERMAGIC) 2586 modmagic = NULL; 2587 2588 /* This is allowed: modprobe --force will invalidate it. 
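	 * (For reference, vermagic is derived from the kernel release and
	 * build options, so it looks roughly like "6.1.0 SMP preempt
	 * mod_unload "; illustrative only, the exact string depends on
	 * the configuration.)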
*/ 2589 if (!modmagic) { 2590 err = try_to_force_load(mod, "bad vermagic"); 2591 if (err) 2592 return err; 2593 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 2594 pr_err("%s: version magic '%s' should be '%s'\n", 2595 info->name, modmagic, vermagic); 2596 return -ENOEXEC; 2597 } 2598 2599 err = check_modinfo_livepatch(mod, info); 2600 if (err) 2601 return err; 2602 2603 return 0; 2604 } 2605 2606 static int find_module_sections(struct module *mod, struct load_info *info) 2607 { 2608 mod->kp = section_objs(info, "__param", 2609 sizeof(*mod->kp), &mod->num_kp); 2610 mod->syms = section_objs(info, "__ksymtab", 2611 sizeof(*mod->syms), &mod->num_syms); 2612 mod->crcs = section_addr(info, "__kcrctab"); 2613 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 2614 sizeof(*mod->gpl_syms), 2615 &mod->num_gpl_syms); 2616 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 2617 2618 #ifdef CONFIG_CONSTRUCTORS 2619 mod->ctors = section_objs(info, ".ctors", 2620 sizeof(*mod->ctors), &mod->num_ctors); 2621 if (!mod->ctors) 2622 mod->ctors = section_objs(info, ".init_array", 2623 sizeof(*mod->ctors), &mod->num_ctors); 2624 else if (find_sec(info, ".init_array")) { 2625 /* 2626 * This shouldn't happen with same compiler and binutils 2627 * building all parts of the module. 2628 */ 2629 pr_warn("%s: has both .ctors and .init_array.\n", 2630 mod->name); 2631 return -EINVAL; 2632 } 2633 #endif 2634 2635 mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 2636 &mod->noinstr_text_size); 2637 2638 #ifdef CONFIG_TRACEPOINTS 2639 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 2640 sizeof(*mod->tracepoints_ptrs), 2641 &mod->num_tracepoints); 2642 #endif 2643 #ifdef CONFIG_TREE_SRCU 2644 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 2645 sizeof(*mod->srcu_struct_ptrs), 2646 &mod->num_srcu_structs); 2647 #endif 2648 #ifdef CONFIG_BPF_EVENTS 2649 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 2650 sizeof(*mod->bpf_raw_events), 2651 &mod->num_bpf_raw_events); 2652 #endif 2653 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2654 mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 2655 mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, 2656 &mod->btf_base_data_size); 2657 #endif 2658 #ifdef CONFIG_JUMP_LABEL 2659 mod->jump_entries = section_objs(info, "__jump_table", 2660 sizeof(*mod->jump_entries), 2661 &mod->num_jump_entries); 2662 #endif 2663 #ifdef CONFIG_EVENT_TRACING 2664 mod->trace_events = section_objs(info, "_ftrace_events", 2665 sizeof(*mod->trace_events), 2666 &mod->num_trace_events); 2667 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 2668 sizeof(*mod->trace_evals), 2669 &mod->num_trace_evals); 2670 #endif 2671 #ifdef CONFIG_TRACING 2672 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2673 sizeof(*mod->trace_bprintk_fmt_start), 2674 &mod->num_trace_bprintk_fmt); 2675 #endif 2676 #ifdef CONFIG_DYNAMIC_FTRACE 2677 /* sechdrs[0].sh_size is always zero */ 2678 mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 2679 sizeof(*mod->ftrace_callsites), 2680 &mod->num_ftrace_callsites); 2681 #endif 2682 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 2683 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 2684 sizeof(*mod->ei_funcs), 2685 &mod->num_ei_funcs); 2686 #endif 2687 #ifdef CONFIG_KPROBES 2688 mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 2689 &mod->kprobes_text_size); 2690 mod->kprobe_blacklist = section_objs(info, 
"_kprobe_blacklist", 2691 sizeof(unsigned long), 2692 &mod->num_kprobe_blacklist); 2693 #endif 2694 #ifdef CONFIG_PRINTK_INDEX 2695 mod->printk_index_start = section_objs(info, ".printk_index", 2696 sizeof(*mod->printk_index_start), 2697 &mod->printk_index_size); 2698 #endif 2699 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 2700 mod->static_call_sites = section_objs(info, ".static_call_sites", 2701 sizeof(*mod->static_call_sites), 2702 &mod->num_static_call_sites); 2703 #endif 2704 #if IS_ENABLED(CONFIG_KUNIT) 2705 mod->kunit_suites = section_objs(info, ".kunit_test_suites", 2706 sizeof(*mod->kunit_suites), 2707 &mod->num_kunit_suites); 2708 mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites", 2709 sizeof(*mod->kunit_init_suites), 2710 &mod->num_kunit_init_suites); 2711 #endif 2712 2713 mod->extable = section_objs(info, "__ex_table", 2714 sizeof(*mod->extable), &mod->num_exentries); 2715 2716 if (section_addr(info, "__obsparm")) 2717 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 2718 2719 #ifdef CONFIG_DYNAMIC_DEBUG_CORE 2720 mod->dyndbg_info.descs = section_objs(info, "__dyndbg", 2721 sizeof(*mod->dyndbg_info.descs), 2722 &mod->dyndbg_info.num_descs); 2723 mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", 2724 sizeof(*mod->dyndbg_info.classes), 2725 &mod->dyndbg_info.num_classes); 2726 #endif 2727 2728 return 0; 2729 } 2730 2731 static int move_module(struct module *mod, struct load_info *info) 2732 { 2733 int i, ret; 2734 enum mod_mem_type t = MOD_MEM_NUM_TYPES; 2735 bool codetag_section_found = false; 2736 2737 for_each_mod_mem_type(type) { 2738 if (!mod->mem[type].size) { 2739 mod->mem[type].base = NULL; 2740 continue; 2741 } 2742 2743 ret = module_memory_alloc(mod, type); 2744 if (ret) { 2745 t = type; 2746 goto out_err; 2747 } 2748 } 2749 2750 /* Transfer each section which specifies SHF_ALLOC */ 2751 pr_debug("Final section addresses for %s:\n", mod->name); 2752 for (i = 0; i < info->hdr->e_shnum; i++) { 2753 void *dest; 2754 Elf_Shdr *shdr = &info->sechdrs[i]; 2755 const char *sname; 2756 2757 if (!(shdr->sh_flags & SHF_ALLOC)) 2758 continue; 2759 2760 sname = info->secstrings + shdr->sh_name; 2761 /* 2762 * Load codetag sections separately as they might still be used 2763 * after module unload. 2764 */ 2765 if (codetag_needs_module_section(mod, sname, shdr->sh_size)) { 2766 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size, 2767 arch_mod_section_prepend(mod, i), shdr->sh_addralign); 2768 if (WARN_ON(!dest)) { 2769 ret = -EINVAL; 2770 goto out_err; 2771 } 2772 if (IS_ERR(dest)) { 2773 ret = PTR_ERR(dest); 2774 goto out_err; 2775 } 2776 codetag_section_found = true; 2777 } else { 2778 enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 2779 unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 2780 2781 dest = mod->mem[type].base + offset; 2782 } 2783 2784 if (shdr->sh_type != SHT_NOBITS) { 2785 /* 2786 * Our ELF checker already validated this, but let's 2787 * be pedantic and make the goal clearer. We actually 2788 * end up copying over all modifications made to the 2789 * userspace copy of the entire struct module. 
2790 */ 2791 if (i == info->index.mod && 2792 (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { 2793 ret = -ENOEXEC; 2794 goto out_err; 2795 } 2796 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2797 } 2798 /* 2799 * Update the userspace copy's ELF section address to point to 2800 * our newly allocated memory as a pure convenience so that 2801 * users of info can keep taking advantage and using the newly 2802 * minted official memory area. 2803 */ 2804 shdr->sh_addr = (unsigned long)dest; 2805 pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, 2806 (long)shdr->sh_size, info->secstrings + shdr->sh_name); 2807 } 2808 2809 return 0; 2810 out_err: 2811 module_memory_restore_rox(mod); 2812 while (t--) 2813 module_memory_free(mod, t); 2814 if (codetag_section_found) 2815 codetag_free_module_sections(mod); 2816 2817 return ret; 2818 } 2819 2820 static int check_export_symbol_versions(struct module *mod) 2821 { 2822 #ifdef CONFIG_MODVERSIONS 2823 if ((mod->num_syms && !mod->crcs) || 2824 (mod->num_gpl_syms && !mod->gpl_crcs)) { 2825 return try_to_force_load(mod, 2826 "no versions for exported symbols"); 2827 } 2828 #endif 2829 return 0; 2830 } 2831 2832 static void flush_module_icache(const struct module *mod) 2833 { 2834 /* 2835 * Flush the instruction cache, since we've played with text. 2836 * Do it before processing of module parameters, so the module 2837 * can provide parameter accessor functions of its own. 2838 */ 2839 for_each_mod_mem_type(type) { 2840 const struct module_memory *mod_mem = &mod->mem[type]; 2841 2842 if (mod_mem->size) { 2843 flush_icache_range((unsigned long)mod_mem->base, 2844 (unsigned long)mod_mem->base + mod_mem->size); 2845 } 2846 } 2847 } 2848 2849 bool __weak module_elf_check_arch(Elf_Ehdr *hdr) 2850 { 2851 return true; 2852 } 2853 2854 int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 2855 Elf_Shdr *sechdrs, 2856 char *secstrings, 2857 struct module *mod) 2858 { 2859 return 0; 2860 } 2861 2862 /* module_blacklist is a comma-separated list of module names */ 2863 static char *module_blacklist; 2864 static bool blacklisted(const char *module_name) 2865 { 2866 const char *p; 2867 size_t len; 2868 2869 if (!module_blacklist) 2870 return false; 2871 2872 for (p = module_blacklist; *p; p += len) { 2873 len = strcspn(p, ","); 2874 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 2875 return true; 2876 if (p[len] == ',') 2877 len++; 2878 } 2879 return false; 2880 } 2881 core_param(module_blacklist, module_blacklist, charp, 0400); 2882 2883 static struct module *layout_and_allocate(struct load_info *info, int flags) 2884 { 2885 struct module *mod; 2886 int err; 2887 2888 /* Allow arches to frob section contents and sizes. */ 2889 err = module_frob_arch_sections(info->hdr, info->sechdrs, 2890 info->secstrings, info->mod); 2891 if (err < 0) 2892 return ERR_PTR(err); 2893 2894 err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 2895 info->secstrings, info->mod); 2896 if (err < 0) 2897 return ERR_PTR(err); 2898 2899 /* We will do a special allocation for per-cpu sections later. */ 2900 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 2901 2902 /* 2903 * Mark relevant sections as SHF_RO_AFTER_INIT so layout_sections() can 2904 * put them in the right place. 2905 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 2906 */ 2907 module_mark_ro_after_init(info->hdr, info->sechdrs, info->secstrings); 2908 2909 /* 2910 * Determine total sizes, and put offsets in sh_entsize. 
For now 2911 * this is done generically; there doesn't appear to be any 2912 * special cases for the architectures. 2913 */ 2914 layout_sections(info->mod, info); 2915 layout_symtab(info->mod, info); 2916 2917 /* Allocate and move to the final place */ 2918 err = move_module(info->mod, info); 2919 if (err) 2920 return ERR_PTR(err); 2921 2922 /* Module has been copied to its final place now: return it. */ 2923 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 2924 kmemleak_load_module(mod, info); 2925 codetag_module_replaced(info->mod, mod); 2926 2927 return mod; 2928 } 2929 2930 /* mod is no longer valid after this! */ 2931 static void module_deallocate(struct module *mod, struct load_info *info) 2932 { 2933 percpu_modfree(mod); 2934 module_arch_freeing_init(mod); 2935 codetag_free_module_sections(mod); 2936 2937 free_mod_mem(mod); 2938 } 2939 2940 int __weak module_finalize(const Elf_Ehdr *hdr, 2941 const Elf_Shdr *sechdrs, 2942 struct module *me) 2943 { 2944 return 0; 2945 } 2946 2947 static int post_relocation(struct module *mod, const struct load_info *info) 2948 { 2949 /* Sort exception table now relocations are done. */ 2950 sort_extable(mod->extable, mod->extable + mod->num_exentries); 2951 2952 /* Copy relocated percpu area over. */ 2953 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, 2954 info->sechdrs[info->index.pcpu].sh_size); 2955 2956 /* Setup kallsyms-specific fields. */ 2957 add_kallsyms(mod, info); 2958 2959 /* Arch-specific module finalizing. */ 2960 return module_finalize(info->hdr, info->sechdrs, mod); 2961 } 2962 2963 /* Call module constructors. */ 2964 static void do_mod_ctors(struct module *mod) 2965 { 2966 #ifdef CONFIG_CONSTRUCTORS 2967 unsigned long i; 2968 2969 for (i = 0; i < mod->num_ctors; i++) 2970 mod->ctors[i](); 2971 #endif 2972 } 2973 2974 /* For freeing module_init on success, in case kallsyms traversing */ 2975 struct mod_initfree { 2976 struct llist_node node; 2977 void *init_text; 2978 void *init_data; 2979 void *init_rodata; 2980 }; 2981 2982 static void do_free_init(struct work_struct *w) 2983 { 2984 struct llist_node *pos, *n, *list; 2985 struct mod_initfree *initfree; 2986 2987 list = llist_del_all(&init_free_list); 2988 2989 synchronize_rcu(); 2990 2991 llist_for_each_safe(pos, n, list) { 2992 initfree = container_of(pos, struct mod_initfree, node); 2993 execmem_free(initfree->init_text); 2994 execmem_free(initfree->init_data); 2995 execmem_free(initfree->init_rodata); 2996 kfree(initfree); 2997 } 2998 } 2999 3000 void flush_module_init_free_work(void) 3001 { 3002 flush_work(&init_free_wq); 3003 } 3004 3005 #undef MODULE_PARAM_PREFIX 3006 #define MODULE_PARAM_PREFIX "module." 3007 /* Default value for module->async_probe_requested */ 3008 static bool async_probe; 3009 module_param(async_probe, bool, 0644); 3010 3011 /* 3012 * This is where the real work happens. 3013 * 3014 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb 3015 * helper command 'lx-symbols'. 
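 *
 * (With the kernel's gdb scripts loaded, "(gdb) lx-symbols" re-reads module
 * symbol tables; keeping do_init_module() out of line gives that helper a
 * stable address to hook.)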
3016 */ 3017 static noinline int do_init_module(struct module *mod) 3018 { 3019 int ret = 0; 3020 struct mod_initfree *freeinit; 3021 #if defined(CONFIG_MODULE_STATS) 3022 unsigned int text_size = 0, total_size = 0; 3023 3024 for_each_mod_mem_type(type) { 3025 const struct module_memory *mod_mem = &mod->mem[type]; 3026 if (mod_mem->size) { 3027 total_size += mod_mem->size; 3028 if (type == MOD_TEXT || type == MOD_INIT_TEXT) 3029 text_size += mod_mem->size; 3030 } 3031 } 3032 #endif 3033 3034 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 3035 if (!freeinit) { 3036 ret = -ENOMEM; 3037 goto fail; 3038 } 3039 freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; 3040 freeinit->init_data = mod->mem[MOD_INIT_DATA].base; 3041 freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; 3042 3043 do_mod_ctors(mod); 3044 /* Start the module */ 3045 if (mod->init != NULL) 3046 ret = do_one_initcall(mod->init); 3047 if (ret < 0) { 3048 goto fail_free_freeinit; 3049 } 3050 if (ret > 0) { 3051 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 3052 "follow 0/-E convention\n" 3053 "%s: loading module anyway...\n", 3054 __func__, mod->name, ret, __func__); 3055 dump_stack(); 3056 } 3057 3058 /* Now it's a first class citizen! */ 3059 mod->state = MODULE_STATE_LIVE; 3060 blocking_notifier_call_chain(&module_notify_list, 3061 MODULE_STATE_LIVE, mod); 3062 3063 /* Delay uevent until module has finished its init routine */ 3064 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 3065 3066 /* 3067 * We need to finish all async code before the module init sequence 3068 * is done. This has potential to deadlock if synchronous module 3069 * loading is requested from async (which is not allowed!). 3070 * 3071 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 3072 * request_module() from async workers") for more details. 3073 */ 3074 if (!mod->async_probe_requested) 3075 async_synchronize_full(); 3076 3077 ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, 3078 mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); 3079 mutex_lock(&module_mutex); 3080 /* Drop initial reference. */ 3081 module_put(mod); 3082 trim_init_extable(mod); 3083 #ifdef CONFIG_KALLSYMS 3084 /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 3085 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 3086 #endif 3087 ret = module_enable_rodata_ro_after_init(mod); 3088 if (ret) 3089 pr_warn("%s: module_enable_rodata_ro_after_init() returned %d, " 3090 "ro_after_init data might still be writable\n", 3091 mod->name, ret); 3092 3093 mod_tree_remove_init(mod); 3094 module_arch_freeing_init(mod); 3095 for_class_mod_mem_type(type, init) { 3096 mod->mem[type].base = NULL; 3097 mod->mem[type].size = 0; 3098 } 3099 3100 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 3101 /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ 3102 mod->btf_data = NULL; 3103 mod->btf_base_data = NULL; 3104 #endif 3105 /* 3106 * We want to free module_init, but be aware that kallsyms may be 3107 * walking this within an RCU read section. In all the failure paths, we 3108 * call synchronize_rcu(), but we don't want to slow down the success 3109 * path. execmem_free() cannot be called in an interrupt, so do the 3110 * work and call synchronize_rcu() in a work queue. 3111 * 3112 * Note that execmem_alloc() on most architectures creates W+X page 3113 * mappings which won't be cleaned up until do_free_init() runs. 
Any 3114 * code such as mark_rodata_ro() which depends on those mappings to 3115 * be cleaned up needs to sync with the queued work by invoking 3116 * flush_module_init_free_work(). 3117 */ 3118 if (llist_add(&freeinit->node, &init_free_list)) 3119 schedule_work(&init_free_wq); 3120 3121 mutex_unlock(&module_mutex); 3122 wake_up_all(&module_wq); 3123 3124 mod_stat_add_long(text_size, &total_text_size); 3125 mod_stat_add_long(total_size, &total_mod_size); 3126 3127 mod_stat_inc(&modcount); 3128 3129 return 0; 3130 3131 fail_free_freeinit: 3132 kfree(freeinit); 3133 fail: 3134 /* Try to protect us from buggy refcounters. */ 3135 mod->state = MODULE_STATE_GOING; 3136 synchronize_rcu(); 3137 module_put(mod); 3138 blocking_notifier_call_chain(&module_notify_list, 3139 MODULE_STATE_GOING, mod); 3140 klp_module_going(mod); 3141 ftrace_release_mod(mod); 3142 free_module(mod); 3143 wake_up_all(&module_wq); 3144 3145 return ret; 3146 } 3147 3148 static int may_init_module(void) 3149 { 3150 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3151 return -EPERM; 3152 3153 return 0; 3154 } 3155 3156 /* Is this module of this name done loading? No locks held. */ 3157 static bool finished_loading(const char *name) 3158 { 3159 struct module *mod; 3160 bool ret; 3161 3162 /* 3163 * The module_mutex should not be a heavily contended lock; 3164 * if we get the occasional sleep here, we'll go an extra iteration 3165 * in the wait_event_interruptible(), which is harmless. 3166 */ 3167 sched_annotate_sleep(); 3168 mutex_lock(&module_mutex); 3169 mod = find_module_all(name, strlen(name), true); 3170 ret = !mod || mod->state == MODULE_STATE_LIVE 3171 || mod->state == MODULE_STATE_GOING; 3172 mutex_unlock(&module_mutex); 3173 3174 return ret; 3175 } 3176 3177 /* Must be called with module_mutex held */ 3178 static int module_patient_check_exists(const char *name, 3179 enum fail_dup_mod_reason reason) 3180 { 3181 struct module *old; 3182 int err = 0; 3183 3184 old = find_module_all(name, strlen(name), true); 3185 if (old == NULL) 3186 return 0; 3187 3188 if (old->state == MODULE_STATE_COMING || 3189 old->state == MODULE_STATE_UNFORMED) { 3190 /* Wait in case it fails to load. */ 3191 mutex_unlock(&module_mutex); 3192 err = wait_event_interruptible(module_wq, 3193 finished_loading(name)); 3194 mutex_lock(&module_mutex); 3195 if (err) 3196 return err; 3197 3198 /* The module might have gone in the meantime. */ 3199 old = find_module_all(name, strlen(name), true); 3200 } 3201 3202 if (try_add_failed_module(name, reason)) 3203 pr_warn("Could not add fail-tracking for module: %s\n", name); 3204 3205 /* 3206 * We are here only when the same module was being loaded. Do 3207 * not try to load it again right now. It prevents long delays 3208 * caused by serialized module load failures. It might happen 3209 * when more devices of the same type trigger load of 3210 * a particular module. 3211 */ 3212 if (old && old->state == MODULE_STATE_LIVE) 3213 return -EEXIST; 3214 return -EBUSY; 3215 } 3216 3217 /* 3218 * We try to place it in the list now to make sure it's unique before 3219 * we dedicate too many resources. In particular, temporary percpu 3220 * memory exhaustion. 
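 *
 * A concurrent load of the same name will find this UNFORMED entry via
 * module_patient_check_exists() and either wait for it to finish or bail
 * out with -EEXIST/-EBUSY rather than loading a duplicate.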
3221 */ 3222 static int add_unformed_module(struct module *mod) 3223 { 3224 int err; 3225 3226 mod->state = MODULE_STATE_UNFORMED; 3227 3228 mutex_lock(&module_mutex); 3229 err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); 3230 if (err) 3231 goto out; 3232 3233 mod_update_bounds(mod); 3234 list_add_rcu(&mod->list, &modules); 3235 mod_tree_insert(mod); 3236 err = 0; 3237 3238 out: 3239 mutex_unlock(&module_mutex); 3240 return err; 3241 } 3242 3243 static int complete_formation(struct module *mod, struct load_info *info) 3244 { 3245 int err; 3246 3247 mutex_lock(&module_mutex); 3248 3249 /* Find duplicate symbols (must be called under lock). */ 3250 err = verify_exported_symbols(mod); 3251 if (err < 0) 3252 goto out; 3253 3254 /* These rely on module_mutex for list integrity. */ 3255 module_bug_finalize(info->hdr, info->sechdrs, mod); 3256 module_cfi_finalize(info->hdr, info->sechdrs, mod); 3257 3258 err = module_enable_rodata_ro(mod); 3259 if (err) 3260 goto out_strict_rwx; 3261 err = module_enable_data_nx(mod); 3262 if (err) 3263 goto out_strict_rwx; 3264 err = module_enable_text_rox(mod); 3265 if (err) 3266 goto out_strict_rwx; 3267 3268 /* 3269 * Mark state as coming so strong_try_module_get() ignores us, 3270 * but kallsyms etc. can see us. 3271 */ 3272 mod->state = MODULE_STATE_COMING; 3273 mutex_unlock(&module_mutex); 3274 3275 return 0; 3276 3277 out_strict_rwx: 3278 module_bug_cleanup(mod); 3279 out: 3280 mutex_unlock(&module_mutex); 3281 return err; 3282 } 3283 3284 static int prepare_coming_module(struct module *mod) 3285 { 3286 int err; 3287 3288 ftrace_module_enable(mod); 3289 err = klp_module_coming(mod); 3290 if (err) 3291 return err; 3292 3293 err = blocking_notifier_call_chain_robust(&module_notify_list, 3294 MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 3295 err = notifier_to_errno(err); 3296 if (err) 3297 klp_module_going(mod); 3298 3299 return err; 3300 } 3301 3302 static int unknown_module_param_cb(char *param, char *val, const char *modname, 3303 void *arg) 3304 { 3305 struct module *mod = arg; 3306 int ret; 3307 3308 if (strcmp(param, "async_probe") == 0) { 3309 if (kstrtobool(val, &mod->async_probe_requested)) 3310 mod->async_probe_requested = true; 3311 return 0; 3312 } 3313 3314 /* Check for magic 'dyndbg' arg */ 3315 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3316 if (ret != 0) 3317 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3318 return 0; 3319 } 3320 3321 /* Module within temporary copy, this doesn't do any allocation */ 3322 static int early_mod_check(struct load_info *info, int flags) 3323 { 3324 int err; 3325 3326 /* 3327 * Now that we know we have the correct module name, check 3328 * if it's blacklisted. 3329 */ 3330 if (blacklisted(info->name)) { 3331 pr_err("Module %s is blacklisted\n", info->name); 3332 return -EPERM; 3333 } 3334 3335 err = rewrite_section_headers(info, flags); 3336 if (err) 3337 return err; 3338 3339 /* Check module struct version now, before we try to use module. */ 3340 if (!check_modstruct_version(info, info->mod)) 3341 return -ENOEXEC; 3342 3343 err = check_modinfo(info->mod, info, flags); 3344 if (err) 3345 return err; 3346 3347 mutex_lock(&module_mutex); 3348 err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); 3349 mutex_unlock(&module_mutex); 3350 3351 return err; 3352 } 3353 3354 /* 3355 * Allocate and load the module: note that size of section 0 is always 3356 * zero, and we rely on this for optional sections. 
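 *
 * Roughly, the sequence below is: module_sig_check(), ELF validation and
 * caching (elf_validity_cache_copy()), early checks on the temporary copy,
 * layout_and_allocate(), insertion as an UNFORMED module, relocation and
 * arch finalizing, complete_formation(), and finally do_init_module().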
3357 */ 3358 static int load_module(struct load_info *info, const char __user *uargs, 3359 int flags) 3360 { 3361 struct module *mod; 3362 bool module_allocated = false; 3363 long err = 0; 3364 char *after_dashes; 3365 3366 /* 3367 * Do the signature check (if any) first. All that 3368 * the signature check needs is info->len, it does 3369 * not need any of the section info. That can be 3370 * set up later. This will minimize the chances 3371 * of a corrupt module causing problems before 3372 * we even get to the signature check. 3373 * 3374 * The check will also adjust info->len by stripping 3375 * off the sig length at the end of the module, making 3376 * checks against info->len more correct. 3377 */ 3378 err = module_sig_check(info, flags); 3379 if (err) 3380 goto free_copy; 3381 3382 /* 3383 * Do basic sanity checks against the ELF header and 3384 * sections. Cache useful sections and set the 3385 * info->mod to the userspace passed struct module. 3386 */ 3387 err = elf_validity_cache_copy(info, flags); 3388 if (err) 3389 goto free_copy; 3390 3391 err = early_mod_check(info, flags); 3392 if (err) 3393 goto free_copy; 3394 3395 /* Figure out module layout, and allocate all the memory. */ 3396 mod = layout_and_allocate(info, flags); 3397 if (IS_ERR(mod)) { 3398 err = PTR_ERR(mod); 3399 goto free_copy; 3400 } 3401 3402 module_allocated = true; 3403 3404 audit_log_kern_module(info->name); 3405 3406 /* Reserve our place in the list. */ 3407 err = add_unformed_module(mod); 3408 if (err) 3409 goto free_module; 3410 3411 /* 3412 * We are tainting your kernel if your module gets into 3413 * the modules linked list somehow. 3414 */ 3415 module_augment_kernel_taints(mod, info); 3416 3417 /* To avoid stressing percpu allocator, do this once we're unique. */ 3418 err = percpu_modalloc(mod, info); 3419 if (err) 3420 goto unlink_mod; 3421 3422 /* Now module is in final location, initialize linked lists, etc. */ 3423 err = module_unload_init(mod); 3424 if (err) 3425 goto unlink_mod; 3426 3427 init_param_lock(mod); 3428 3429 /* 3430 * Now we've got everything in the final locations, we can 3431 * find optional sections. 3432 */ 3433 err = find_module_sections(mod, info); 3434 if (err) 3435 goto free_unload; 3436 3437 err = check_export_symbol_versions(mod); 3438 if (err) 3439 goto free_unload; 3440 3441 /* Set up MODINFO_ATTR fields */ 3442 err = setup_modinfo(mod, info); 3443 if (err) 3444 goto free_modinfo; 3445 3446 /* Fix up syms, so that st_value is a pointer to location. */ 3447 err = simplify_symbols(mod, info); 3448 if (err < 0) 3449 goto free_modinfo; 3450 3451 err = apply_relocations(mod, info); 3452 if (err < 0) 3453 goto free_modinfo; 3454 3455 err = post_relocation(mod, info); 3456 if (err < 0) 3457 goto free_modinfo; 3458 3459 flush_module_icache(mod); 3460 3461 /* Now copy in args */ 3462 mod->args = strndup_user(uargs, ~0UL >> 1); 3463 if (IS_ERR(mod->args)) { 3464 err = PTR_ERR(mod->args); 3465 goto free_arch_cleanup; 3466 } 3467 3468 init_build_id(mod, info); 3469 3470 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 3471 ftrace_module_init(mod); 3472 3473 /* Finally it's fully formed, ready to start executing. */ 3474 err = complete_formation(mod, info); 3475 if (err) 3476 goto ddebug_cleanup; 3477 3478 err = prepare_coming_module(mod); 3479 if (err) 3480 goto bug_cleanup; 3481 3482 mod->async_probe_requested = async_probe; 3483 3484 /* Module is ready to execute: parsing args may do that. 
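	 * For example, with uargs "foo=1 -- leftover" (names purely
	 * illustrative), "foo=1" is handed to the module's parameters (or to
	 * unknown_module_param_cb() if unrecognized), while parse_args()
	 * returns a pointer to "leftover", which is only warned about and
	 * ignored below.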
*/ 3485 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3486 -32768, 32767, mod, 3487 unknown_module_param_cb); 3488 if (IS_ERR(after_dashes)) { 3489 err = PTR_ERR(after_dashes); 3490 goto coming_cleanup; 3491 } else if (after_dashes) { 3492 pr_warn("%s: parameters '%s' after `--' ignored\n", 3493 mod->name, after_dashes); 3494 } 3495 3496 /* Link in to sysfs. */ 3497 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); 3498 if (err < 0) 3499 goto coming_cleanup; 3500 3501 if (is_livepatch_module(mod)) { 3502 err = copy_module_elf(mod, info); 3503 if (err < 0) 3504 goto sysfs_cleanup; 3505 } 3506 3507 if (codetag_load_module(mod)) 3508 goto sysfs_cleanup; 3509 3510 /* Get rid of temporary copy. */ 3511 free_copy(info, flags); 3512 3513 /* Done! */ 3514 trace_module_load(mod); 3515 3516 return do_init_module(mod); 3517 3518 sysfs_cleanup: 3519 mod_sysfs_teardown(mod); 3520 coming_cleanup: 3521 mod->state = MODULE_STATE_GOING; 3522 destroy_params(mod->kp, mod->num_kp); 3523 blocking_notifier_call_chain(&module_notify_list, 3524 MODULE_STATE_GOING, mod); 3525 klp_module_going(mod); 3526 bug_cleanup: 3527 mod->state = MODULE_STATE_GOING; 3528 /* module_bug_cleanup needs module_mutex protection */ 3529 mutex_lock(&module_mutex); 3530 module_bug_cleanup(mod); 3531 mutex_unlock(&module_mutex); 3532 3533 ddebug_cleanup: 3534 ftrace_release_mod(mod); 3535 synchronize_rcu(); 3536 kfree(mod->args); 3537 free_arch_cleanup: 3538 module_arch_cleanup(mod); 3539 free_modinfo: 3540 free_modinfo(mod); 3541 free_unload: 3542 module_unload_free(mod); 3543 unlink_mod: 3544 mutex_lock(&module_mutex); 3545 /* Unlink carefully: kallsyms could be walking list. */ 3546 list_del_rcu(&mod->list); 3547 mod_tree_remove(mod); 3548 wake_up_all(&module_wq); 3549 /* Wait for RCU-sched synchronizing before releasing mod->list. */ 3550 synchronize_rcu(); 3551 mutex_unlock(&module_mutex); 3552 free_module: 3553 mod_stat_bump_invalid(info, flags); 3554 /* Free lock-classes; relies on the preceding sync_rcu() */ 3555 for_class_mod_mem_type(type, core_data) { 3556 lockdep_free_key_range(mod->mem[type].base, 3557 mod->mem[type].size); 3558 } 3559 3560 module_memory_restore_rox(mod); 3561 module_deallocate(mod, info); 3562 free_copy: 3563 /* 3564 * The info->len is always set. We distinguish between 3565 * failures once the proper module was allocated and 3566 * before that. 3567 */ 3568 if (!module_allocated) { 3569 audit_log_kern_module(info->name ? 
info->name : "?"); 3570 mod_stat_bump_becoming(info, flags); 3571 } 3572 free_copy(info, flags); 3573 return err; 3574 } 3575 3576 SYSCALL_DEFINE3(init_module, void __user *, umod, 3577 unsigned long, len, const char __user *, uargs) 3578 { 3579 int err; 3580 struct load_info info = { }; 3581 3582 err = may_init_module(); 3583 if (err) 3584 return err; 3585 3586 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", 3587 umod, len, uargs); 3588 3589 err = copy_module_from_user(umod, len, &info); 3590 if (err) { 3591 mod_stat_inc(&failed_kreads); 3592 mod_stat_add_long(len, &invalid_kread_bytes); 3593 return err; 3594 } 3595 3596 return load_module(&info, uargs, 0); 3597 } 3598 3599 struct idempotent { 3600 const void *cookie; 3601 struct hlist_node entry; 3602 struct completion complete; 3603 int ret; 3604 }; 3605 3606 #define IDEM_HASH_BITS 8 3607 static struct hlist_head idem_hash[1 << IDEM_HASH_BITS]; 3608 static DEFINE_SPINLOCK(idem_lock); 3609 3610 static bool idempotent(struct idempotent *u, const void *cookie) 3611 { 3612 int hash = hash_ptr(cookie, IDEM_HASH_BITS); 3613 struct hlist_head *head = idem_hash + hash; 3614 struct idempotent *existing; 3615 bool first; 3616 3617 u->ret = -EINTR; 3618 u->cookie = cookie; 3619 init_completion(&u->complete); 3620 3621 spin_lock(&idem_lock); 3622 first = true; 3623 hlist_for_each_entry(existing, head, entry) { 3624 if (existing->cookie != cookie) 3625 continue; 3626 first = false; 3627 break; 3628 } 3629 hlist_add_head(&u->entry, idem_hash + hash); 3630 spin_unlock(&idem_lock); 3631 3632 return !first; 3633 } 3634 3635 /* 3636 * We were the first one with 'cookie' on the list, and we ended 3637 * up completing the operation. We now need to walk the list, 3638 * remove everybody - which includes ourselves - fill in the return 3639 * value, and then complete the operation. 3640 */ 3641 static int idempotent_complete(struct idempotent *u, int ret) 3642 { 3643 const void *cookie = u->cookie; 3644 int hash = hash_ptr(cookie, IDEM_HASH_BITS); 3645 struct hlist_head *head = idem_hash + hash; 3646 struct hlist_node *next; 3647 struct idempotent *pos; 3648 3649 spin_lock(&idem_lock); 3650 hlist_for_each_entry_safe(pos, next, head, entry) { 3651 if (pos->cookie != cookie) 3652 continue; 3653 hlist_del_init(&pos->entry); 3654 pos->ret = ret; 3655 complete(&pos->complete); 3656 } 3657 spin_unlock(&idem_lock); 3658 return ret; 3659 } 3660 3661 /* 3662 * Wait for the idempotent worker. 3663 * 3664 * If we get interrupted, we need to remove ourselves from the 3665 * the idempotent list, and the completion may still come in. 3666 * 3667 * The 'idem_lock' protects against the race, and 'idem.ret' was 3668 * initialized to -EINTR and is thus always the right return 3669 * value even if the idempotent work then completes between 3670 * the wait_for_completion and the cleanup. 
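 *
 * Net effect: if two tasks call finit_module() on the same inode at the
 * same time, only the first actually runs init_module_from_file(); the
 * other blocks here and receives the same return value.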
3671 */ 3672 static int idempotent_wait_for_completion(struct idempotent *u) 3673 { 3674 if (wait_for_completion_interruptible(&u->complete)) { 3675 spin_lock(&idem_lock); 3676 if (!hlist_unhashed(&u->entry)) 3677 hlist_del(&u->entry); 3678 spin_unlock(&idem_lock); 3679 } 3680 return u->ret; 3681 } 3682 3683 static int init_module_from_file(struct file *f, const char __user * uargs, int flags) 3684 { 3685 struct load_info info = { }; 3686 void *buf = NULL; 3687 int len; 3688 3689 len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); 3690 if (len < 0) { 3691 mod_stat_inc(&failed_kreads); 3692 return len; 3693 } 3694 3695 if (flags & MODULE_INIT_COMPRESSED_FILE) { 3696 int err = module_decompress(&info, buf, len); 3697 vfree(buf); /* compressed data is no longer needed */ 3698 if (err) { 3699 mod_stat_inc(&failed_decompress); 3700 mod_stat_add_long(len, &invalid_decompress_bytes); 3701 return err; 3702 } 3703 } else { 3704 info.hdr = buf; 3705 info.len = len; 3706 } 3707 3708 return load_module(&info, uargs, flags); 3709 } 3710 3711 static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) 3712 { 3713 struct idempotent idem; 3714 3715 if (!(f->f_mode & FMODE_READ)) 3716 return -EBADF; 3717 3718 /* Are we the winners of the race and get to do this? */ 3719 if (!idempotent(&idem, file_inode(f))) { 3720 int ret = init_module_from_file(f, uargs, flags); 3721 return idempotent_complete(&idem, ret); 3722 } 3723 3724 /* 3725 * Somebody else won the race and is loading the module. 3726 */ 3727 return idempotent_wait_for_completion(&idem); 3728 } 3729 3730 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) 3731 { 3732 int err = may_init_module(); 3733 if (err) 3734 return err; 3735 3736 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); 3737 3738 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS 3739 |MODULE_INIT_IGNORE_VERMAGIC 3740 |MODULE_INIT_COMPRESSED_FILE)) 3741 return -EINVAL; 3742 3743 CLASS(fd, f)(fd); 3744 if (fd_empty(f)) 3745 return -EBADF; 3746 return idempotent_init_module(fd_file(f), uargs, flags); 3747 } 3748 3749 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ 3750 char *module_flags(struct module *mod, char *buf, bool show_state) 3751 { 3752 int bx = 0; 3753 3754 BUG_ON(mod->state == MODULE_STATE_UNFORMED); 3755 if (!mod->taints && !show_state) 3756 goto out; 3757 if (mod->taints || 3758 mod->state == MODULE_STATE_GOING || 3759 mod->state == MODULE_STATE_COMING) { 3760 buf[bx++] = '('; 3761 bx += module_flags_taint(mod->taints, buf + bx); 3762 /* Show a - for module-is-being-unloaded */ 3763 if (mod->state == MODULE_STATE_GOING && show_state) 3764 buf[bx++] = '-'; 3765 /* Show a + for module-is-being-loaded */ 3766 if (mod->state == MODULE_STATE_COMING && show_state) 3767 buf[bx++] = '+'; 3768 buf[bx++] = ')'; 3769 } 3770 out: 3771 buf[bx] = '\0'; 3772 3773 return buf; 3774 } 3775 3776 /* Given an address, look for it in the module exception tables. */ 3777 const struct exception_table_entry *search_module_extables(unsigned long addr) 3778 { 3779 struct module *mod; 3780 3781 guard(rcu)(); 3782 mod = __module_address(addr); 3783 if (!mod) 3784 return NULL; 3785 3786 if (!mod->num_exentries) 3787 return NULL; 3788 /* 3789 * The address passed here belongs to a module that is currently 3790 * invoked (we are running inside it). Therefore its module::refcnt 3791 * needs already be >0 to ensure that it is not removed at this stage. 
3792 * All other user need to invoke this function within a RCU read 3793 * section. 3794 */ 3795 return search_extable(mod->extable, mod->num_exentries, addr); 3796 } 3797 3798 /** 3799 * is_module_address() - is this address inside a module? 3800 * @addr: the address to check. 3801 * 3802 * See is_module_text_address() if you simply want to see if the address 3803 * is code (not data). 3804 */ 3805 bool is_module_address(unsigned long addr) 3806 { 3807 guard(rcu)(); 3808 return __module_address(addr) != NULL; 3809 } 3810 3811 /** 3812 * __module_address() - get the module which contains an address. 3813 * @addr: the address. 3814 * 3815 * Must be called within RCU read section or module mutex held so that 3816 * module doesn't get freed during this. 3817 */ 3818 struct module *__module_address(unsigned long addr) 3819 { 3820 struct module *mod; 3821 3822 if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) 3823 goto lookup; 3824 3825 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 3826 if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max) 3827 goto lookup; 3828 #endif 3829 3830 return NULL; 3831 3832 lookup: 3833 mod = mod_find(addr, &mod_tree); 3834 if (mod) { 3835 BUG_ON(!within_module(addr, mod)); 3836 if (mod->state == MODULE_STATE_UNFORMED) 3837 mod = NULL; 3838 } 3839 return mod; 3840 } 3841 3842 /** 3843 * is_module_text_address() - is this address inside module code? 3844 * @addr: the address to check. 3845 * 3846 * See is_module_address() if you simply want to see if the address is 3847 * anywhere in a module. See kernel_text_address() for testing if an 3848 * address corresponds to kernel or module code. 3849 */ 3850 bool is_module_text_address(unsigned long addr) 3851 { 3852 guard(rcu)(); 3853 return __module_text_address(addr) != NULL; 3854 } 3855 3856 void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data) 3857 { 3858 struct module *mod; 3859 3860 guard(rcu)(); 3861 list_for_each_entry_rcu(mod, &modules, list) { 3862 if (mod->state == MODULE_STATE_UNFORMED) 3863 continue; 3864 if (func(mod, data)) 3865 break; 3866 } 3867 } 3868 3869 /** 3870 * __module_text_address() - get the module whose code contains an address. 3871 * @addr: the address. 3872 * 3873 * Must be called within RCU read section or module mutex held so that 3874 * module doesn't get freed during this. 3875 */ 3876 struct module *__module_text_address(unsigned long addr) 3877 { 3878 struct module *mod = __module_address(addr); 3879 if (mod) { 3880 /* Make sure it's within the text section. */ 3881 if (!within_module_mem_type(addr, mod, MOD_TEXT) && 3882 !within_module_mem_type(addr, mod, MOD_INIT_TEXT)) 3883 mod = NULL; 3884 } 3885 return mod; 3886 } 3887 3888 /* Don't grab lock, we're oopsing. 
*/ 3889 void print_modules(void) 3890 { 3891 struct module *mod; 3892 char buf[MODULE_FLAGS_BUF_SIZE]; 3893 3894 printk(KERN_DEFAULT "Modules linked in:"); 3895 /* Most callers should already have preempt disabled, but make sure */ 3896 guard(rcu)(); 3897 list_for_each_entry_rcu(mod, &modules, list) { 3898 if (mod->state == MODULE_STATE_UNFORMED) 3899 continue; 3900 pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); 3901 } 3902 3903 print_unloaded_tainted_modules(); 3904 if (last_unloaded_module.name[0]) 3905 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, 3906 last_unloaded_module.taints); 3907 pr_cont("\n"); 3908 } 3909 3910 #ifdef CONFIG_MODULE_DEBUGFS 3911 struct dentry *mod_debugfs_root; 3912 3913 static int module_debugfs_init(void) 3914 { 3915 mod_debugfs_root = debugfs_create_dir("modules", NULL); 3916 return 0; 3917 } 3918 module_init(module_debugfs_init); 3919 #endif 3920