// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:
 * 1) List of modules (also safely readable within RCU read section),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

struct mod_tree_root mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const u32 *crcs;
	enum mod_license license;
};

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (mod_mem_type_is_core_data(type)) {
		if (min < tree->data_addr_min)
			tree->data_addr_min = min;
		if (max > tree->data_addr_max)
			tree->data_addr_max = max;
		return;
	}
#endif
	if (min < tree->addr_min)
		tree->addr_min = min;
	if (max > tree->addr_max)
		tree->addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (mod_mem->size)
			__mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
	}
}

/* Block module loading/unloading? */
static int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);

static const struct ctl_table module_sysctl_table[] = {
	{
		.procname	= "modprobe",
		.data		= &modprobe_path,
		.maxlen		= KMOD_PATH_LEN,
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "modules_disabled",
		.data		= &modules_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init init_module_sysctl(void)
{
	register_sysctl_init("kernel", module_sysctl_table);
	return 0;
}

subsys_initcall(init_module_sysctl);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}

/*
 * Like strncmp(), except s/-/_/g as per scripts/Makefile.lib:name-fix-token rule.
 */
static int mod_strncmp(const char *str_a, const char *str_b, size_t n)
{
	for (int i = 0; i < n; i++) {
		char a = str_a[i];
		char b = str_b[i];
		int d;

		if (a == '-')
			a = '_';
		if (b == '-')
			b = '_';

		d = a - b;
		if (d)
			return d;

		if (!a)
			break;
	}

	return 0;
}
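/*
 * Illustrative example (not from the original source): because '-' and
 * '_' compare equal above, mod_strncmp("snd-pcm", "snd_pcm", 8) returns
 * 0, matching how the build normalizes module name tokens.
 */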
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];

		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/**
 * find_any_unique_sec() - Find a unique section index by name
 * @info: Load info for the module to scan
 * @name: Name of the section we're looking for
 *
 * Locates a unique section by name. Ignores SHF_ALLOC.
 *
 * Return: Section index if found uniquely, zero if absent, negative count
 * of total instances if multiple were found.
 */
static int find_any_unique_sec(const struct load_info *info, const char *name)
{
	unsigned int idx;
	unsigned int count = 0;
	int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
			   name) == 0) {
			count++;
			idx = i;
		}
	}
	if (count == 1) {
		return idx;
	} else if (count == 0) {
		return 0;
	} else {
		return -count;
	}
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
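/*
 * Illustrative use of section_objs() (a sketch, not from this excerpt):
 * the loader gathers per-section tables with something like
 *	mod->kp = section_objs(info, "__param", sizeof(*mod->kp), &mod->num_kp);
 * relying on section 0's zero sh_addr/sh_size to yield NULL and a zero
 * count when the named section is absent.
 */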
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
		return NULL;
	return offset_to_ptr(&sym->namespace_offset);
#else
	return sym->namespace;
#endif
}

int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    struct find_symbol_arg *fsa)
{
	struct kernel_symbol *sym;

	if (!fsa->gplok && syms->license == GPL_ONLY)
		return false;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);
	if (!sym)
		return false;

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, sym - syms->start);
	fsa->sym = sym;
	fsa->license = syms->license;

	return true;
}

/*
 * Find an exported symbol and return it, along with (optional) crc and
 * (optional) module which owns it. Needs RCU or module_mutex.
 */
bool find_symbol(struct find_symbol_arg *fsa)
{
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY },
	};
	struct module *mod;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++)
		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
			return true;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY },
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		for (i = 0; i < ARRAY_SIZE(arr); i++)
			if (find_exported_symbol_in_section(&arr[i], mod, fsa))
				return true;
	}

	pr_debug("Failed to find symbol %s\n", fsa->name);
	return false;
}
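/*
 * Usage sketch (mirrors __symbol_put() further below; the symbol name is
 * a placeholder): callers fill a struct find_symbol_arg and read the
 * answer back out of it:
 *	struct find_symbol_arg fsa = { .name = "some_export", .gplok = true };
 *	guard(rcu)();
 *	if (find_symbol(&fsa))
 *		pr_info("owned by %s\n", module_name(fsa.owner));
 */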
/*
 * Search for module by name: must hold module_mutex (or RCU for read-only
 * access).
 */
struct module *find_module_all(const char *name, size_t len,
			       bool even_unformed)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	guard(rcu)();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				return true;
			}
		}
	}
	return false;
}

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}
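/*
 * Note on can_addr above: the reported "canonical" address is the offset
 * within the module's percpu area added to the boot CPU's copy, so all
 * CPUs' instances of one percpu variable map to a single stable address.
 */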
#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)	\
{									\
	mod->field = kstrdup(s, GFP_KERNEL);				\
}									\
static ssize_t show_modinfo_##field(const struct module_attribute *mattr,	\
				    struct module_kobject *mk, char *buffer)	\
{									\
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);	\
}									\
static int modinfo_##field##_exists(struct module *mod)		\
{									\
	return mod->field != NULL;					\
}									\
static void free_modinfo_##field(struct module *mod)			\
{									\
	kfree(mod->field);						\
	mod->field = NULL;						\
}									\
static const struct module_attribute modinfo_##field = {		\
	.attr = { .name = __stringify(field), .mode = 0444 },		\
	.show = show_modinfo_##field,					\
	.setup = setup_modinfo_##field,					\
	.test = modinfo_##field##_exists,				\
	.free = free_modinfo_##field,					\
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static struct {
	char name[MODULE_NAME_LEN];
	char taints[MODULE_FLAGS_BUF_SIZE];
} last_unloaded_module;

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a)
			return 1;
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}
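/*
 * Illustration (hypothetical modules): if module "a" resolves a symbol
 * exported by module "b", a struct module_use with .source = a and
 * .target = b is linked below, so already_uses(a, b) returns 1 and "b"
 * cannot be unloaded while "a" remains loaded.
 */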
/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
static int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;

		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);

	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}
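/*
 * Worked example: a live module with two users has refcnt ==
 * MODULE_REF_BASE + 2. atomic_sub_return() above leaves 2 (nonzero), so
 * the base reference is re-added via atomic_add_unless() and the release
 * fails; with no users, refcnt drops to 0 and the unload may proceed.
 */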
static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod:	the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	char buf[MODULE_FLAGS_BUF_SIZE];
	int ret, len, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	len = strncpy_from_user(name, name_user, MODULE_NAME_LEN);
	if (len == 0 || len == MODULE_NAME_LEN)
		return -ENOENT;
	if (len < 0)
		return len;

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now that no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name and taints of the last unloaded module for diagnostic purposes */
	strscpy(last_unloaded_module.name, mod->name);
	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false));

	free_module(mod);
	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
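/*
 * Context note (userspace contract, stated here for orientation): rmmod
 * maps its force option onto the O_TRUNC bit of @flags, which
 * try_force_unload() only honors under CONFIG_MODULE_FORCE_UNLOAD and
 * which taints the kernel with TAINT_FORCED_RMMOD.
 */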
void __symbol_put(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
	};

	guard(rcu)();
	BUG_ON(!find_symbol(&fsa));
	module_put(fsa.owner);
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module, we still need an
	 * RCU read section in order to safely traverse the data structure.
	 */
	guard(rcu)();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(const struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static const struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(module_put);
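/*
 * Typical caller pattern (a sketch; ops and its owner are illustrative):
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	ops->callback();
 *	module_put(ops->owner);
 * The get fails once the owner is GOING; see strong_try_module_get()
 * above for the stricter variant that also rejects COMING modules.
 */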
#else /* !CONFIG_MODULE_UNLOAD */
static inline void module_unload_free(struct module *mod)
{
}

static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

size_t module_flags_taint(unsigned long taints, char *buf)
{
	size_t l = 0;
	int i;

	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &taints))
			buf[l++] = taint_flags[i].c_true;
	}

	return l;
}

static ssize_t show_initstate(const struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static const struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(const struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;
}

const struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = mk->mod->mem[MOD_TEXT].size;

	if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
		for_class_mod_mem_type(type, core_data)
			size += mk->mod->mem[type].size;
	}
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
static ssize_t show_datasize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, core_data)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_datasize =
	__ATTR(datasize, 0444, show_datasize, NULL);
#endif

static ssize_t show_initsize(const struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, init)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static const struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(const struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod->taints, buffer);
	buffer[l++] = '\n';
	return l;
}

static const struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

const struct module_attribute *const modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	&modinfo_datasize,
#endif
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

const size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);

static const char vermagic[] = VERMAGIC_STRING;

int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
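/*
 * .modinfo layout, for reference: a flat blob of NUL-terminated
 * "tag=value" pairs, possibly with zero padding between them, e.g.
 *	license=GPL\0author=Jane Doe\0vermagic=...\0
 * (the author value is illustrative). module_next_tag_pair() below
 * steps from one pair to the next.
 */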
/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	/*
	 * get_modinfo() calls made before rewrite_section_headers()
	 * must use sh_offset, as sh_addr isn't set!
	 */
	char *modinfo = (char *)info->hdr + infosec->sh_offset;

	if (prev) {
		size -= prev - modinfo;
		modinfo = module_next_tag_pair(prev, &size);
	}

	for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

static char *get_modinfo(const struct load_info *info, const char *tag)
{
	return get_next_modinfo(info, tag, NULL);
}

/**
 * verify_module_namespace() - does @modname have access to this symbol's @namespace
 * @namespace: export symbol namespace
 * @modname: module name
 *
 * If @namespace is prefixed with "module:" to indicate it is a module namespace
 * then test if @modname matches any of the comma separated patterns.
 *
 * The patterns only support tail-glob.
 */
static bool verify_module_namespace(const char *namespace, const char *modname)
{
	size_t len, modlen = strlen(modname);
	const char *prefix = "module:";
	const char *sep;
	bool glob;

	if (!strstarts(namespace, prefix))
		return false;

	for (namespace += strlen(prefix); *namespace; namespace = sep) {
		sep = strchrnul(namespace, ',');
		len = sep - namespace;

		glob = false;
		if (sep[-1] == '*') {
			len--;
			glob = true;
		}

		if (*sep)
			sep++;

		if (mod_strncmp(namespace, modname, len) == 0 && (glob || len == modlen))
			return true;
	}

	return false;
}
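/*
 * Examples (illustrative namespaces): "module:foo,bar" admits exactly
 * the modules foo and bar; "module:snd_*" tail-globs any module whose
 * name begins with snd_ (or snd-, since mod_strncmp() folds dashes).
 */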
static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace && namespace[0]) {

		if (verify_module_namespace(namespace, mod->name))
			return 0;

		for_each_modinfo_entry(imported_namespace, info, "import_ns") {
			if (strcmp(namespace, imported_namespace) == 0)
				return 0;
		}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		pr_warn(
#else
		pr_err(
#endif
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		return -EINVAL;
#endif
	}
	return 0;
}

static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
{
	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
		return true;

	if (mod->using_gplonly_symbols) {
		pr_err("%s: module using GPL-only symbols uses symbol %s from proprietary module %s.\n",
		       mod->name, name, owner->name);
		return false;
	}

	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
		pr_warn("%s: module uses symbol %s from proprietary module %s, inheriting taint.\n",
			mod->name, name, owner->name);
		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
	}
	return true;
}
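/*
 * Net effect of inherit_taint(): linking against a proprietary module's
 * exports spreads TAINT_PROPRIETARY_MODULE to the user, while a module
 * that already uses GPL-only symbols is refused such links outright.
 */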
/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct find_symbol_arg fsa = {
		.name	= name,
		.gplok	= !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
		.warn	= true,
	};
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	if (!find_symbol(&fsa))
		goto unlock;

	if (fsa.license == GPL_ONLY)
		mod->using_gplonly_symbols = true;

	if (!inherit_taint(mod, fsa.owner, name)) {
		fsa.sym = NULL;
		goto getname;
	}

	if (!check_version(info, name, mod, fsa.crc)) {
		fsa.sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = verify_namespace_is_imported(info, fsa.sym, mod);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

	err = ref_module(mod, fsa.owner);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make a copy under the lock if we failed to get ref. */
	strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return fsa.sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
					     !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
					     || PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
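/*
 * The 30 second timeout above bounds how long a loading module waits
 * for a symbol whose owner is still MODULE_STATE_COMING (the -EBUSY
 * case from strong_try_module_get()); module_wq is woken as modules
 * finish (or fail) initialization.
 */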
void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}

static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
{
	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
	enum execmem_type execmem_type;
	void *ptr;

	mod->mem[type].size = size;

	if (mod_mem_type_is_data(type))
		execmem_type = EXECMEM_MODULE_DATA;
	else
		execmem_type = EXECMEM_MODULE_TEXT;

	ptr = execmem_alloc(execmem_type, size);
	if (!ptr)
		return -ENOMEM;

	if (execmem_is_rox(execmem_type)) {
		int err = execmem_make_temp_rw(ptr, size);

		if (err) {
			execmem_free(ptr);
			return -ENOMEM;
		}

		mod->mem[type].is_rox = true;
	}

	/*
	 * The pointers to these blocks of memory are stored on the module
	 * structure and we keep that around so long as the module is
	 * around. We only free that memory when we unload the module.
	 * Just mark them as not being a leak then. The .init* ELF
	 * sections *do* get freed after boot so we *could* treat them
	 * slightly differently with kmemleak_ignore() and only grey
	 * them out as they work as typical memory allocations which
	 * *do* eventually get freed, but let's just keep things simple
	 * and avoid *any* false positives.
	 */
	if (!mod->mem[type].is_rox)
		kmemleak_not_leak(ptr);

	memset(ptr, 0, size);
	mod->mem[type].base = ptr;

	return 0;
}

static void module_memory_restore_rox(struct module *mod)
{
	for_class_mod_mem_type(type, text) {
		struct module_memory *mem = &mod->mem[type];

		if (mem->is_rox)
			execmem_restore_rox(mem->base, mem->size);
	}
}

static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
	struct module_memory *mem = &mod->mem[type];

	execmem_free(mem->base);
}

static void free_mod_mem(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (type == MOD_DATA)
			continue;

		/* Free lock-classes; relies on the preceding sync_rcu(). */
		lockdep_free_key_range(mod_mem->base, mod_mem->size);
		if (mod_mem->size)
			module_memory_free(mod, type);
	}

	/* MOD_DATA hosts mod, so free it at last */
	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
	module_memory_free(mod, MOD_DATA);
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	codetag_unload_module(mod);

	mod_sysfs_teardown(mod);

	/*
	 * We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed.
	 */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	if (is_livepatch_module(mod))
		free_module_elf(mod);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU synchronization before releasing mod->list and buglist. */
	synchronize_rcu();
	if (try_add_tainted_module(mod))
		pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
		       mod->name);
	mutex_unlock(&module_mutex);

	/* This may be empty, but that's OK */
	module_arch_freeing_init(mod);
	kfree(mod->args);
	percpu_modfree(mod);

	free_mod_mem(mod);
}
void *__symbol_get(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
		.warn	= true,
	};

	scoped_guard(rcu) {
		if (!find_symbol(&fsa))
			return NULL;
		if (fsa.license != GPL_ONLY) {
			pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
				symbol);
			return NULL;
		}
		if (strong_try_module_get(fsa.owner))
			return NULL;
	}
	return (void *)kernel_symbol_value(fsa.sym);
}
EXPORT_SYMBOL_GPL(__symbol_get);

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_exported_symbols(struct module *mod)
{
	unsigned int i;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			struct find_symbol_arg fsa = {
				.name	= kernel_symbol_name(s),
				.gplok	= true,
			};

			if (find_symbol(&fsa)) {
				pr_err("%s: exports duplicate symbol %s (owned by %s)\n",
				       mod->name, kernel_symbol_name(s),
				       module_name(fsa.owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}

static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
{
	/*
	 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
	 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
	 * i386 has a similar problem but may not deserve a fix.
	 *
	 * If we ever have to ignore many symbols, consider refactoring the code to
	 * only warn if referenced by a relocation.
	 */
	if (emachine == EM_386 || emachine == EM_X86_64)
		return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
	return false;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/*
			 * We compiled with -fno-common. These are not
			 * supposed to happen.
			 */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx %s\n",
				 (long)sym[i].st_value, name);
			break;

		case SHN_LIVEPATCH:
			/* Livepatch symbols are resolved by livepatch */
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved. */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = kernel_symbol_value(ksym);
				break;
			}

			/* Ok if weak or ignored. */
			if (!ksym &&
			    (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
			     ignore_undef_symbol(info->hdr->e_machine, name)))
				break;

			ret = PTR_ERR(ksym) ?: -ENOENT;
			pr_warn("%s: Unknown symbol %s (err %d)\n",
				mod->name, name, ret);
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
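/*
 * Example of the rewrite above (addresses illustrative): an SHN_UNDEF
 * entry for "printk" gets st_value set to the kernel's printk; a symbol
 * defined at offset 0x10 of section 7 becomes sechdrs[7].sh_addr + 0x10,
 * so later relocation code can treat every st_value as a final pointer.
 */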
static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/*
		 * Don't bother with non-allocated sections.
		 * An exception is the percpu section, which has separate allocations
		 * for individual CPUs. We relocate the percpu section in the initial
		 * ELF template and subsequently copy it to the per-CPU destinations.
		 */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC) &&
		    (!infosec || infosec != info->index.pcpu))
			continue;

		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
			err = klp_apply_section_relocs(mod, info->sechdrs,
						       info->secstrings,
						       info->strtab,
						       info->index.sym, i,
						       NULL);
		else if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
				Elf_Shdr *sechdr, unsigned int section)
{
	long offset;
	long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;

	mod->mem[type].size += arch_mod_section_prepend(mod, section);
	offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
	mod->mem[type].size = offset + sechdr->sh_size;

	WARN_ON_ONCE(offset & mask);
	return offset | mask;
}

bool module_init_layout_section(const char *sname)
{
#ifndef CONFIG_MODULE_UNLOAD
	if (module_exit_section(sname))
		return true;
#endif
	return module_init_section(sname);
}
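/*
 * Encoding example (values illustrative): a section placed in
 * MOD_INIT_TEXT at offset 0x40 yields
 *	0x40 | (MOD_INIT_TEXT << SH_ENTSIZE_TYPE_SHIFT)
 * in sh_entsize. Layout code later splits the offset from the memory
 * type; those type bits are what "high bit means init" refers to below.
 */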
static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
{
	unsigned int m, i;

	/*
	 * { Mask of required section header flags,
	 *   Mask of excluded section header flags }
	 */
	static const unsigned long masks[][2] = {
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	static const int core_m_to_mem_type[] = {
		MOD_TEXT,
		MOD_RODATA,
		MOD_RO_AFTER_INIT,
		MOD_DATA,
		MOD_DATA,
	};
	static const int init_m_to_mem_type[] = {
		MOD_INIT_TEXT,
		MOD_INIT_RODATA,
		MOD_INVALID,
		MOD_INIT_DATA,
		MOD_INIT_DATA,
	};

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];

		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || is_init != module_init_layout_section(sname))
				continue;

			if (WARN_ON_ONCE(type == MOD_INVALID))
				continue;

			/*
			 * Do not allocate codetag memory as we load it into
			 * preallocated contiguous memory.
			 */
			if (codetag_needs_module_section(mod, sname, s->sh_size)) {
				/*
				 * s->sh_entsize won't be used but populate the
				 * type field to avoid confusion.
				 */
				s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK)
						<< SH_ENTSIZE_TYPE_SHIFT;
				continue;
			}

			s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
			pr_debug("\t%s\n", sname);
		}
	}
}

/*
 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 * might -- code, read-only data, read-write data, small data. Tally
 * sizes, and place the offsets into sh_entsize fields: high bit means it
 * belongs in init.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{
	unsigned int i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, false);

	pr_debug("Init section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, true);
}

static void module_license_taint_check(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}

static int setup_modinfo(struct module *mod, struct load_info *info)
{
	const struct module_attribute *attr;
	char *imported_namespace;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}

	for_each_modinfo_entry(imported_namespace, info, "import_ns") {
		/*
		 * 'module:' prefixed namespaces are implicit, disallow
		 * explicit imports.
		 */
		if (strstarts(imported_namespace, "module:")) {
			pr_err("%s: module tries to import module namespace: %s\n",
			       mod->name, imported_namespace);
			return -EPERM;
		}
	}

	return 0;
}

static void free_modinfo(struct module *mod)
{
	const struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}

bool __weak module_init_section(const char *name)
{
	return strstarts(name, ".init");
}

bool __weak module_exit_section(const char *name)
{
	return strstarts(name, ".exit");
}

static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr)
{
#if defined(CONFIG_64BIT)
	unsigned long long secend;
#else
	unsigned long secend;
#endif

	/*
	 * Check for both overflow and offset/size being
	 * too large.
	 */
	secend = shdr->sh_offset + shdr->sh_size;
	if (secend < shdr->sh_offset || secend > info->len)
		return -ENOEXEC;

	return 0;
}
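/*
 * Overflow example for the check above (32-bit build, where secend is a
 * 32-bit unsigned long): sh_offset = 0xfffffff0 with sh_size = 0x20
 * wraps secend around to 0x10, which the "secend < shdr->sh_offset"
 * comparison rejects.
 */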
/**
 * elf_validity_ehdr() - Checks an ELF header for module validity
 * @info: Load info containing the ELF header to check
 *
 * Checks whether an ELF header could belong to a valid module. Checks:
 *
 * * ELF header is within the data the user provided
 * * ELF magic is present
 * * It is relocatable (not final linked, not core file, etc.)
 * * The header's machine type matches what the architecture expects.
 * * Optional arch-specific hook for other properties
 *   - module_elf_check_arch() is currently only used by PPC to check
 *   ELF ABI version, but may be used by others in the future.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_ehdr(const struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr))) {
		pr_err("Invalid ELF header len %lu\n", info->len);
		return -ENOEXEC;
	}
	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
		pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
		return -ENOEXEC;
	}
	if (info->hdr->e_type != ET_REL) {
		pr_err("Invalid ELF header type: %u != %u\n",
		       info->hdr->e_type, ET_REL);
		return -ENOEXEC;
	}
	if (!elf_check_arch(info->hdr)) {
		pr_err("Invalid architecture in ELF header: %u\n",
		       info->hdr->e_machine);
		return -ENOEXEC;
	}
	if (!module_elf_check_arch(info->hdr)) {
		pr_err("Invalid module architecture in ELF header: %u\n",
		       info->hdr->e_machine);
		return -ENOEXEC;
	}
	return 0;
}

/**
 * elf_validity_cache_sechdrs() - Cache section headers if valid
 * @info: Load info to compute section headers from
 *
 * Checks:
 *
 * * ELF header is valid (see elf_validity_ehdr())
 * * Section headers are the size we expect
 * * Section array fits in the user provided data
 * * Section index 0 is NULL
 * * Section contents are inbounds
 *
 * Then updates @info with a &load_info->sechdrs pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_sechdrs(struct load_info *info)
{
	Elf_Shdr *sechdrs;
	Elf_Shdr *shdr;
	int i;
	int err;

	err = elf_validity_ehdr(info);
	if (err < 0)
		return err;

	if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
		pr_err("Invalid ELF section header size\n");
		return -ENOEXEC;
	}

	/*
	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
	 * known and small. So e_shnum * sizeof(Elf_Shdr)
	 * will not overflow unsigned long on any platform.
	 */
	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff)) {
		pr_err("Invalid ELF section header overflow\n");
		return -ENOEXEC;
	}

	sechdrs = (void *)info->hdr + info->hdr->e_shoff;

	/*
	 * The code assumes that section 0 has a length of zero and
	 * an addr of zero, so check for it.
	 */
	if (sechdrs[0].sh_type != SHT_NULL
	    || sechdrs[0].sh_size != 0
	    || sechdrs[0].sh_addr != 0) {
		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
		       sechdrs[0].sh_type);
		return -ENOEXEC;
	}

	/* Validate contents are inbounds */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		shdr = &sechdrs[i];
		switch (shdr->sh_type) {
		case SHT_NULL:
		case SHT_NOBITS:
			/* No contents, offset/size don't mean anything */
			continue;
		default:
			err = validate_section_offset(info, shdr);
			if (err < 0) {
				pr_err("Invalid ELF section in module (section %u type %u)\n",
				       i, shdr->sh_type);
				return err;
			}
		}
	}

	info->sechdrs = sechdrs;

	return 0;
}
/**
 * elf_validity_cache_secstrings() - Caches section names if valid
 * @info: Load info to cache section names from. Must have valid sechdrs.
 *
 * Specifically checks:
 *
 * * Section name table index is inbounds of section headers
 * * Section name table is not empty
 * * Section name table is NUL terminated
 * * All section name offsets are inbounds of the section
 *
 * Then updates @info with a &load_info->secstrings pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_secstrings(struct load_info *info)
{
	Elf_Shdr *strhdr, *shdr;
	char *secstrings;
	int i;

	/*
	 * Verify that the section name table index is valid.
	 */
	if (info->hdr->e_shstrndx == SHN_UNDEF
	    || info->hdr->e_shstrndx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
		       info->hdr->e_shnum);
		return -ENOEXEC;
	}

	strhdr = &info->sechdrs[info->hdr->e_shstrndx];

	/*
	 * The section name table must be NUL-terminated, as required
	 * by the spec. This makes strcmp and pr_* calls that access
	 * strings in the section safe.
	 */
	secstrings = (void *)info->hdr + strhdr->sh_offset;
	if (strhdr->sh_size == 0) {
		pr_err("empty section name table\n");
		return -ENOEXEC;
	}
	if (secstrings[strhdr->sh_size - 1] != '\0') {
		pr_err("ELF Spec violation: section name table isn't NUL terminated\n");
		return -ENOEXEC;
	}

	for (i = 0; i < info->hdr->e_shnum; i++) {
		shdr = &info->sechdrs[i];
		/* SHT_NULL means sh_name has an undefined value */
		if (shdr->sh_type == SHT_NULL)
			continue;
		if (shdr->sh_name >= strhdr->sh_size) {
			pr_err("Invalid ELF section name in module (section %u type %u)\n",
			       i, shdr->sh_type);
			return -ENOEXEC;
		}
	}

	info->secstrings = secstrings;
	return 0;
}

/**
 * elf_validity_cache_index_info() - Validate and cache modinfo section
 * @info: Load info to populate the modinfo index on.
 *	  Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * Checks that if there is a .modinfo section, it is unique.
 * Then, it caches its index in &load_info->index.info.
 * Finally, it tries to populate the name to improve error messages.
 *
 * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
 */
static int elf_validity_cache_index_info(struct load_info *info)
{
	int info_idx;

	info_idx = find_any_unique_sec(info, ".modinfo");

	if (info_idx == 0)
		/* Early return, no .modinfo */
		return 0;

	if (info_idx < 0) {
		pr_err("Only one .modinfo section must exist.\n");
		return -ENOEXEC;
	}

	info->index.info = info_idx;
	/* Try to find a name early so we can log errors with a module name */
	info->name = get_modinfo(info, "name");

	return 0;
}
If the struct module of the kernel changes, a full
 * kernel rebuild is required.
 *
 * We have a few expectations for this special section; this function
 * validates all of them for us:
 *
 * * The section has contents
 * * The section is unique
 * * We expect the kernel to always have to allocate it: SHF_ALLOC
 * * The section size must match the kernel's run time's struct module
 *   size
 *
 * If all checks pass, the index will be cached in &load_info->index.mod
 *
 * Return: %0 on validation success, %-ENOEXEC on failure
 */
static int elf_validity_cache_index_mod(struct load_info *info)
{
	Elf_Shdr *shdr;
	int mod_idx;

	mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
	if (mod_idx <= 0) {
		pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	shdr = &info->sechdrs[mod_idx];

	if (shdr->sh_type == SHT_NOBITS) {
		pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	if (!(shdr->sh_flags & SHF_ALLOC)) {
		pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	if (shdr->sh_size != sizeof(struct module)) {
		pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	info->index.mod = mod_idx;

	return 0;
}

/**
 * elf_validity_cache_index_sym() - Validate and cache symtab index
 * @info: Load info to cache symtab index in.
 *	  Must have &load_info->sechdrs and &load_info->secstrings populated.
 *
 * Checks that there is exactly one symbol table, then caches its index in
 * &load_info->index.sym.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_sym(struct load_info *info)
{
	unsigned int sym_idx;
	unsigned int num_sym_secs = 0;
	int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			num_sym_secs++;
			sym_idx = i;
		}
	}

	if (num_sym_secs != 1) {
		pr_warn("%s: module has no symbols (stripped?)\n",
			info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	info->index.sym = sym_idx;

	return 0;
}

/**
 * elf_validity_cache_index_str() - Validate and cache strtab index
 * @info: Load info to cache strtab index in.
 *	  Must have &load_info->sechdrs and &load_info->secstrings populated.
 *	  Must have &load_info->index.sym populated.
 *
 * Looks at the symbol table's associated string table, makes sure it is
 * in-bounds, and caches it.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_str(struct load_info *info)
{
	unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;

	if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
		       str_idx, str_idx, info->hdr->e_shnum);
		return -ENOEXEC;
	}

	info->index.str = str_idx;
	return 0;
}

/**
 * elf_validity_cache_index_versions() - Validate and cache version indices
 * @info: Load info to cache version indices in.
 *	  Must have &load_info->sechdrs and &load_info->secstrings populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *	   uapi/linux/module.h
 *
 * If we're ignoring modversions based on @flags, zero all version indices
 * and return success. Otherwise check:
 *
 * * If "__version_ext_crcs" is present, "__version_ext_names" is present
 * * There is a name present for every crc
 *
 * Then populate:
 *
 * * &load_info->index.vers
 * * &load_info->index.vers_ext_crc
 * * &load_info->index.vers_ext_name
 *
 * if present.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_cache_index_versions(struct load_info *info, int flags)
{
	unsigned int vers_ext_crc;
	unsigned int vers_ext_name;
	size_t crc_count;
	size_t remaining_len;
	size_t name_size;
	char *name;

	/* If modversions were suppressed, pretend we didn't find any */
	if (flags & MODULE_INIT_IGNORE_MODVERSIONS) {
		info->index.vers = 0;
		info->index.vers_ext_crc = 0;
		info->index.vers_ext_name = 0;
		return 0;
	}

	vers_ext_crc = find_sec(info, "__version_ext_crcs");
	vers_ext_name = find_sec(info, "__version_ext_names");

	/* If we have one field, we must have the other */
	if (!!vers_ext_crc != !!vers_ext_name) {
		pr_err("extended version crc+name presence does not match");
		return -ENOEXEC;
	}

	/*
	 * If we have extended version information, we should have the same
	 * number of entries in every section.
	 */
	if (vers_ext_crc) {
		crc_count = info->sechdrs[vers_ext_crc].sh_size / sizeof(u32);
		name = (void *)info->hdr +
			info->sechdrs[vers_ext_name].sh_offset;
		remaining_len = info->sechdrs[vers_ext_name].sh_size;

		while (crc_count--) {
			name_size = strnlen(name, remaining_len) + 1;
			if (name_size > remaining_len) {
				pr_err("more extended version crcs than names");
				return -ENOEXEC;
			}
			remaining_len -= name_size;
			name += name_size;
		}
	}

	info->index.vers = find_sec(info, "__versions");
	info->index.vers_ext_crc = vers_ext_crc;
	info->index.vers_ext_name = vers_ext_name;
	return 0;
}

/**
 * elf_validity_cache_index() - Resolve, validate, cache section indices
 * @info: Load info to read from and update.
 *	  &load_info->sechdrs and &load_info->secstrings must be populated.
 * @flags: Load flags, relevant to suppress version loading, see
 *	   uapi/linux/module.h
 *
 * Populates &load_info->index, validating as it goes.
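 *
 * (Illustrative aside, not in the original kernel-doc: the cached indices
 * let later stages address sections directly instead of re-scanning, e.g.
 *
 *	str_shdr = &info->sechdrs[info->index.str];
 *
 * as in elf_validity_cache_strtab() below.)
 *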
 * See child functions for per-field validation:
 *
 * * elf_validity_cache_index_info()
 * * elf_validity_cache_index_mod()
 * * elf_validity_cache_index_sym()
 * * elf_validity_cache_index_str()
 * * elf_validity_cache_index_versions()
 *
 * If CONFIG_SMP is enabled, load the percpu section by name with no
 * validation.
 *
 * Return: %0 on success, negative error code if an index failed validation.
 */
static int elf_validity_cache_index(struct load_info *info, int flags)
{
	int err;

	err = elf_validity_cache_index_info(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_mod(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_sym(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_str(info);
	if (err < 0)
		return err;
	err = elf_validity_cache_index_versions(info, flags);
	if (err < 0)
		return err;

	info->index.pcpu = find_pcpusec(info);

	return 0;
}

/**
 * elf_validity_cache_strtab() - Validate and cache symbol string table
 * @info: Load info to read from and update.
 *	  Must have &load_info->sechdrs and &load_info->secstrings populated.
 *	  Must have &load_info->index populated.
 *
 * Checks:
 *
 * * The string table is not empty.
 * * The string table starts and ends with NUL (required by ELF spec).
 * * Every &Elf_Sym->st_name offset in the symbol table is inbounds of the
 *   string table.
 *
 * And caches the pointer as &load_info->strtab in @info.
 *
 * Return: %0 on success, negative error code if a check failed.
 */
static int elf_validity_cache_strtab(struct load_info *info)
{
	Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
	Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
	char *strtab = (char *)info->hdr + str_shdr->sh_offset;
	Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;
	int i;

	if (str_shdr->sh_size == 0) {
		pr_err("empty symbol string table\n");
		return -ENOEXEC;
	}
	if (strtab[0] != '\0') {
		pr_err("symbol string table missing leading NUL\n");
		return -ENOEXEC;
	}
	if (strtab[str_shdr->sh_size - 1] != '\0') {
		pr_err("symbol string table isn't NUL terminated\n");
		return -ENOEXEC;
	}

	/*
	 * Now that we know strtab is correctly structured, check symbol
	 * starts are inbounds before they're used later.
	 */
	for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
		if (syms[i].st_name >= str_shdr->sh_size) {
			pr_err("symbol name out of bounds in string table");
			return -ENOEXEC;
		}
	}

	info->strtab = strtab;
	return 0;
}

/*
 * Check the userspace-passed ELF module against our expectations, and cache
 * useful variables for further processing as we go.
 *
 * This does basic validity checks against section offsets and sizes, the
 * section name string table, and the indices used for it (sh_name).
 *
 * Since we're already checking the ELF sections, we also cache useful
 * variables which will be used later for our convenience:
 *
 *	o pointers to section headers
 *	o cache the modinfo symbol section
 *	o cache the string symbol section
 *	o cache the module section
 *
 * As a last step we set info->mod to the temporary copy of the module in
 * info->hdr.
The final one will be allocated in move_module(). Any 2366 * modifications we make to our copy of the module will be carried over 2367 * to the final minted module. 2368 */ 2369 static int elf_validity_cache_copy(struct load_info *info, int flags) 2370 { 2371 int err; 2372 2373 err = elf_validity_cache_sechdrs(info); 2374 if (err < 0) 2375 return err; 2376 err = elf_validity_cache_secstrings(info); 2377 if (err < 0) 2378 return err; 2379 err = elf_validity_cache_index(info, flags); 2380 if (err < 0) 2381 return err; 2382 err = elf_validity_cache_strtab(info); 2383 if (err < 0) 2384 return err; 2385 2386 /* This is temporary: point mod into copy of data. */ 2387 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 2388 2389 /* 2390 * If we didn't load the .modinfo 'name' field earlier, fall back to 2391 * on-disk struct mod 'name' field. 2392 */ 2393 if (!info->name) 2394 info->name = info->mod->name; 2395 2396 return 0; 2397 } 2398 2399 #define COPY_CHUNK_SIZE (16*PAGE_SIZE) 2400 2401 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 2402 { 2403 do { 2404 unsigned long n = min(len, COPY_CHUNK_SIZE); 2405 2406 if (copy_from_user(dst, usrc, n) != 0) 2407 return -EFAULT; 2408 cond_resched(); 2409 dst += n; 2410 usrc += n; 2411 len -= n; 2412 } while (len); 2413 return 0; 2414 } 2415 2416 static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 2417 { 2418 if (!get_modinfo(info, "livepatch")) 2419 /* Nothing more to do */ 2420 return 0; 2421 2422 if (set_livepatch_module(mod)) 2423 return 0; 2424 2425 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 2426 mod->name); 2427 return -ENOEXEC; 2428 } 2429 2430 static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 2431 { 2432 if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 2433 return; 2434 2435 pr_warn("%s: loading module not compiled with retpoline compiler.\n", 2436 mod->name); 2437 } 2438 2439 /* Sets info->hdr and info->len. */ 2440 static int copy_module_from_user(const void __user *umod, unsigned long len, 2441 struct load_info *info) 2442 { 2443 int err; 2444 2445 info->len = len; 2446 if (info->len < sizeof(*(info->hdr))) 2447 return -ENOEXEC; 2448 2449 err = security_kernel_load_data(LOADING_MODULE, true); 2450 if (err) 2451 return err; 2452 2453 /* Suck in entire file: we'll want most of it. */ 2454 info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 2455 if (!info->hdr) 2456 return -ENOMEM; 2457 2458 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 2459 err = -EFAULT; 2460 goto out; 2461 } 2462 2463 err = security_kernel_post_load_data((char *)info->hdr, info->len, 2464 LOADING_MODULE, "init_module"); 2465 out: 2466 if (err) 2467 vfree(info->hdr); 2468 2469 return err; 2470 } 2471 2472 static void free_copy(struct load_info *info, int flags) 2473 { 2474 if (flags & MODULE_INIT_COMPRESSED_FILE) 2475 module_decompress_cleanup(info); 2476 else 2477 vfree(info->hdr); 2478 } 2479 2480 static int rewrite_section_headers(struct load_info *info, int flags) 2481 { 2482 unsigned int i; 2483 2484 /* This should always be true, but let's be sure. */ 2485 info->sechdrs[0].sh_addr = 0; 2486 2487 for (i = 1; i < info->hdr->e_shnum; i++) { 2488 Elf_Shdr *shdr = &info->sechdrs[i]; 2489 2490 /* 2491 * Mark all sections sh_addr with their address in the 2492 * temporary image. 
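		 *
		 * (Worked example with illustrative values, not from the
		 * original comment: if the temporary image sits at
		 * info->hdr == 0xffff888000100000 and a section has
		 * sh_offset == 0x1000, the assignment below yields
		 * sh_addr == 0xffff888000101000.)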
		 */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

	}

	/* Track but don't keep modinfo and version sections. */
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers_ext_crc].sh_flags &=
		~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers_ext_name].sh_flags &=
		~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;

	return 0;
}

static const char *const module_license_offenders[] = {
	/* driverloader was caught wrongly pretending to be under GPL */
	"driverloader",

	/* lve claims to be GPL but upstream won't provide source */
	"lve",
};

/*
 * These calls taint the kernel depending on certain module circumstances.
 */
static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
{
	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
	size_t i;

	if (!get_modinfo(info, "intree")) {
		if (!test_taint(TAINT_OOT_MODULE))
			pr_warn("%s: loading out-of-tree module taints kernel.\n",
				mod->name);
		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
	}

	check_modinfo_retpoline(mod, info);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
		pr_warn("%s: module is from the staging directory, the quality "
			"is unknown, you have been warned.\n", mod->name);
	}

	if (is_livepatch_module(mod)) {
		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
			       mod->name);
	}

	module_license_taint_check(mod, get_modinfo(info, "license"));

	if (get_modinfo(info, "test")) {
		if (!test_taint(TAINT_TEST))
			pr_warn("%s: loading test module taints kernel.\n",
				mod->name);
		add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
	}
#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
	 */
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);

	for (i = 0; i < ARRAY_SIZE(module_license_offenders); ++i) {
		if (strcmp(mod->name, module_license_offenders[i]) == 0)
			add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
					 LOCKDEP_NOW_UNRELIABLE);
	}

	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
		pr_warn("%s: module license taints kernel.\n", mod->name);
}

static int check_modinfo(struct module *mod, struct load_info *info, int flags)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
		modmagic = NULL;

	/* This is allowed: modprobe --force will invalidate it.
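	 * (Illustrative, not from the original comment: vermagic is derived
	 * from VERMAGIC_STRING and looks something like
	 * "6.9.0 SMP preempt mod_unload", so a module built against a
	 * different tree fails the same_magic() check below.)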
*/ 2591 if (!modmagic) { 2592 err = try_to_force_load(mod, "bad vermagic"); 2593 if (err) 2594 return err; 2595 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 2596 pr_err("%s: version magic '%s' should be '%s'\n", 2597 info->name, modmagic, vermagic); 2598 return -ENOEXEC; 2599 } 2600 2601 err = check_modinfo_livepatch(mod, info); 2602 if (err) 2603 return err; 2604 2605 return 0; 2606 } 2607 2608 static int find_module_sections(struct module *mod, struct load_info *info) 2609 { 2610 mod->kp = section_objs(info, "__param", 2611 sizeof(*mod->kp), &mod->num_kp); 2612 mod->syms = section_objs(info, "__ksymtab", 2613 sizeof(*mod->syms), &mod->num_syms); 2614 mod->crcs = section_addr(info, "__kcrctab"); 2615 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 2616 sizeof(*mod->gpl_syms), 2617 &mod->num_gpl_syms); 2618 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 2619 2620 #ifdef CONFIG_CONSTRUCTORS 2621 mod->ctors = section_objs(info, ".ctors", 2622 sizeof(*mod->ctors), &mod->num_ctors); 2623 if (!mod->ctors) 2624 mod->ctors = section_objs(info, ".init_array", 2625 sizeof(*mod->ctors), &mod->num_ctors); 2626 else if (find_sec(info, ".init_array")) { 2627 /* 2628 * This shouldn't happen with same compiler and binutils 2629 * building all parts of the module. 2630 */ 2631 pr_warn("%s: has both .ctors and .init_array.\n", 2632 mod->name); 2633 return -EINVAL; 2634 } 2635 #endif 2636 2637 mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 2638 &mod->noinstr_text_size); 2639 2640 #ifdef CONFIG_TRACEPOINTS 2641 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 2642 sizeof(*mod->tracepoints_ptrs), 2643 &mod->num_tracepoints); 2644 #endif 2645 #ifdef CONFIG_TREE_SRCU 2646 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 2647 sizeof(*mod->srcu_struct_ptrs), 2648 &mod->num_srcu_structs); 2649 #endif 2650 #ifdef CONFIG_BPF_EVENTS 2651 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 2652 sizeof(*mod->bpf_raw_events), 2653 &mod->num_bpf_raw_events); 2654 #endif 2655 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2656 mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 2657 mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, 2658 &mod->btf_base_data_size); 2659 #endif 2660 #ifdef CONFIG_JUMP_LABEL 2661 mod->jump_entries = section_objs(info, "__jump_table", 2662 sizeof(*mod->jump_entries), 2663 &mod->num_jump_entries); 2664 #endif 2665 #ifdef CONFIG_EVENT_TRACING 2666 mod->trace_events = section_objs(info, "_ftrace_events", 2667 sizeof(*mod->trace_events), 2668 &mod->num_trace_events); 2669 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 2670 sizeof(*mod->trace_evals), 2671 &mod->num_trace_evals); 2672 #endif 2673 #ifdef CONFIG_TRACING 2674 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2675 sizeof(*mod->trace_bprintk_fmt_start), 2676 &mod->num_trace_bprintk_fmt); 2677 #endif 2678 #ifdef CONFIG_DYNAMIC_FTRACE 2679 /* sechdrs[0].sh_size is always zero */ 2680 mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 2681 sizeof(*mod->ftrace_callsites), 2682 &mod->num_ftrace_callsites); 2683 #endif 2684 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 2685 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 2686 sizeof(*mod->ei_funcs), 2687 &mod->num_ei_funcs); 2688 #endif 2689 #ifdef CONFIG_KPROBES 2690 mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 2691 &mod->kprobes_text_size); 2692 mod->kprobe_blacklist = section_objs(info, 
"_kprobe_blacklist", 2693 sizeof(unsigned long), 2694 &mod->num_kprobe_blacklist); 2695 #endif 2696 #ifdef CONFIG_PRINTK_INDEX 2697 mod->printk_index_start = section_objs(info, ".printk_index", 2698 sizeof(*mod->printk_index_start), 2699 &mod->printk_index_size); 2700 #endif 2701 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 2702 mod->static_call_sites = section_objs(info, ".static_call_sites", 2703 sizeof(*mod->static_call_sites), 2704 &mod->num_static_call_sites); 2705 #endif 2706 #if IS_ENABLED(CONFIG_KUNIT) 2707 mod->kunit_suites = section_objs(info, ".kunit_test_suites", 2708 sizeof(*mod->kunit_suites), 2709 &mod->num_kunit_suites); 2710 mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites", 2711 sizeof(*mod->kunit_init_suites), 2712 &mod->num_kunit_init_suites); 2713 #endif 2714 2715 mod->extable = section_objs(info, "__ex_table", 2716 sizeof(*mod->extable), &mod->num_exentries); 2717 2718 if (section_addr(info, "__obsparm")) 2719 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 2720 2721 #ifdef CONFIG_DYNAMIC_DEBUG_CORE 2722 mod->dyndbg_info.descs = section_objs(info, "__dyndbg", 2723 sizeof(*mod->dyndbg_info.descs), 2724 &mod->dyndbg_info.num_descs); 2725 mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", 2726 sizeof(*mod->dyndbg_info.classes), 2727 &mod->dyndbg_info.num_classes); 2728 #endif 2729 2730 return 0; 2731 } 2732 2733 static int move_module(struct module *mod, struct load_info *info) 2734 { 2735 int i, ret; 2736 enum mod_mem_type t = MOD_MEM_NUM_TYPES; 2737 bool codetag_section_found = false; 2738 2739 for_each_mod_mem_type(type) { 2740 if (!mod->mem[type].size) { 2741 mod->mem[type].base = NULL; 2742 continue; 2743 } 2744 2745 ret = module_memory_alloc(mod, type); 2746 if (ret) { 2747 t = type; 2748 goto out_err; 2749 } 2750 } 2751 2752 /* Transfer each section which specifies SHF_ALLOC */ 2753 pr_debug("Final section addresses for %s:\n", mod->name); 2754 for (i = 0; i < info->hdr->e_shnum; i++) { 2755 void *dest; 2756 Elf_Shdr *shdr = &info->sechdrs[i]; 2757 const char *sname; 2758 2759 if (!(shdr->sh_flags & SHF_ALLOC)) 2760 continue; 2761 2762 sname = info->secstrings + shdr->sh_name; 2763 /* 2764 * Load codetag sections separately as they might still be used 2765 * after module unload. 2766 */ 2767 if (codetag_needs_module_section(mod, sname, shdr->sh_size)) { 2768 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size, 2769 arch_mod_section_prepend(mod, i), shdr->sh_addralign); 2770 if (WARN_ON(!dest)) { 2771 ret = -EINVAL; 2772 goto out_err; 2773 } 2774 if (IS_ERR(dest)) { 2775 ret = PTR_ERR(dest); 2776 goto out_err; 2777 } 2778 codetag_section_found = true; 2779 } else { 2780 enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 2781 unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 2782 2783 dest = mod->mem[type].base + offset; 2784 } 2785 2786 if (shdr->sh_type != SHT_NOBITS) { 2787 /* 2788 * Our ELF checker already validated this, but let's 2789 * be pedantic and make the goal clearer. We actually 2790 * end up copying over all modifications made to the 2791 * userspace copy of the entire struct module. 
2792 */ 2793 if (i == info->index.mod && 2794 (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { 2795 ret = -ENOEXEC; 2796 goto out_err; 2797 } 2798 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2799 } 2800 /* 2801 * Update the userspace copy's ELF section address to point to 2802 * our newly allocated memory as a pure convenience so that 2803 * users of info can keep taking advantage and using the newly 2804 * minted official memory area. 2805 */ 2806 shdr->sh_addr = (unsigned long)dest; 2807 pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, 2808 (long)shdr->sh_size, info->secstrings + shdr->sh_name); 2809 } 2810 2811 return 0; 2812 out_err: 2813 module_memory_restore_rox(mod); 2814 while (t--) 2815 module_memory_free(mod, t); 2816 if (codetag_section_found) 2817 codetag_free_module_sections(mod); 2818 2819 return ret; 2820 } 2821 2822 static int check_export_symbol_versions(struct module *mod) 2823 { 2824 #ifdef CONFIG_MODVERSIONS 2825 if ((mod->num_syms && !mod->crcs) || 2826 (mod->num_gpl_syms && !mod->gpl_crcs)) { 2827 return try_to_force_load(mod, 2828 "no versions for exported symbols"); 2829 } 2830 #endif 2831 return 0; 2832 } 2833 2834 static void flush_module_icache(const struct module *mod) 2835 { 2836 /* 2837 * Flush the instruction cache, since we've played with text. 2838 * Do it before processing of module parameters, so the module 2839 * can provide parameter accessor functions of its own. 2840 */ 2841 for_each_mod_mem_type(type) { 2842 const struct module_memory *mod_mem = &mod->mem[type]; 2843 2844 if (mod_mem->size) { 2845 flush_icache_range((unsigned long)mod_mem->base, 2846 (unsigned long)mod_mem->base + mod_mem->size); 2847 } 2848 } 2849 } 2850 2851 bool __weak module_elf_check_arch(Elf_Ehdr *hdr) 2852 { 2853 return true; 2854 } 2855 2856 int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 2857 Elf_Shdr *sechdrs, 2858 char *secstrings, 2859 struct module *mod) 2860 { 2861 return 0; 2862 } 2863 2864 /* module_blacklist is a comma-separated list of module names */ 2865 static char *module_blacklist; 2866 static bool blacklisted(const char *module_name) 2867 { 2868 const char *p; 2869 size_t len; 2870 2871 if (!module_blacklist) 2872 return false; 2873 2874 for (p = module_blacklist; *p; p += len) { 2875 len = strcspn(p, ","); 2876 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 2877 return true; 2878 if (p[len] == ',') 2879 len++; 2880 } 2881 return false; 2882 } 2883 core_param(module_blacklist, module_blacklist, charp, 0400); 2884 2885 static struct module *layout_and_allocate(struct load_info *info, int flags) 2886 { 2887 struct module *mod; 2888 int err; 2889 2890 /* Allow arches to frob section contents and sizes. */ 2891 err = module_frob_arch_sections(info->hdr, info->sechdrs, 2892 info->secstrings, info->mod); 2893 if (err < 0) 2894 return ERR_PTR(err); 2895 2896 err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 2897 info->secstrings, info->mod); 2898 if (err < 0) 2899 return ERR_PTR(err); 2900 2901 /* We will do a special allocation for per-cpu sections later. */ 2902 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 2903 2904 /* 2905 * Mark relevant sections as SHF_RO_AFTER_INIT so layout_sections() can 2906 * put them in the right place. 2907 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 2908 */ 2909 module_mark_ro_after_init(info->hdr, info->sechdrs, info->secstrings); 2910 2911 /* 2912 * Determine total sizes, and put offsets in sh_entsize. 
For now
	 * this is done generically; there don't appear to be any
	 * special cases for the architectures.
	 */
	layout_sections(info->mod, info);
	layout_symtab(info->mod, info);

	/* Allocate and move to the final place */
	err = move_module(info->mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	codetag_module_replaced(info->mod, mod);

	return mod;
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	codetag_free_module_sections(mod);

	free_mod_mem(mod);
}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* For freeing module_init on success, in case kallsyms is traversing it */
struct mod_initfree {
	struct llist_node node;
	void *init_text;
	void *init_data;
	void *init_rodata;
};

static void do_free_init(struct work_struct *w)
{
	struct llist_node *pos, *n, *list;
	struct mod_initfree *initfree;

	list = llist_del_all(&init_free_list);

	synchronize_rcu();

	llist_for_each_safe(pos, n, list) {
		initfree = container_of(pos, struct mod_initfree, node);
		execmem_free(initfree->init_text);
		execmem_free(initfree->init_data);
		execmem_free(initfree->init_rodata);
		kfree(initfree);
	}
}

void flush_module_init_free_work(void)
{
	flush_work(&init_free_wq);
}

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/* Default value for module->async_probe_requested */
static bool async_probe;
module_param(async_probe, bool, 0644);

/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
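 *
 * (Illustrative usage, not in the original comment; lx-symbols ships with
 * the kernel's scripts/gdb helpers and is assumed to be sourced already:
 *
 *	(gdb) lx-symbols
 *	loading vmlinux
 *	scanning for modules in ...
 *
 * after which gdb reloads module symbols whenever this function is hit.)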
3018 */ 3019 static noinline int do_init_module(struct module *mod) 3020 { 3021 int ret = 0; 3022 struct mod_initfree *freeinit; 3023 #if defined(CONFIG_MODULE_STATS) 3024 unsigned int text_size = 0, total_size = 0; 3025 3026 for_each_mod_mem_type(type) { 3027 const struct module_memory *mod_mem = &mod->mem[type]; 3028 if (mod_mem->size) { 3029 total_size += mod_mem->size; 3030 if (type == MOD_TEXT || type == MOD_INIT_TEXT) 3031 text_size += mod_mem->size; 3032 } 3033 } 3034 #endif 3035 3036 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 3037 if (!freeinit) { 3038 ret = -ENOMEM; 3039 goto fail; 3040 } 3041 freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; 3042 freeinit->init_data = mod->mem[MOD_INIT_DATA].base; 3043 freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; 3044 3045 do_mod_ctors(mod); 3046 /* Start the module */ 3047 if (mod->init != NULL) 3048 ret = do_one_initcall(mod->init); 3049 if (ret < 0) { 3050 goto fail_free_freeinit; 3051 } 3052 if (ret > 0) { 3053 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 3054 "follow 0/-E convention\n" 3055 "%s: loading module anyway...\n", 3056 __func__, mod->name, ret, __func__); 3057 dump_stack(); 3058 } 3059 3060 /* Now it's a first class citizen! */ 3061 mod->state = MODULE_STATE_LIVE; 3062 blocking_notifier_call_chain(&module_notify_list, 3063 MODULE_STATE_LIVE, mod); 3064 3065 /* Delay uevent until module has finished its init routine */ 3066 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 3067 3068 /* 3069 * We need to finish all async code before the module init sequence 3070 * is done. This has potential to deadlock if synchronous module 3071 * loading is requested from async (which is not allowed!). 3072 * 3073 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 3074 * request_module() from async workers") for more details. 3075 */ 3076 if (!mod->async_probe_requested) 3077 async_synchronize_full(); 3078 3079 ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, 3080 mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); 3081 mutex_lock(&module_mutex); 3082 /* Drop initial reference. */ 3083 module_put(mod); 3084 trim_init_extable(mod); 3085 #ifdef CONFIG_KALLSYMS 3086 /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 3087 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 3088 #endif 3089 ret = module_enable_rodata_ro_after_init(mod); 3090 if (ret) 3091 pr_warn("%s: module_enable_rodata_ro_after_init() returned %d, " 3092 "ro_after_init data might still be writable\n", 3093 mod->name, ret); 3094 3095 mod_tree_remove_init(mod); 3096 module_arch_freeing_init(mod); 3097 for_class_mod_mem_type(type, init) { 3098 mod->mem[type].base = NULL; 3099 mod->mem[type].size = 0; 3100 } 3101 3102 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 3103 /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ 3104 mod->btf_data = NULL; 3105 mod->btf_base_data = NULL; 3106 #endif 3107 /* 3108 * We want to free module_init, but be aware that kallsyms may be 3109 * walking this within an RCU read section. In all the failure paths, we 3110 * call synchronize_rcu(), but we don't want to slow down the success 3111 * path. execmem_free() cannot be called in an interrupt, so do the 3112 * work and call synchronize_rcu() in a work queue. 3113 * 3114 * Note that execmem_alloc() on most architectures creates W+X page 3115 * mappings which won't be cleaned up until do_free_init() runs. 
Any 3116 * code such as mark_rodata_ro() which depends on those mappings to 3117 * be cleaned up needs to sync with the queued work by invoking 3118 * flush_module_init_free_work(). 3119 */ 3120 if (llist_add(&freeinit->node, &init_free_list)) 3121 schedule_work(&init_free_wq); 3122 3123 mutex_unlock(&module_mutex); 3124 wake_up_all(&module_wq); 3125 3126 mod_stat_add_long(text_size, &total_text_size); 3127 mod_stat_add_long(total_size, &total_mod_size); 3128 3129 mod_stat_inc(&modcount); 3130 3131 return 0; 3132 3133 fail_free_freeinit: 3134 kfree(freeinit); 3135 fail: 3136 /* Try to protect us from buggy refcounters. */ 3137 mod->state = MODULE_STATE_GOING; 3138 synchronize_rcu(); 3139 module_put(mod); 3140 blocking_notifier_call_chain(&module_notify_list, 3141 MODULE_STATE_GOING, mod); 3142 klp_module_going(mod); 3143 ftrace_release_mod(mod); 3144 free_module(mod); 3145 wake_up_all(&module_wq); 3146 3147 return ret; 3148 } 3149 3150 static int may_init_module(void) 3151 { 3152 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3153 return -EPERM; 3154 3155 return 0; 3156 } 3157 3158 /* Is this module of this name done loading? No locks held. */ 3159 static bool finished_loading(const char *name) 3160 { 3161 struct module *mod; 3162 bool ret; 3163 3164 /* 3165 * The module_mutex should not be a heavily contended lock; 3166 * if we get the occasional sleep here, we'll go an extra iteration 3167 * in the wait_event_interruptible(), which is harmless. 3168 */ 3169 sched_annotate_sleep(); 3170 mutex_lock(&module_mutex); 3171 mod = find_module_all(name, strlen(name), true); 3172 ret = !mod || mod->state == MODULE_STATE_LIVE 3173 || mod->state == MODULE_STATE_GOING; 3174 mutex_unlock(&module_mutex); 3175 3176 return ret; 3177 } 3178 3179 /* Must be called with module_mutex held */ 3180 static int module_patient_check_exists(const char *name, 3181 enum fail_dup_mod_reason reason) 3182 { 3183 struct module *old; 3184 int err = 0; 3185 3186 old = find_module_all(name, strlen(name), true); 3187 if (old == NULL) 3188 return 0; 3189 3190 if (old->state == MODULE_STATE_COMING || 3191 old->state == MODULE_STATE_UNFORMED) { 3192 /* Wait in case it fails to load. */ 3193 mutex_unlock(&module_mutex); 3194 err = wait_event_interruptible(module_wq, 3195 finished_loading(name)); 3196 mutex_lock(&module_mutex); 3197 if (err) 3198 return err; 3199 3200 /* The module might have gone in the meantime. */ 3201 old = find_module_all(name, strlen(name), true); 3202 } 3203 3204 if (try_add_failed_module(name, reason)) 3205 pr_warn("Could not add fail-tracking for module: %s\n", name); 3206 3207 /* 3208 * We are here only when the same module was being loaded. Do 3209 * not try to load it again right now. It prevents long delays 3210 * caused by serialized module load failures. It might happen 3211 * when more devices of the same type trigger load of 3212 * a particular module. 3213 */ 3214 if (old && old->state == MODULE_STATE_LIVE) 3215 return -EEXIST; 3216 return -EBUSY; 3217 } 3218 3219 /* 3220 * We try to place it in the list now to make sure it's unique before 3221 * we dedicate too many resources. In particular, temporary percpu 3222 * memory exhaustion. 
3223 */ 3224 static int add_unformed_module(struct module *mod) 3225 { 3226 int err; 3227 3228 mod->state = MODULE_STATE_UNFORMED; 3229 3230 mutex_lock(&module_mutex); 3231 err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); 3232 if (err) 3233 goto out; 3234 3235 mod_update_bounds(mod); 3236 list_add_rcu(&mod->list, &modules); 3237 mod_tree_insert(mod); 3238 err = 0; 3239 3240 out: 3241 mutex_unlock(&module_mutex); 3242 return err; 3243 } 3244 3245 static int complete_formation(struct module *mod, struct load_info *info) 3246 { 3247 int err; 3248 3249 mutex_lock(&module_mutex); 3250 3251 /* Find duplicate symbols (must be called under lock). */ 3252 err = verify_exported_symbols(mod); 3253 if (err < 0) 3254 goto out; 3255 3256 /* These rely on module_mutex for list integrity. */ 3257 module_bug_finalize(info->hdr, info->sechdrs, mod); 3258 module_cfi_finalize(info->hdr, info->sechdrs, mod); 3259 3260 err = module_enable_rodata_ro(mod); 3261 if (err) 3262 goto out_strict_rwx; 3263 err = module_enable_data_nx(mod); 3264 if (err) 3265 goto out_strict_rwx; 3266 err = module_enable_text_rox(mod); 3267 if (err) 3268 goto out_strict_rwx; 3269 3270 /* 3271 * Mark state as coming so strong_try_module_get() ignores us, 3272 * but kallsyms etc. can see us. 3273 */ 3274 mod->state = MODULE_STATE_COMING; 3275 mutex_unlock(&module_mutex); 3276 3277 return 0; 3278 3279 out_strict_rwx: 3280 module_bug_cleanup(mod); 3281 out: 3282 mutex_unlock(&module_mutex); 3283 return err; 3284 } 3285 3286 static int prepare_coming_module(struct module *mod) 3287 { 3288 int err; 3289 3290 ftrace_module_enable(mod); 3291 err = klp_module_coming(mod); 3292 if (err) 3293 return err; 3294 3295 err = blocking_notifier_call_chain_robust(&module_notify_list, 3296 MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 3297 err = notifier_to_errno(err); 3298 if (err) 3299 klp_module_going(mod); 3300 3301 return err; 3302 } 3303 3304 static int unknown_module_param_cb(char *param, char *val, const char *modname, 3305 void *arg) 3306 { 3307 struct module *mod = arg; 3308 int ret; 3309 3310 if (strcmp(param, "async_probe") == 0) { 3311 if (kstrtobool(val, &mod->async_probe_requested)) 3312 mod->async_probe_requested = true; 3313 return 0; 3314 } 3315 3316 /* Check for magic 'dyndbg' arg */ 3317 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3318 if (ret != 0) 3319 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3320 return 0; 3321 } 3322 3323 /* Module within temporary copy, this doesn't do any allocation */ 3324 static int early_mod_check(struct load_info *info, int flags) 3325 { 3326 int err; 3327 3328 /* 3329 * Now that we know we have the correct module name, check 3330 * if it's blacklisted. 3331 */ 3332 if (blacklisted(info->name)) { 3333 pr_err("Module %s is blacklisted\n", info->name); 3334 return -EPERM; 3335 } 3336 3337 err = rewrite_section_headers(info, flags); 3338 if (err) 3339 return err; 3340 3341 /* Check module struct version now, before we try to use module. */ 3342 if (!check_modstruct_version(info, info->mod)) 3343 return -ENOEXEC; 3344 3345 err = check_modinfo(info->mod, info, flags); 3346 if (err) 3347 return err; 3348 3349 mutex_lock(&module_mutex); 3350 err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); 3351 mutex_unlock(&module_mutex); 3352 3353 return err; 3354 } 3355 3356 /* 3357 * Allocate and load the module: note that size of section 0 is always 3358 * zero, and we rely on this for optional sections. 
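 *
 * (Illustrative, not in the original comment: find_sec() returns index 0
 * for a missing section, and section 0 was validated earlier to have
 * sh_size == 0, so e.g. an absent "__param" section simply leaves
 * mod->num_kp at 0 with no special-casing.)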
3359 */ 3360 static int load_module(struct load_info *info, const char __user *uargs, 3361 int flags) 3362 { 3363 struct module *mod; 3364 bool module_allocated = false; 3365 long err = 0; 3366 char *after_dashes; 3367 3368 /* 3369 * Do the signature check (if any) first. All that 3370 * the signature check needs is info->len, it does 3371 * not need any of the section info. That can be 3372 * set up later. This will minimize the chances 3373 * of a corrupt module causing problems before 3374 * we even get to the signature check. 3375 * 3376 * The check will also adjust info->len by stripping 3377 * off the sig length at the end of the module, making 3378 * checks against info->len more correct. 3379 */ 3380 err = module_sig_check(info, flags); 3381 if (err) 3382 goto free_copy; 3383 3384 /* 3385 * Do basic sanity checks against the ELF header and 3386 * sections. Cache useful sections and set the 3387 * info->mod to the userspace passed struct module. 3388 */ 3389 err = elf_validity_cache_copy(info, flags); 3390 if (err) 3391 goto free_copy; 3392 3393 err = early_mod_check(info, flags); 3394 if (err) 3395 goto free_copy; 3396 3397 /* Figure out module layout, and allocate all the memory. */ 3398 mod = layout_and_allocate(info, flags); 3399 if (IS_ERR(mod)) { 3400 err = PTR_ERR(mod); 3401 goto free_copy; 3402 } 3403 3404 module_allocated = true; 3405 3406 audit_log_kern_module(info->name); 3407 3408 /* Reserve our place in the list. */ 3409 err = add_unformed_module(mod); 3410 if (err) 3411 goto free_module; 3412 3413 /* 3414 * We are tainting your kernel if your module gets into 3415 * the modules linked list somehow. 3416 */ 3417 module_augment_kernel_taints(mod, info); 3418 3419 /* To avoid stressing percpu allocator, do this once we're unique. */ 3420 err = percpu_modalloc(mod, info); 3421 if (err) 3422 goto unlink_mod; 3423 3424 /* Now module is in final location, initialize linked lists, etc. */ 3425 err = module_unload_init(mod); 3426 if (err) 3427 goto unlink_mod; 3428 3429 init_param_lock(mod); 3430 3431 /* 3432 * Now we've got everything in the final locations, we can 3433 * find optional sections. 3434 */ 3435 err = find_module_sections(mod, info); 3436 if (err) 3437 goto free_unload; 3438 3439 err = check_export_symbol_versions(mod); 3440 if (err) 3441 goto free_unload; 3442 3443 /* Set up MODINFO_ATTR fields */ 3444 err = setup_modinfo(mod, info); 3445 if (err) 3446 goto free_modinfo; 3447 3448 /* Fix up syms, so that st_value is a pointer to location. */ 3449 err = simplify_symbols(mod, info); 3450 if (err < 0) 3451 goto free_modinfo; 3452 3453 err = apply_relocations(mod, info); 3454 if (err < 0) 3455 goto free_modinfo; 3456 3457 err = post_relocation(mod, info); 3458 if (err < 0) 3459 goto free_modinfo; 3460 3461 flush_module_icache(mod); 3462 3463 /* Now copy in args */ 3464 mod->args = strndup_user(uargs, ~0UL >> 1); 3465 if (IS_ERR(mod->args)) { 3466 err = PTR_ERR(mod->args); 3467 goto free_arch_cleanup; 3468 } 3469 3470 init_build_id(mod, info); 3471 3472 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 3473 ftrace_module_init(mod); 3474 3475 /* Finally it's fully formed, ready to start executing. */ 3476 err = complete_formation(mod, info); 3477 if (err) 3478 goto ddebug_cleanup; 3479 3480 err = prepare_coming_module(mod); 3481 if (err) 3482 goto bug_cleanup; 3483 3484 mod->async_probe_requested = async_probe; 3485 3486 /* Module is ready to execute: parsing args may do that. 
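	 * (Illustrative, not from the original comment: for uargs such as
	 * "debug=1 -- leftover", parse_args() consumes "debug=1" and returns
	 * a pointer to "leftover", triggering the pr_warn() below.)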
	 */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	err = codetag_load_module(mod);
	if (err)
		goto sysfs_cleanup;

	/* Get rid of temporary copy. */
	free_copy(info, flags);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 sysfs_cleanup:
	mod_sysfs_teardown(mod);
 coming_cleanup:
	mod->state = MODULE_STATE_GOING;
	destroy_params(mod->kp, mod->num_kp);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
 bug_cleanup:
	mod->state = MODULE_STATE_GOING;
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

 ddebug_cleanup:
	ftrace_release_mod(mod);
	synchronize_rcu();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
	mod_stat_bump_invalid(info, flags);
	/* Free lock-classes; relies on the preceding sync_rcu() */
	for_class_mod_mem_type(type, core_data) {
		lockdep_free_key_range(mod->mem[type].base,
				       mod->mem[type].size);
	}

	module_memory_restore_rox(mod);
	module_deallocate(mod, info);
 free_copy:
	/*
	 * The info->len is always set. We distinguish between
	 * failures once the proper module was allocated and
	 * before that.
	 */
	if (!module_allocated) {
		audit_log_kern_module(info->name ? info->name : "?");
		mod_stat_bump_becoming(info, flags);
	}
	free_copy(info, flags);
	return err;
}

SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err) {
		mod_stat_inc(&failed_kreads);
		mod_stat_add_long(len, &invalid_kread_bytes);
		return err;
	}

	return load_module(&info, uargs, 0);
}

struct idempotent {
	const void *cookie;
	struct hlist_node entry;
	struct completion complete;
	int ret;
};

#define IDEM_HASH_BITS 8
static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
static DEFINE_SPINLOCK(idem_lock);

static bool idempotent(struct idempotent *u, const void *cookie)
{
	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
	struct hlist_head *head = idem_hash + hash;
	struct idempotent *existing;
	bool first;

	u->ret = -EINTR;
	u->cookie = cookie;
	init_completion(&u->complete);

	spin_lock(&idem_lock);
	first = true;
	hlist_for_each_entry(existing, head, entry) {
		if (existing->cookie != cookie)
			continue;
		first = false;
		break;
	}
	hlist_add_head(&u->entry, idem_hash + hash);
	spin_unlock(&idem_lock);

	return !first;
}

/*
 * We were the first one with 'cookie' on the list, and we ended
 * up completing the operation. We now need to walk the list,
 * remove everybody - which includes ourselves - fill in the return
 * value, and then complete the operation.
 */
static int idempotent_complete(struct idempotent *u, int ret)
{
	const void *cookie = u->cookie;
	int hash = hash_ptr(cookie, IDEM_HASH_BITS);
	struct hlist_head *head = idem_hash + hash;
	struct hlist_node *next;
	struct idempotent *pos;

	spin_lock(&idem_lock);
	hlist_for_each_entry_safe(pos, next, head, entry) {
		if (pos->cookie != cookie)
			continue;
		hlist_del_init(&pos->entry);
		pos->ret = ret;
		complete(&pos->complete);
	}
	spin_unlock(&idem_lock);
	return ret;
}

/*
 * Wait for the idempotent worker.
 *
 * If we get interrupted, we need to remove ourselves from the
 * idempotent list, and the completion may still come in.
 *
 * The 'idem_lock' protects against the race, and 'idem.ret' was
 * initialized to -EINTR and is thus always the right return
 * value even if the idempotent work then completes between
 * the wait_for_completion and the cleanup.
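 *
 * (Illustrative flow, not in the original comment: when two finit_module()
 * calls race on the same file, idempotent() returns false for the first
 * caller, which performs the real load; the second caller blocks here and
 * receives the first caller's return value via idempotent_complete().)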
3673 */ 3674 static int idempotent_wait_for_completion(struct idempotent *u) 3675 { 3676 if (wait_for_completion_interruptible(&u->complete)) { 3677 spin_lock(&idem_lock); 3678 if (!hlist_unhashed(&u->entry)) 3679 hlist_del(&u->entry); 3680 spin_unlock(&idem_lock); 3681 } 3682 return u->ret; 3683 } 3684 3685 static int init_module_from_file(struct file *f, const char __user * uargs, int flags) 3686 { 3687 struct load_info info = { }; 3688 void *buf = NULL; 3689 int len; 3690 3691 len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); 3692 if (len < 0) { 3693 mod_stat_inc(&failed_kreads); 3694 return len; 3695 } 3696 3697 if (flags & MODULE_INIT_COMPRESSED_FILE) { 3698 int err = module_decompress(&info, buf, len); 3699 vfree(buf); /* compressed data is no longer needed */ 3700 if (err) { 3701 mod_stat_inc(&failed_decompress); 3702 mod_stat_add_long(len, &invalid_decompress_bytes); 3703 return err; 3704 } 3705 } else { 3706 info.hdr = buf; 3707 info.len = len; 3708 } 3709 3710 return load_module(&info, uargs, flags); 3711 } 3712 3713 static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) 3714 { 3715 struct idempotent idem; 3716 3717 if (!(f->f_mode & FMODE_READ)) 3718 return -EBADF; 3719 3720 /* Are we the winners of the race and get to do this? */ 3721 if (!idempotent(&idem, file_inode(f))) { 3722 int ret = init_module_from_file(f, uargs, flags); 3723 return idempotent_complete(&idem, ret); 3724 } 3725 3726 /* 3727 * Somebody else won the race and is loading the module. 3728 */ 3729 return idempotent_wait_for_completion(&idem); 3730 } 3731 3732 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) 3733 { 3734 int err = may_init_module(); 3735 if (err) 3736 return err; 3737 3738 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); 3739 3740 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS 3741 |MODULE_INIT_IGNORE_VERMAGIC 3742 |MODULE_INIT_COMPRESSED_FILE)) 3743 return -EINVAL; 3744 3745 CLASS(fd, f)(fd); 3746 if (fd_empty(f)) 3747 return -EBADF; 3748 return idempotent_init_module(fd_file(f), uargs, flags); 3749 } 3750 3751 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ 3752 char *module_flags(struct module *mod, char *buf, bool show_state) 3753 { 3754 int bx = 0; 3755 3756 BUG_ON(mod->state == MODULE_STATE_UNFORMED); 3757 if (!mod->taints && !show_state) 3758 goto out; 3759 if (mod->taints || 3760 mod->state == MODULE_STATE_GOING || 3761 mod->state == MODULE_STATE_COMING) { 3762 buf[bx++] = '('; 3763 bx += module_flags_taint(mod->taints, buf + bx); 3764 /* Show a - for module-is-being-unloaded */ 3765 if (mod->state == MODULE_STATE_GOING && show_state) 3766 buf[bx++] = '-'; 3767 /* Show a + for module-is-being-loaded */ 3768 if (mod->state == MODULE_STATE_COMING && show_state) 3769 buf[bx++] = '+'; 3770 buf[bx++] = ')'; 3771 } 3772 out: 3773 buf[bx] = '\0'; 3774 3775 return buf; 3776 } 3777 3778 /* Given an address, look for it in the module exception tables. */ 3779 const struct exception_table_entry *search_module_extables(unsigned long addr) 3780 { 3781 struct module *mod; 3782 3783 guard(rcu)(); 3784 mod = __module_address(addr); 3785 if (!mod) 3786 return NULL; 3787 3788 if (!mod->num_exentries) 3789 return NULL; 3790 /* 3791 * The address passed here belongs to a module that is currently 3792 * invoked (we are running inside it). Therefore its module::refcnt 3793 * needs already be >0 to ensure that it is not removed at this stage. 
	 * All other users need to invoke this function within an RCU read
	 * section.
	 */
	return search_extable(mod->extable, mod->num_exentries, addr);
}

/**
 * is_module_address() - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	guard(rcu)();
	return __module_address(addr) != NULL;
}

/**
 * __module_address() - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called within an RCU read section or with module_mutex held so
 * that the module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
		goto lookup;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
		goto lookup;
#endif

	return NULL;

lookup:
	mod = mod_find(addr, &mod_tree);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}

/**
 * is_module_text_address() - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module. See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	guard(rcu)();
	return __module_text_address(addr) != NULL;
}

void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data)
{
	struct module *mod;

	guard(rcu)();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (func(mod, data))
			break;
	}
}

/**
 * __module_text_address() - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called within an RCU read section or with module_mutex held so
 * that the module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
		    !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
			mod = NULL;
	}
	return mod;
}

/* Don't grab lock, we're oopsing.
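 *
 * (Illustrative output, not from the original comment, with taint/state
 * markers appended per module by module_flags() above, e.g.:
 * "Modules linked in: ext4 mbcache foo(O+) [last unloaded: bar]".)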
*/ 3891 void print_modules(void) 3892 { 3893 struct module *mod; 3894 char buf[MODULE_FLAGS_BUF_SIZE]; 3895 3896 printk(KERN_DEFAULT "Modules linked in:"); 3897 /* Most callers should already have preempt disabled, but make sure */ 3898 guard(rcu)(); 3899 list_for_each_entry_rcu(mod, &modules, list) { 3900 if (mod->state == MODULE_STATE_UNFORMED) 3901 continue; 3902 pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); 3903 } 3904 3905 print_unloaded_tainted_modules(); 3906 if (last_unloaded_module.name[0]) 3907 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, 3908 last_unloaded_module.taints); 3909 pr_cont("\n"); 3910 } 3911 3912 #ifdef CONFIG_MODULE_DEBUGFS 3913 struct dentry *mod_debugfs_root; 3914 3915 static int module_debugfs_init(void) 3916 { 3917 mod_debugfs_root = debugfs_create_dir("modules", NULL); 3918 return 0; 3919 } 3920 module_init(module_debugfs_init); 3921 #endif 3922
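
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * syscalls defined above are what userspace loaders such as kmod's insmod
 * ultimately invoke. A minimal loader, assuming an uncompressed foo.ko in
 * the current directory, looks roughly like:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("foo.ko", O_RDONLY | O_CLOEXEC);
 *
 *		if (fd < 0 || syscall(SYS_finit_module, fd, "", 0) != 0) {
 *			perror("finit_module");
 *			return 1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 *
 * Passing MODULE_INIT_COMPRESSED_FILE in flags would instead route the
 * image through module_decompress() as seen in init_module_from_file().
 */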