1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2002 Richard Henderson 4 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. 5 */ 6 7 #define INCLUDE_VERMAGIC 8 9 #include <linux/export.h> 10 #include <linux/extable.h> 11 #include <linux/moduleloader.h> 12 #include <linux/module_signature.h> 13 #include <linux/trace_events.h> 14 #include <linux/init.h> 15 #include <linux/kallsyms.h> 16 #include <linux/buildid.h> 17 #include <linux/fs.h> 18 #include <linux/kernel.h> 19 #include <linux/kernel_read_file.h> 20 #include <linux/kstrtox.h> 21 #include <linux/slab.h> 22 #include <linux/vmalloc.h> 23 #include <linux/elf.h> 24 #include <linux/seq_file.h> 25 #include <linux/syscalls.h> 26 #include <linux/fcntl.h> 27 #include <linux/rcupdate.h> 28 #include <linux/capability.h> 29 #include <linux/cpu.h> 30 #include <linux/moduleparam.h> 31 #include <linux/errno.h> 32 #include <linux/err.h> 33 #include <linux/vermagic.h> 34 #include <linux/notifier.h> 35 #include <linux/sched.h> 36 #include <linux/device.h> 37 #include <linux/string.h> 38 #include <linux/mutex.h> 39 #include <linux/rculist.h> 40 #include <linux/uaccess.h> 41 #include <asm/cacheflush.h> 42 #include <linux/set_memory.h> 43 #include <asm/mmu_context.h> 44 #include <linux/license.h> 45 #include <asm/sections.h> 46 #include <linux/tracepoint.h> 47 #include <linux/ftrace.h> 48 #include <linux/livepatch.h> 49 #include <linux/async.h> 50 #include <linux/percpu.h> 51 #include <linux/kmemleak.h> 52 #include <linux/jump_label.h> 53 #include <linux/pfn.h> 54 #include <linux/bsearch.h> 55 #include <linux/dynamic_debug.h> 56 #include <linux/audit.h> 57 #include <linux/cfi.h> 58 #include <uapi/linux/module.h> 59 #include "internal.h" 60 61 #define CREATE_TRACE_POINTS 62 #include <trace/events/module.h> 63 64 /* 65 * Mutex protects: 66 * 1) List of modules (also safely readable with preempt_disable), 67 * 2) module_use links, 68 * 3) mod_tree.addr_min/mod_tree.addr_max. 69 * (delete and add uses RCU list operations). 70 */ 71 DEFINE_MUTEX(module_mutex); 72 LIST_HEAD(modules); 73 74 /* Work queue for freeing init sections in success case */ 75 static void do_free_init(struct work_struct *w); 76 static DECLARE_WORK(init_free_wq, do_free_init); 77 static LLIST_HEAD(init_free_list); 78 79 struct mod_tree_root mod_tree __cacheline_aligned = { 80 .addr_min = -1UL, 81 }; 82 83 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 84 struct mod_tree_root mod_data_tree __cacheline_aligned = { 85 .addr_min = -1UL, 86 }; 87 #endif 88 89 struct symsearch { 90 const struct kernel_symbol *start, *stop; 91 const s32 *crcs; 92 enum mod_license license; 93 }; 94 95 /* 96 * Bounds of module text, for speeding up __module_address. 97 * Protected by module_mutex. 98 */ 99 static void __mod_update_bounds(void *base, unsigned int size, struct mod_tree_root *tree) 100 { 101 unsigned long min = (unsigned long)base; 102 unsigned long max = min + size; 103 104 if (min < tree->addr_min) 105 tree->addr_min = min; 106 if (max > tree->addr_max) 107 tree->addr_max = max; 108 } 109 110 static void mod_update_bounds(struct module *mod) 111 { 112 __mod_update_bounds(mod->core_layout.base, mod->core_layout.size, &mod_tree); 113 if (mod->init_layout.size) 114 __mod_update_bounds(mod->init_layout.base, mod->init_layout.size, &mod_tree); 115 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 116 __mod_update_bounds(mod->data_layout.base, mod->data_layout.size, &mod_data_tree); 117 #endif 118 } 119 120 /* Block module loading/unloading? 
*/ 121 int modules_disabled; 122 core_param(nomodule, modules_disabled, bint, 0); 123 124 /* Waiting for a module to finish initializing? */ 125 static DECLARE_WAIT_QUEUE_HEAD(module_wq); 126 127 static BLOCKING_NOTIFIER_HEAD(module_notify_list); 128 129 int register_module_notifier(struct notifier_block *nb) 130 { 131 return blocking_notifier_chain_register(&module_notify_list, nb); 132 } 133 EXPORT_SYMBOL(register_module_notifier); 134 135 int unregister_module_notifier(struct notifier_block *nb) 136 { 137 return blocking_notifier_chain_unregister(&module_notify_list, nb); 138 } 139 EXPORT_SYMBOL(unregister_module_notifier); 140 141 /* 142 * We require a truly strong try_module_get(): 0 means success. 143 * Otherwise an error is returned due to ongoing or failed 144 * initialization etc. 145 */ 146 static inline int strong_try_module_get(struct module *mod) 147 { 148 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); 149 if (mod && mod->state == MODULE_STATE_COMING) 150 return -EBUSY; 151 if (try_module_get(mod)) 152 return 0; 153 else 154 return -ENOENT; 155 } 156 157 static inline void add_taint_module(struct module *mod, unsigned flag, 158 enum lockdep_ok lockdep_ok) 159 { 160 add_taint(flag, lockdep_ok); 161 set_bit(flag, &mod->taints); 162 } 163 164 /* 165 * A thread that wants to hold a reference to a module only while it 166 * is running can call this to safely exit. 167 */ 168 void __noreturn __module_put_and_kthread_exit(struct module *mod, long code) 169 { 170 module_put(mod); 171 kthread_exit(code); 172 } 173 EXPORT_SYMBOL(__module_put_and_kthread_exit); 174 175 /* Find a module section: 0 means not found. */ 176 static unsigned int find_sec(const struct load_info *info, const char *name) 177 { 178 unsigned int i; 179 180 for (i = 1; i < info->hdr->e_shnum; i++) { 181 Elf_Shdr *shdr = &info->sechdrs[i]; 182 /* Alloc bit cleared means "ignore it." */ 183 if ((shdr->sh_flags & SHF_ALLOC) 184 && strcmp(info->secstrings + shdr->sh_name, name) == 0) 185 return i; 186 } 187 return 0; 188 } 189 190 /* Find a module section, or NULL. */ 191 static void *section_addr(const struct load_info *info, const char *name) 192 { 193 /* Section 0 has sh_addr 0. */ 194 return (void *)info->sechdrs[find_sec(info, name)].sh_addr; 195 } 196 197 /* Find a module section, or NULL. Fill in number of "objects" in section. */ 198 static void *section_objs(const struct load_info *info, 199 const char *name, 200 size_t object_size, 201 unsigned int *num) 202 { 203 unsigned int sec = find_sec(info, name); 204 205 /* Section 0 has sh_addr 0 and sh_size 0. */ 206 *num = info->sechdrs[sec].sh_size / object_size; 207 return (void *)info->sechdrs[sec].sh_addr; 208 } 209 210 /* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */ 211 static unsigned int find_any_sec(const struct load_info *info, const char *name) 212 { 213 unsigned int i; 214 215 for (i = 1; i < info->hdr->e_shnum; i++) { 216 Elf_Shdr *shdr = &info->sechdrs[i]; 217 if (strcmp(info->secstrings + shdr->sh_name, name) == 0) 218 return i; 219 } 220 return 0; 221 } 222 223 /* 224 * Find a module section, or NULL. Fill in number of "objects" in section. 225 * Ignores SHF_ALLOC flag. 226 */ 227 static __maybe_unused void *any_section_objs(const struct load_info *info, 228 const char *name, 229 size_t object_size, 230 unsigned int *num) 231 { 232 unsigned int sec = find_any_sec(info, name); 233 234 /* Section 0 has sh_addr 0 and sh_size 0. 
*/ 235 *num = info->sechdrs[sec].sh_size / object_size; 236 return (void *)info->sechdrs[sec].sh_addr; 237 } 238 239 #ifndef CONFIG_MODVERSIONS 240 #define symversion(base, idx) NULL 241 #else 242 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) 243 #endif 244 245 static const char *kernel_symbol_name(const struct kernel_symbol *sym) 246 { 247 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 248 return offset_to_ptr(&sym->name_offset); 249 #else 250 return sym->name; 251 #endif 252 } 253 254 static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) 255 { 256 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 257 if (!sym->namespace_offset) 258 return NULL; 259 return offset_to_ptr(&sym->namespace_offset); 260 #else 261 return sym->namespace; 262 #endif 263 } 264 265 int cmp_name(const void *name, const void *sym) 266 { 267 return strcmp(name, kernel_symbol_name(sym)); 268 } 269 270 static bool find_exported_symbol_in_section(const struct symsearch *syms, 271 struct module *owner, 272 struct find_symbol_arg *fsa) 273 { 274 struct kernel_symbol *sym; 275 276 if (!fsa->gplok && syms->license == GPL_ONLY) 277 return false; 278 279 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, 280 sizeof(struct kernel_symbol), cmp_name); 281 if (!sym) 282 return false; 283 284 fsa->owner = owner; 285 fsa->crc = symversion(syms->crcs, sym - syms->start); 286 fsa->sym = sym; 287 fsa->license = syms->license; 288 289 return true; 290 } 291 292 /* 293 * Find an exported symbol and return it, along with, (optional) crc and 294 * (optional) module which owns it. Needs preempt disabled or module_mutex. 295 */ 296 bool find_symbol(struct find_symbol_arg *fsa) 297 { 298 static const struct symsearch arr[] = { 299 { __start___ksymtab, __stop___ksymtab, __start___kcrctab, 300 NOT_GPL_ONLY }, 301 { __start___ksymtab_gpl, __stop___ksymtab_gpl, 302 __start___kcrctab_gpl, 303 GPL_ONLY }, 304 }; 305 struct module *mod; 306 unsigned int i; 307 308 module_assert_mutex_or_preempt(); 309 310 for (i = 0; i < ARRAY_SIZE(arr); i++) 311 if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) 312 return true; 313 314 list_for_each_entry_rcu(mod, &modules, list, 315 lockdep_is_held(&module_mutex)) { 316 struct symsearch arr[] = { 317 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 318 NOT_GPL_ONLY }, 319 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, 320 mod->gpl_crcs, 321 GPL_ONLY }, 322 }; 323 324 if (mod->state == MODULE_STATE_UNFORMED) 325 continue; 326 327 for (i = 0; i < ARRAY_SIZE(arr); i++) 328 if (find_exported_symbol_in_section(&arr[i], mod, fsa)) 329 return true; 330 } 331 332 pr_debug("Failed to find symbol %s\n", fsa->name); 333 return false; 334 } 335 336 /* 337 * Search for module by name: must hold module_mutex (or preempt disabled 338 * for read-only access). 
339 */ 340 struct module *find_module_all(const char *name, size_t len, 341 bool even_unformed) 342 { 343 struct module *mod; 344 345 module_assert_mutex_or_preempt(); 346 347 list_for_each_entry_rcu(mod, &modules, list, 348 lockdep_is_held(&module_mutex)) { 349 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 350 continue; 351 if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) 352 return mod; 353 } 354 return NULL; 355 } 356 357 struct module *find_module(const char *name) 358 { 359 return find_module_all(name, strlen(name), false); 360 } 361 362 #ifdef CONFIG_SMP 363 364 static inline void __percpu *mod_percpu(struct module *mod) 365 { 366 return mod->percpu; 367 } 368 369 static int percpu_modalloc(struct module *mod, struct load_info *info) 370 { 371 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; 372 unsigned long align = pcpusec->sh_addralign; 373 374 if (!pcpusec->sh_size) 375 return 0; 376 377 if (align > PAGE_SIZE) { 378 pr_warn("%s: per-cpu alignment %li > %li\n", 379 mod->name, align, PAGE_SIZE); 380 align = PAGE_SIZE; 381 } 382 383 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); 384 if (!mod->percpu) { 385 pr_warn("%s: Could not allocate %lu bytes percpu data\n", 386 mod->name, (unsigned long)pcpusec->sh_size); 387 return -ENOMEM; 388 } 389 mod->percpu_size = pcpusec->sh_size; 390 return 0; 391 } 392 393 static void percpu_modfree(struct module *mod) 394 { 395 free_percpu(mod->percpu); 396 } 397 398 static unsigned int find_pcpusec(struct load_info *info) 399 { 400 return find_sec(info, ".data..percpu"); 401 } 402 403 static void percpu_modcopy(struct module *mod, 404 const void *from, unsigned long size) 405 { 406 int cpu; 407 408 for_each_possible_cpu(cpu) 409 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); 410 } 411 412 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 413 { 414 struct module *mod; 415 unsigned int cpu; 416 417 preempt_disable(); 418 419 list_for_each_entry_rcu(mod, &modules, list) { 420 if (mod->state == MODULE_STATE_UNFORMED) 421 continue; 422 if (!mod->percpu_size) 423 continue; 424 for_each_possible_cpu(cpu) { 425 void *start = per_cpu_ptr(mod->percpu, cpu); 426 void *va = (void *)addr; 427 428 if (va >= start && va < start + mod->percpu_size) { 429 if (can_addr) { 430 *can_addr = (unsigned long) (va - start); 431 *can_addr += (unsigned long) 432 per_cpu_ptr(mod->percpu, 433 get_boot_cpu_id()); 434 } 435 preempt_enable(); 436 return true; 437 } 438 } 439 } 440 441 preempt_enable(); 442 return false; 443 } 444 445 /** 446 * is_module_percpu_address() - test whether address is from module static percpu 447 * @addr: address to test 448 * 449 * Test whether @addr belongs to module static percpu area. 450 * 451 * Return: %true if @addr is from module static percpu area 452 */ 453 bool is_module_percpu_address(unsigned long addr) 454 { 455 return __is_module_percpu_address(addr, NULL); 456 } 457 458 #else /* ... 
!CONFIG_SMP */ 459 460 static inline void __percpu *mod_percpu(struct module *mod) 461 { 462 return NULL; 463 } 464 static int percpu_modalloc(struct module *mod, struct load_info *info) 465 { 466 /* UP modules shouldn't have this section: ENOMEM isn't quite right */ 467 if (info->sechdrs[info->index.pcpu].sh_size != 0) 468 return -ENOMEM; 469 return 0; 470 } 471 static inline void percpu_modfree(struct module *mod) 472 { 473 } 474 static unsigned int find_pcpusec(struct load_info *info) 475 { 476 return 0; 477 } 478 static inline void percpu_modcopy(struct module *mod, 479 const void *from, unsigned long size) 480 { 481 /* pcpusec should be 0, and size of that section should be 0. */ 482 BUG_ON(size != 0); 483 } 484 bool is_module_percpu_address(unsigned long addr) 485 { 486 return false; 487 } 488 489 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 490 { 491 return false; 492 } 493 494 #endif /* CONFIG_SMP */ 495 496 #define MODINFO_ATTR(field) \ 497 static void setup_modinfo_##field(struct module *mod, const char *s) \ 498 { \ 499 mod->field = kstrdup(s, GFP_KERNEL); \ 500 } \ 501 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ 502 struct module_kobject *mk, char *buffer) \ 503 { \ 504 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ 505 } \ 506 static int modinfo_##field##_exists(struct module *mod) \ 507 { \ 508 return mod->field != NULL; \ 509 } \ 510 static void free_modinfo_##field(struct module *mod) \ 511 { \ 512 kfree(mod->field); \ 513 mod->field = NULL; \ 514 } \ 515 static struct module_attribute modinfo_##field = { \ 516 .attr = { .name = __stringify(field), .mode = 0444 }, \ 517 .show = show_modinfo_##field, \ 518 .setup = setup_modinfo_##field, \ 519 .test = modinfo_##field##_exists, \ 520 .free = free_modinfo_##field, \ 521 }; 522 523 MODINFO_ATTR(version); 524 MODINFO_ATTR(srcversion); 525 526 static struct { 527 char name[MODULE_NAME_LEN + 1]; 528 char taints[MODULE_FLAGS_BUF_SIZE]; 529 } last_unloaded_module; 530 531 #ifdef CONFIG_MODULE_UNLOAD 532 533 EXPORT_TRACEPOINT_SYMBOL(module_get); 534 535 /* MODULE_REF_BASE is the base reference count by kmodule loader. */ 536 #define MODULE_REF_BASE 1 537 538 /* Init the unload section of the module. */ 539 static int module_unload_init(struct module *mod) 540 { 541 /* 542 * Initialize reference counter to MODULE_REF_BASE. 543 * refcnt == 0 means module is going. 544 */ 545 atomic_set(&mod->refcnt, MODULE_REF_BASE); 546 547 INIT_LIST_HEAD(&mod->source_list); 548 INIT_LIST_HEAD(&mod->target_list); 549 550 /* Hold reference count during initialization. */ 551 atomic_inc(&mod->refcnt); 552 553 return 0; 554 } 555 556 /* Does a already use b? */ 557 static int already_uses(struct module *a, struct module *b) 558 { 559 struct module_use *use; 560 561 list_for_each_entry(use, &b->source_list, source_list) { 562 if (use->source == a) { 563 pr_debug("%s uses %s!\n", a->name, b->name); 564 return 1; 565 } 566 } 567 pr_debug("%s does not use %s!\n", a->name, b->name); 568 return 0; 569 } 570 571 /* 572 * Module a uses b 573 * - we add 'a' as a "source", 'b' as a "target" of module use 574 * - the module_use is added to the list of 'b' sources (so 575 * 'b' can walk the list to see who sourced them), and of 'a' 576 * targets (so 'a' can see what modules it targets). 
577 */ 578 static int add_module_usage(struct module *a, struct module *b) 579 { 580 struct module_use *use; 581 582 pr_debug("Allocating new usage for %s.\n", a->name); 583 use = kmalloc(sizeof(*use), GFP_ATOMIC); 584 if (!use) 585 return -ENOMEM; 586 587 use->source = a; 588 use->target = b; 589 list_add(&use->source_list, &b->source_list); 590 list_add(&use->target_list, &a->target_list); 591 return 0; 592 } 593 594 /* Module a uses b: caller needs module_mutex() */ 595 static int ref_module(struct module *a, struct module *b) 596 { 597 int err; 598 599 if (b == NULL || already_uses(a, b)) 600 return 0; 601 602 /* If module isn't available, we fail. */ 603 err = strong_try_module_get(b); 604 if (err) 605 return err; 606 607 err = add_module_usage(a, b); 608 if (err) { 609 module_put(b); 610 return err; 611 } 612 return 0; 613 } 614 615 /* Clear the unload stuff of the module. */ 616 static void module_unload_free(struct module *mod) 617 { 618 struct module_use *use, *tmp; 619 620 mutex_lock(&module_mutex); 621 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { 622 struct module *i = use->target; 623 pr_debug("%s unusing %s\n", mod->name, i->name); 624 module_put(i); 625 list_del(&use->source_list); 626 list_del(&use->target_list); 627 kfree(use); 628 } 629 mutex_unlock(&module_mutex); 630 } 631 632 #ifdef CONFIG_MODULE_FORCE_UNLOAD 633 static inline int try_force_unload(unsigned int flags) 634 { 635 int ret = (flags & O_TRUNC); 636 if (ret) 637 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); 638 return ret; 639 } 640 #else 641 static inline int try_force_unload(unsigned int flags) 642 { 643 return 0; 644 } 645 #endif /* CONFIG_MODULE_FORCE_UNLOAD */ 646 647 /* Try to release refcount of module, 0 means success. */ 648 static int try_release_module_ref(struct module *mod) 649 { 650 int ret; 651 652 /* Try to decrement refcnt which we set at loading */ 653 ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); 654 BUG_ON(ret < 0); 655 if (ret) 656 /* Someone can put this right now, recover with checking */ 657 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); 658 659 return ret; 660 } 661 662 static int try_stop_module(struct module *mod, int flags, int *forced) 663 { 664 /* If it's not unused, quit unless we're forcing. */ 665 if (try_release_module_ref(mod) != 0) { 666 *forced = try_force_unload(flags); 667 if (!(*forced)) 668 return -EWOULDBLOCK; 669 } 670 671 /* Mark it as dying. 
*/ 672 mod->state = MODULE_STATE_GOING; 673 674 return 0; 675 } 676 677 /** 678 * module_refcount() - return the refcount or -1 if unloading 679 * @mod: the module we're checking 680 * 681 * Return: 682 * -1 if the module is in the process of unloading 683 * otherwise the number of references in the kernel to the module 684 */ 685 int module_refcount(struct module *mod) 686 { 687 return atomic_read(&mod->refcnt) - MODULE_REF_BASE; 688 } 689 EXPORT_SYMBOL(module_refcount); 690 691 /* This exists whether we can unload or not */ 692 static void free_module(struct module *mod); 693 694 SYSCALL_DEFINE2(delete_module, const char __user *, name_user, 695 unsigned int, flags) 696 { 697 struct module *mod; 698 char name[MODULE_NAME_LEN]; 699 char buf[MODULE_FLAGS_BUF_SIZE]; 700 int ret, forced = 0; 701 702 if (!capable(CAP_SYS_MODULE) || modules_disabled) 703 return -EPERM; 704 705 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) 706 return -EFAULT; 707 name[MODULE_NAME_LEN-1] = '\0'; 708 709 audit_log_kern_module(name); 710 711 if (mutex_lock_interruptible(&module_mutex) != 0) 712 return -EINTR; 713 714 mod = find_module(name); 715 if (!mod) { 716 ret = -ENOENT; 717 goto out; 718 } 719 720 if (!list_empty(&mod->source_list)) { 721 /* Other modules depend on us: get rid of them first. */ 722 ret = -EWOULDBLOCK; 723 goto out; 724 } 725 726 /* Doing init or already dying? */ 727 if (mod->state != MODULE_STATE_LIVE) { 728 /* FIXME: if (force), slam module count damn the torpedoes */ 729 pr_debug("%s already dying\n", mod->name); 730 ret = -EBUSY; 731 goto out; 732 } 733 734 /* If it has an init func, it must have an exit func to unload */ 735 if (mod->init && !mod->exit) { 736 forced = try_force_unload(flags); 737 if (!forced) { 738 /* This module can't be removed */ 739 ret = -EBUSY; 740 goto out; 741 } 742 } 743 744 ret = try_stop_module(mod, flags, &forced); 745 if (ret != 0) 746 goto out; 747 748 mutex_unlock(&module_mutex); 749 /* Final destruction now no one is using it. */ 750 if (mod->exit != NULL) 751 mod->exit(); 752 blocking_notifier_call_chain(&module_notify_list, 753 MODULE_STATE_GOING, mod); 754 klp_module_going(mod); 755 ftrace_release_mod(mod); 756 757 async_synchronize_full(); 758 759 /* Store the name and taints of the last unloaded module for diagnostic purposes */ 760 strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name)); 761 strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints)); 762 763 free_module(mod); 764 /* someone could wait for the module in add_unformed_module() */ 765 wake_up_all(&module_wq); 766 return 0; 767 out: 768 mutex_unlock(&module_mutex); 769 return ret; 770 } 771 772 void __symbol_put(const char *symbol) 773 { 774 struct find_symbol_arg fsa = { 775 .name = symbol, 776 .gplok = true, 777 }; 778 779 preempt_disable(); 780 BUG_ON(!find_symbol(&fsa)); 781 module_put(fsa.owner); 782 preempt_enable(); 783 } 784 EXPORT_SYMBOL(__symbol_put); 785 786 /* Note this assumes addr is a function, which it currently always is. */ 787 void symbol_put_addr(void *addr) 788 { 789 struct module *modaddr; 790 unsigned long a = (unsigned long)dereference_function_descriptor(addr); 791 792 if (core_kernel_text(a)) 793 return; 794 795 /* 796 * Even though we hold a reference on the module; we still need to 797 * disable preemption in order to safely traverse the data structure. 
798 */ 799 preempt_disable(); 800 modaddr = __module_text_address(a); 801 BUG_ON(!modaddr); 802 module_put(modaddr); 803 preempt_enable(); 804 } 805 EXPORT_SYMBOL_GPL(symbol_put_addr); 806 807 static ssize_t show_refcnt(struct module_attribute *mattr, 808 struct module_kobject *mk, char *buffer) 809 { 810 return sprintf(buffer, "%i\n", module_refcount(mk->mod)); 811 } 812 813 static struct module_attribute modinfo_refcnt = 814 __ATTR(refcnt, 0444, show_refcnt, NULL); 815 816 void __module_get(struct module *module) 817 { 818 if (module) { 819 preempt_disable(); 820 atomic_inc(&module->refcnt); 821 trace_module_get(module, _RET_IP_); 822 preempt_enable(); 823 } 824 } 825 EXPORT_SYMBOL(__module_get); 826 827 bool try_module_get(struct module *module) 828 { 829 bool ret = true; 830 831 if (module) { 832 preempt_disable(); 833 /* Note: here, we can fail to get a reference */ 834 if (likely(module_is_live(module) && 835 atomic_inc_not_zero(&module->refcnt) != 0)) 836 trace_module_get(module, _RET_IP_); 837 else 838 ret = false; 839 840 preempt_enable(); 841 } 842 return ret; 843 } 844 EXPORT_SYMBOL(try_module_get); 845 846 void module_put(struct module *module) 847 { 848 int ret; 849 850 if (module) { 851 preempt_disable(); 852 ret = atomic_dec_if_positive(&module->refcnt); 853 WARN_ON(ret < 0); /* Failed to put refcount */ 854 trace_module_put(module, _RET_IP_); 855 preempt_enable(); 856 } 857 } 858 EXPORT_SYMBOL(module_put); 859 860 #else /* !CONFIG_MODULE_UNLOAD */ 861 static inline void module_unload_free(struct module *mod) 862 { 863 } 864 865 static int ref_module(struct module *a, struct module *b) 866 { 867 return strong_try_module_get(b); 868 } 869 870 static inline int module_unload_init(struct module *mod) 871 { 872 return 0; 873 } 874 #endif /* CONFIG_MODULE_UNLOAD */ 875 876 size_t module_flags_taint(unsigned long taints, char *buf) 877 { 878 size_t l = 0; 879 int i; 880 881 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 882 if (taint_flags[i].module && test_bit(i, &taints)) 883 buf[l++] = taint_flags[i].c_true; 884 } 885 886 return l; 887 } 888 889 static ssize_t show_initstate(struct module_attribute *mattr, 890 struct module_kobject *mk, char *buffer) 891 { 892 const char *state = "unknown"; 893 894 switch (mk->mod->state) { 895 case MODULE_STATE_LIVE: 896 state = "live"; 897 break; 898 case MODULE_STATE_COMING: 899 state = "coming"; 900 break; 901 case MODULE_STATE_GOING: 902 state = "going"; 903 break; 904 default: 905 BUG(); 906 } 907 return sprintf(buffer, "%s\n", state); 908 } 909 910 static struct module_attribute modinfo_initstate = 911 __ATTR(initstate, 0444, show_initstate, NULL); 912 913 static ssize_t store_uevent(struct module_attribute *mattr, 914 struct module_kobject *mk, 915 const char *buffer, size_t count) 916 { 917 int rc; 918 919 rc = kobject_synth_uevent(&mk->kobj, buffer, count); 920 return rc ? 
rc : count; 921 } 922 923 struct module_attribute module_uevent = 924 __ATTR(uevent, 0200, NULL, store_uevent); 925 926 static ssize_t show_coresize(struct module_attribute *mattr, 927 struct module_kobject *mk, char *buffer) 928 { 929 return sprintf(buffer, "%u\n", mk->mod->core_layout.size); 930 } 931 932 static struct module_attribute modinfo_coresize = 933 __ATTR(coresize, 0444, show_coresize, NULL); 934 935 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 936 static ssize_t show_datasize(struct module_attribute *mattr, 937 struct module_kobject *mk, char *buffer) 938 { 939 return sprintf(buffer, "%u\n", mk->mod->data_layout.size); 940 } 941 942 static struct module_attribute modinfo_datasize = 943 __ATTR(datasize, 0444, show_datasize, NULL); 944 #endif 945 946 static ssize_t show_initsize(struct module_attribute *mattr, 947 struct module_kobject *mk, char *buffer) 948 { 949 return sprintf(buffer, "%u\n", mk->mod->init_layout.size); 950 } 951 952 static struct module_attribute modinfo_initsize = 953 __ATTR(initsize, 0444, show_initsize, NULL); 954 955 static ssize_t show_taint(struct module_attribute *mattr, 956 struct module_kobject *mk, char *buffer) 957 { 958 size_t l; 959 960 l = module_flags_taint(mk->mod->taints, buffer); 961 buffer[l++] = '\n'; 962 return l; 963 } 964 965 static struct module_attribute modinfo_taint = 966 __ATTR(taint, 0444, show_taint, NULL); 967 968 struct module_attribute *modinfo_attrs[] = { 969 &module_uevent, 970 &modinfo_version, 971 &modinfo_srcversion, 972 &modinfo_initstate, 973 &modinfo_coresize, 974 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 975 &modinfo_datasize, 976 #endif 977 &modinfo_initsize, 978 &modinfo_taint, 979 #ifdef CONFIG_MODULE_UNLOAD 980 &modinfo_refcnt, 981 #endif 982 NULL, 983 }; 984 985 size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs); 986 987 static const char vermagic[] = VERMAGIC_STRING; 988 989 int try_to_force_load(struct module *mod, const char *reason) 990 { 991 #ifdef CONFIG_MODULE_FORCE_LOAD 992 if (!test_taint(TAINT_FORCED_MODULE)) 993 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); 994 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); 995 return 0; 996 #else 997 return -ENOEXEC; 998 #endif 999 } 1000 1001 static char *get_modinfo(const struct load_info *info, const char *tag); 1002 static char *get_next_modinfo(const struct load_info *info, const char *tag, 1003 char *prev); 1004 1005 static int verify_namespace_is_imported(const struct load_info *info, 1006 const struct kernel_symbol *sym, 1007 struct module *mod) 1008 { 1009 const char *namespace; 1010 char *imported_namespace; 1011 1012 namespace = kernel_symbol_namespace(sym); 1013 if (namespace && namespace[0]) { 1014 imported_namespace = get_modinfo(info, "import_ns"); 1015 while (imported_namespace) { 1016 if (strcmp(namespace, imported_namespace) == 0) 1017 return 0; 1018 imported_namespace = get_next_modinfo( 1019 info, "import_ns", imported_namespace); 1020 } 1021 #ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS 1022 pr_warn( 1023 #else 1024 pr_err( 1025 #endif 1026 "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", 1027 mod->name, kernel_symbol_name(sym), namespace); 1028 #ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS 1029 return -EINVAL; 1030 #endif 1031 } 1032 return 0; 1033 } 1034 1035 static bool inherit_taint(struct module *mod, struct module *owner, const char *name) 1036 { 1037 if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) 1038 return true; 1039 1040 if 
(mod->using_gplonly_symbols) { 1041 pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n", 1042 mod->name, name, owner->name); 1043 return false; 1044 } 1045 1046 if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { 1047 pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n", 1048 mod->name, name, owner->name); 1049 set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); 1050 } 1051 return true; 1052 } 1053 1054 /* Resolve a symbol for this module. I.e. if we find one, record usage. */ 1055 static const struct kernel_symbol *resolve_symbol(struct module *mod, 1056 const struct load_info *info, 1057 const char *name, 1058 char ownername[]) 1059 { 1060 struct find_symbol_arg fsa = { 1061 .name = name, 1062 .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), 1063 .warn = true, 1064 }; 1065 int err; 1066 1067 /* 1068 * The module_mutex should not be a heavily contended lock; 1069 * if we get the occasional sleep here, we'll go an extra iteration 1070 * in the wait_event_interruptible(), which is harmless. 1071 */ 1072 sched_annotate_sleep(); 1073 mutex_lock(&module_mutex); 1074 if (!find_symbol(&fsa)) 1075 goto unlock; 1076 1077 if (fsa.license == GPL_ONLY) 1078 mod->using_gplonly_symbols = true; 1079 1080 if (!inherit_taint(mod, fsa.owner, name)) { 1081 fsa.sym = NULL; 1082 goto getname; 1083 } 1084 1085 if (!check_version(info, name, mod, fsa.crc)) { 1086 fsa.sym = ERR_PTR(-EINVAL); 1087 goto getname; 1088 } 1089 1090 err = verify_namespace_is_imported(info, fsa.sym, mod); 1091 if (err) { 1092 fsa.sym = ERR_PTR(err); 1093 goto getname; 1094 } 1095 1096 err = ref_module(mod, fsa.owner); 1097 if (err) { 1098 fsa.sym = ERR_PTR(err); 1099 goto getname; 1100 } 1101 1102 getname: 1103 /* We must make copy under the lock if we failed to get ref. */ 1104 strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); 1105 unlock: 1106 mutex_unlock(&module_mutex); 1107 return fsa.sym; 1108 } 1109 1110 static const struct kernel_symbol * 1111 resolve_symbol_wait(struct module *mod, 1112 const struct load_info *info, 1113 const char *name) 1114 { 1115 const struct kernel_symbol *ksym; 1116 char owner[MODULE_NAME_LEN]; 1117 1118 if (wait_event_interruptible_timeout(module_wq, 1119 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) 1120 || PTR_ERR(ksym) != -EBUSY, 1121 30 * HZ) <= 0) { 1122 pr_warn("%s: gave up waiting for init of module %s.\n", 1123 mod->name, owner); 1124 } 1125 return ksym; 1126 } 1127 1128 void __weak module_memfree(void *module_region) 1129 { 1130 /* 1131 * This memory may be RO, and freeing RO memory in an interrupt is not 1132 * supported by vmalloc. 1133 */ 1134 WARN_ON(in_interrupt()); 1135 vfree(module_region); 1136 } 1137 1138 void __weak module_arch_cleanup(struct module *mod) 1139 { 1140 } 1141 1142 void __weak module_arch_freeing_init(struct module *mod) 1143 { 1144 } 1145 1146 /* Free a module, remove from lists, etc. */ 1147 static void free_module(struct module *mod) 1148 { 1149 trace_module_free(mod); 1150 1151 mod_sysfs_teardown(mod); 1152 1153 /* 1154 * We leave it in list to prevent duplicate loads, but make sure 1155 * that noone uses it while it's being deconstructed. 1156 */ 1157 mutex_lock(&module_mutex); 1158 mod->state = MODULE_STATE_UNFORMED; 1159 mutex_unlock(&module_mutex); 1160 1161 /* Remove dynamic debug info */ 1162 ddebug_remove_module(mod->name); 1163 1164 /* Arch-specific cleanup. 
*/ 1165 module_arch_cleanup(mod); 1166 1167 /* Module unload stuff */ 1168 module_unload_free(mod); 1169 1170 /* Free any allocated parameters. */ 1171 destroy_params(mod->kp, mod->num_kp); 1172 1173 if (is_livepatch_module(mod)) 1174 free_module_elf(mod); 1175 1176 /* Now we can delete it from the lists */ 1177 mutex_lock(&module_mutex); 1178 /* Unlink carefully: kallsyms could be walking list. */ 1179 list_del_rcu(&mod->list); 1180 mod_tree_remove(mod); 1181 /* Remove this module from bug list, this uses list_del_rcu */ 1182 module_bug_cleanup(mod); 1183 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ 1184 synchronize_rcu(); 1185 if (try_add_tainted_module(mod)) 1186 pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", 1187 mod->name); 1188 mutex_unlock(&module_mutex); 1189 1190 /* This may be empty, but that's OK */ 1191 module_arch_freeing_init(mod); 1192 module_memfree(mod->init_layout.base); 1193 kfree(mod->args); 1194 percpu_modfree(mod); 1195 1196 /* Free lock-classes; relies on the preceding sync_rcu(). */ 1197 lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size); 1198 1199 /* Finally, free the core (containing the module structure) */ 1200 module_memfree(mod->core_layout.base); 1201 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 1202 vfree(mod->data_layout.base); 1203 #endif 1204 } 1205 1206 void *__symbol_get(const char *symbol) 1207 { 1208 struct find_symbol_arg fsa = { 1209 .name = symbol, 1210 .gplok = true, 1211 .warn = true, 1212 }; 1213 1214 preempt_disable(); 1215 if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) { 1216 preempt_enable(); 1217 return NULL; 1218 } 1219 preempt_enable(); 1220 return (void *)kernel_symbol_value(fsa.sym); 1221 } 1222 EXPORT_SYMBOL_GPL(__symbol_get); 1223 1224 /* 1225 * Ensure that an exported symbol [global namespace] does not already exist 1226 * in the kernel or in some other module's exported symbol table. 1227 * 1228 * You must hold the module_mutex. 1229 */ 1230 static int verify_exported_symbols(struct module *mod) 1231 { 1232 unsigned int i; 1233 const struct kernel_symbol *s; 1234 struct { 1235 const struct kernel_symbol *sym; 1236 unsigned int num; 1237 } arr[] = { 1238 { mod->syms, mod->num_syms }, 1239 { mod->gpl_syms, mod->num_gpl_syms }, 1240 }; 1241 1242 for (i = 0; i < ARRAY_SIZE(arr); i++) { 1243 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { 1244 struct find_symbol_arg fsa = { 1245 .name = kernel_symbol_name(s), 1246 .gplok = true, 1247 }; 1248 if (find_symbol(&fsa)) { 1249 pr_err("%s: exports duplicate symbol %s" 1250 " (owned by %s)\n", 1251 mod->name, kernel_symbol_name(s), 1252 module_name(fsa.owner)); 1253 return -ENOEXEC; 1254 } 1255 } 1256 } 1257 return 0; 1258 } 1259 1260 static bool ignore_undef_symbol(Elf_Half emachine, const char *name) 1261 { 1262 /* 1263 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as 1264 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. 1265 * i386 has a similar problem but may not deserve a fix. 1266 * 1267 * If we ever have to ignore many symbols, consider refactoring the code to 1268 * only warn if referenced by a relocation. 1269 */ 1270 if (emachine == EM_386 || emachine == EM_X86_64) 1271 return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); 1272 return false; 1273 } 1274 1275 /* Change all symbols so that st_value encodes the pointer directly. 
*/ 1276 static int simplify_symbols(struct module *mod, const struct load_info *info) 1277 { 1278 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 1279 Elf_Sym *sym = (void *)symsec->sh_addr; 1280 unsigned long secbase; 1281 unsigned int i; 1282 int ret = 0; 1283 const struct kernel_symbol *ksym; 1284 1285 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { 1286 const char *name = info->strtab + sym[i].st_name; 1287 1288 switch (sym[i].st_shndx) { 1289 case SHN_COMMON: 1290 /* Ignore common symbols */ 1291 if (!strncmp(name, "__gnu_lto", 9)) 1292 break; 1293 1294 /* 1295 * We compiled with -fno-common. These are not 1296 * supposed to happen. 1297 */ 1298 pr_debug("Common symbol: %s\n", name); 1299 pr_warn("%s: please compile with -fno-common\n", 1300 mod->name); 1301 ret = -ENOEXEC; 1302 break; 1303 1304 case SHN_ABS: 1305 /* Don't need to do anything */ 1306 pr_debug("Absolute symbol: 0x%08lx\n", 1307 (long)sym[i].st_value); 1308 break; 1309 1310 case SHN_LIVEPATCH: 1311 /* Livepatch symbols are resolved by livepatch */ 1312 break; 1313 1314 case SHN_UNDEF: 1315 ksym = resolve_symbol_wait(mod, info, name); 1316 /* Ok if resolved. */ 1317 if (ksym && !IS_ERR(ksym)) { 1318 sym[i].st_value = kernel_symbol_value(ksym); 1319 break; 1320 } 1321 1322 /* Ok if weak or ignored. */ 1323 if (!ksym && 1324 (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || 1325 ignore_undef_symbol(info->hdr->e_machine, name))) 1326 break; 1327 1328 ret = PTR_ERR(ksym) ?: -ENOENT; 1329 pr_warn("%s: Unknown symbol %s (err %d)\n", 1330 mod->name, name, ret); 1331 break; 1332 1333 default: 1334 /* Divert to percpu allocation if a percpu var. */ 1335 if (sym[i].st_shndx == info->index.pcpu) 1336 secbase = (unsigned long)mod_percpu(mod); 1337 else 1338 secbase = info->sechdrs[sym[i].st_shndx].sh_addr; 1339 sym[i].st_value += secbase; 1340 break; 1341 } 1342 } 1343 1344 return ret; 1345 } 1346 1347 static int apply_relocations(struct module *mod, const struct load_info *info) 1348 { 1349 unsigned int i; 1350 int err = 0; 1351 1352 /* Now do relocations. */ 1353 for (i = 1; i < info->hdr->e_shnum; i++) { 1354 unsigned int infosec = info->sechdrs[i].sh_info; 1355 1356 /* Not a valid relocation section? */ 1357 if (infosec >= info->hdr->e_shnum) 1358 continue; 1359 1360 /* Don't bother with non-allocated sections */ 1361 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) 1362 continue; 1363 1364 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) 1365 err = klp_apply_section_relocs(mod, info->sechdrs, 1366 info->secstrings, 1367 info->strtab, 1368 info->index.sym, i, 1369 NULL); 1370 else if (info->sechdrs[i].sh_type == SHT_REL) 1371 err = apply_relocate(info->sechdrs, info->strtab, 1372 info->index.sym, i, mod); 1373 else if (info->sechdrs[i].sh_type == SHT_RELA) 1374 err = apply_relocate_add(info->sechdrs, info->strtab, 1375 info->index.sym, i, mod); 1376 if (err < 0) 1377 break; 1378 } 1379 return err; 1380 } 1381 1382 /* Additional bytes needed by arch in front of individual sections */ 1383 unsigned int __weak arch_mod_section_prepend(struct module *mod, 1384 unsigned int section) 1385 { 1386 /* default implementation just returns zero */ 1387 return 0; 1388 } 1389 1390 /* Update size with this section: return offset. 
*/ 1391 long module_get_offset(struct module *mod, unsigned int *size, 1392 Elf_Shdr *sechdr, unsigned int section) 1393 { 1394 long ret; 1395 1396 *size += arch_mod_section_prepend(mod, section); 1397 ret = ALIGN(*size, sechdr->sh_addralign ?: 1); 1398 *size = ret + sechdr->sh_size; 1399 return ret; 1400 } 1401 1402 static bool module_init_layout_section(const char *sname) 1403 { 1404 #ifndef CONFIG_MODULE_UNLOAD 1405 if (module_exit_section(sname)) 1406 return true; 1407 #endif 1408 return module_init_section(sname); 1409 } 1410 1411 /* 1412 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 1413 * might -- code, read-only data, read-write data, small data. Tally 1414 * sizes, and place the offsets into sh_entsize fields: high bit means it 1415 * belongs in init. 1416 */ 1417 static void layout_sections(struct module *mod, struct load_info *info) 1418 { 1419 static unsigned long const masks[][2] = { 1420 /* 1421 * NOTE: all executable code must be the first section 1422 * in this array; otherwise modify the text_size 1423 * finder in the two loops below 1424 */ 1425 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, 1426 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, 1427 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, 1428 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, 1429 { ARCH_SHF_SMALL | SHF_ALLOC, 0 } 1430 }; 1431 unsigned int m, i; 1432 1433 for (i = 0; i < info->hdr->e_shnum; i++) 1434 info->sechdrs[i].sh_entsize = ~0UL; 1435 1436 pr_debug("Core section allocation order:\n"); 1437 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 1438 for (i = 0; i < info->hdr->e_shnum; ++i) { 1439 Elf_Shdr *s = &info->sechdrs[i]; 1440 const char *sname = info->secstrings + s->sh_name; 1441 unsigned int *sizep; 1442 1443 if ((s->sh_flags & masks[m][0]) != masks[m][0] 1444 || (s->sh_flags & masks[m][1]) 1445 || s->sh_entsize != ~0UL 1446 || module_init_layout_section(sname)) 1447 continue; 1448 sizep = m ? 
&mod->data_layout.size : &mod->core_layout.size; 1449 s->sh_entsize = module_get_offset(mod, sizep, s, i); 1450 pr_debug("\t%s\n", sname); 1451 } 1452 switch (m) { 1453 case 0: /* executable */ 1454 mod->core_layout.size = strict_align(mod->core_layout.size); 1455 mod->core_layout.text_size = mod->core_layout.size; 1456 break; 1457 case 1: /* RO: text and ro-data */ 1458 mod->data_layout.size = strict_align(mod->data_layout.size); 1459 mod->data_layout.ro_size = mod->data_layout.size; 1460 break; 1461 case 2: /* RO after init */ 1462 mod->data_layout.size = strict_align(mod->data_layout.size); 1463 mod->data_layout.ro_after_init_size = mod->data_layout.size; 1464 break; 1465 case 4: /* whole core */ 1466 mod->data_layout.size = strict_align(mod->data_layout.size); 1467 break; 1468 } 1469 } 1470 1471 pr_debug("Init section allocation order:\n"); 1472 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 1473 for (i = 0; i < info->hdr->e_shnum; ++i) { 1474 Elf_Shdr *s = &info->sechdrs[i]; 1475 const char *sname = info->secstrings + s->sh_name; 1476 1477 if ((s->sh_flags & masks[m][0]) != masks[m][0] 1478 || (s->sh_flags & masks[m][1]) 1479 || s->sh_entsize != ~0UL 1480 || !module_init_layout_section(sname)) 1481 continue; 1482 s->sh_entsize = (module_get_offset(mod, &mod->init_layout.size, s, i) 1483 | INIT_OFFSET_MASK); 1484 pr_debug("\t%s\n", sname); 1485 } 1486 switch (m) { 1487 case 0: /* executable */ 1488 mod->init_layout.size = strict_align(mod->init_layout.size); 1489 mod->init_layout.text_size = mod->init_layout.size; 1490 break; 1491 case 1: /* RO: text and ro-data */ 1492 mod->init_layout.size = strict_align(mod->init_layout.size); 1493 mod->init_layout.ro_size = mod->init_layout.size; 1494 break; 1495 case 2: 1496 /* 1497 * RO after init doesn't apply to init_layout (only 1498 * core_layout), so it just takes the value of ro_size. 1499 */ 1500 mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; 1501 break; 1502 case 4: /* whole init */ 1503 mod->init_layout.size = strict_align(mod->init_layout.size); 1504 break; 1505 } 1506 } 1507 } 1508 1509 static void set_license(struct module *mod, const char *license) 1510 { 1511 if (!license) 1512 license = "unspecified"; 1513 1514 if (!license_is_gpl_compatible(license)) { 1515 if (!test_taint(TAINT_PROPRIETARY_MODULE)) 1516 pr_warn("%s: module license '%s' taints kernel.\n", 1517 mod->name, license); 1518 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 1519 LOCKDEP_NOW_UNRELIABLE); 1520 } 1521 } 1522 1523 /* Parse tag=value strings from .modinfo section */ 1524 static char *next_string(char *string, unsigned long *secsize) 1525 { 1526 /* Skip non-zero chars */ 1527 while (string[0]) { 1528 string++; 1529 if ((*secsize)-- <= 1) 1530 return NULL; 1531 } 1532 1533 /* Skip any zero padding. */ 1534 while (!string[0]) { 1535 string++; 1536 if ((*secsize)-- <= 1) 1537 return NULL; 1538 } 1539 return string; 1540 } 1541 1542 static char *get_next_modinfo(const struct load_info *info, const char *tag, 1543 char *prev) 1544 { 1545 char *p; 1546 unsigned int taglen = strlen(tag); 1547 Elf_Shdr *infosec = &info->sechdrs[info->index.info]; 1548 unsigned long size = infosec->sh_size; 1549 1550 /* 1551 * get_modinfo() calls made before rewrite_section_headers() 1552 * must use sh_offset, as sh_addr isn't set! 
1553 */ 1554 char *modinfo = (char *)info->hdr + infosec->sh_offset; 1555 1556 if (prev) { 1557 size -= prev - modinfo; 1558 modinfo = next_string(prev, &size); 1559 } 1560 1561 for (p = modinfo; p; p = next_string(p, &size)) { 1562 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 1563 return p + taglen + 1; 1564 } 1565 return NULL; 1566 } 1567 1568 static char *get_modinfo(const struct load_info *info, const char *tag) 1569 { 1570 return get_next_modinfo(info, tag, NULL); 1571 } 1572 1573 static void setup_modinfo(struct module *mod, struct load_info *info) 1574 { 1575 struct module_attribute *attr; 1576 int i; 1577 1578 for (i = 0; (attr = modinfo_attrs[i]); i++) { 1579 if (attr->setup) 1580 attr->setup(mod, get_modinfo(info, attr->attr.name)); 1581 } 1582 } 1583 1584 static void free_modinfo(struct module *mod) 1585 { 1586 struct module_attribute *attr; 1587 int i; 1588 1589 for (i = 0; (attr = modinfo_attrs[i]); i++) { 1590 if (attr->free) 1591 attr->free(mod); 1592 } 1593 } 1594 1595 static void dynamic_debug_setup(struct module *mod, struct _ddebug_info *dyndbg) 1596 { 1597 if (!dyndbg->num_descs) 1598 return; 1599 ddebug_add_module(dyndbg, mod->name); 1600 } 1601 1602 static void dynamic_debug_remove(struct module *mod, struct _ddebug_info *dyndbg) 1603 { 1604 if (dyndbg->num_descs) 1605 ddebug_remove_module(mod->name); 1606 } 1607 1608 void * __weak module_alloc(unsigned long size) 1609 { 1610 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 1611 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 1612 NUMA_NO_NODE, __builtin_return_address(0)); 1613 } 1614 1615 bool __weak module_init_section(const char *name) 1616 { 1617 return strstarts(name, ".init"); 1618 } 1619 1620 bool __weak module_exit_section(const char *name) 1621 { 1622 return strstarts(name, ".exit"); 1623 } 1624 1625 static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) 1626 { 1627 #if defined(CONFIG_64BIT) 1628 unsigned long long secend; 1629 #else 1630 unsigned long secend; 1631 #endif 1632 1633 /* 1634 * Check for both overflow and offset/size being 1635 * too large. 1636 */ 1637 secend = shdr->sh_offset + shdr->sh_size; 1638 if (secend < shdr->sh_offset || secend > info->len) 1639 return -ENOEXEC; 1640 1641 return 0; 1642 } 1643 1644 /* 1645 * Sanity checks against invalid binaries, wrong arch, weird elf version. 1646 * 1647 * Also do basic validity checks against section offsets and sizes, the 1648 * section name string table, and the indices used for it (sh_name). 
1649 */ 1650 static int elf_validity_check(struct load_info *info) 1651 { 1652 unsigned int i; 1653 Elf_Shdr *shdr, *strhdr; 1654 int err; 1655 1656 if (info->len < sizeof(*(info->hdr))) { 1657 pr_err("Invalid ELF header len %lu\n", info->len); 1658 goto no_exec; 1659 } 1660 1661 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { 1662 pr_err("Invalid ELF header magic: != %s\n", ELFMAG); 1663 goto no_exec; 1664 } 1665 if (info->hdr->e_type != ET_REL) { 1666 pr_err("Invalid ELF header type: %u != %u\n", 1667 info->hdr->e_type, ET_REL); 1668 goto no_exec; 1669 } 1670 if (!elf_check_arch(info->hdr)) { 1671 pr_err("Invalid architecture in ELF header: %u\n", 1672 info->hdr->e_machine); 1673 goto no_exec; 1674 } 1675 if (!module_elf_check_arch(info->hdr)) { 1676 pr_err("Invalid module architecture in ELF header: %u\n", 1677 info->hdr->e_machine); 1678 goto no_exec; 1679 } 1680 if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { 1681 pr_err("Invalid ELF section header size\n"); 1682 goto no_exec; 1683 } 1684 1685 /* 1686 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is 1687 * known and small. So e_shnum * sizeof(Elf_Shdr) 1688 * will not overflow unsigned long on any platform. 1689 */ 1690 if (info->hdr->e_shoff >= info->len 1691 || (info->hdr->e_shnum * sizeof(Elf_Shdr) > 1692 info->len - info->hdr->e_shoff)) { 1693 pr_err("Invalid ELF section header overflow\n"); 1694 goto no_exec; 1695 } 1696 1697 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; 1698 1699 /* 1700 * Verify if the section name table index is valid. 1701 */ 1702 if (info->hdr->e_shstrndx == SHN_UNDEF 1703 || info->hdr->e_shstrndx >= info->hdr->e_shnum) { 1704 pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", 1705 info->hdr->e_shstrndx, info->hdr->e_shstrndx, 1706 info->hdr->e_shnum); 1707 goto no_exec; 1708 } 1709 1710 strhdr = &info->sechdrs[info->hdr->e_shstrndx]; 1711 err = validate_section_offset(info, strhdr); 1712 if (err < 0) { 1713 pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); 1714 return err; 1715 } 1716 1717 /* 1718 * The section name table must be NUL-terminated, as required 1719 * by the spec. This makes strcmp and pr_* calls that access 1720 * strings in the section safe. 1721 */ 1722 info->secstrings = (void *)info->hdr + strhdr->sh_offset; 1723 if (strhdr->sh_size == 0) { 1724 pr_err("empty section name table\n"); 1725 goto no_exec; 1726 } 1727 if (info->secstrings[strhdr->sh_size - 1] != '\0') { 1728 pr_err("ELF Spec violation: section name table isn't null terminated\n"); 1729 goto no_exec; 1730 } 1731 1732 /* 1733 * The code assumes that section 0 has a length of zero and 1734 * an addr of zero, so check for it. 
1735 */ 1736 if (info->sechdrs[0].sh_type != SHT_NULL 1737 || info->sechdrs[0].sh_size != 0 1738 || info->sechdrs[0].sh_addr != 0) { 1739 pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", 1740 info->sechdrs[0].sh_type); 1741 goto no_exec; 1742 } 1743 1744 for (i = 1; i < info->hdr->e_shnum; i++) { 1745 shdr = &info->sechdrs[i]; 1746 switch (shdr->sh_type) { 1747 case SHT_NULL: 1748 case SHT_NOBITS: 1749 continue; 1750 case SHT_SYMTAB: 1751 if (shdr->sh_link == SHN_UNDEF 1752 || shdr->sh_link >= info->hdr->e_shnum) { 1753 pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", 1754 shdr->sh_link, shdr->sh_link, 1755 info->hdr->e_shnum); 1756 goto no_exec; 1757 } 1758 fallthrough; 1759 default: 1760 err = validate_section_offset(info, shdr); 1761 if (err < 0) { 1762 pr_err("Invalid ELF section in module (section %u type %u)\n", 1763 i, shdr->sh_type); 1764 return err; 1765 } 1766 1767 if (shdr->sh_flags & SHF_ALLOC) { 1768 if (shdr->sh_name >= strhdr->sh_size) { 1769 pr_err("Invalid ELF section name in module (section %u type %u)\n", 1770 i, shdr->sh_type); 1771 return -ENOEXEC; 1772 } 1773 } 1774 break; 1775 } 1776 } 1777 1778 return 0; 1779 1780 no_exec: 1781 return -ENOEXEC; 1782 } 1783 1784 #define COPY_CHUNK_SIZE (16*PAGE_SIZE) 1785 1786 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 1787 { 1788 do { 1789 unsigned long n = min(len, COPY_CHUNK_SIZE); 1790 1791 if (copy_from_user(dst, usrc, n) != 0) 1792 return -EFAULT; 1793 cond_resched(); 1794 dst += n; 1795 usrc += n; 1796 len -= n; 1797 } while (len); 1798 return 0; 1799 } 1800 1801 static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 1802 { 1803 if (!get_modinfo(info, "livepatch")) 1804 /* Nothing more to do */ 1805 return 0; 1806 1807 if (set_livepatch_module(mod)) { 1808 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); 1809 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", 1810 mod->name); 1811 return 0; 1812 } 1813 1814 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 1815 mod->name); 1816 return -ENOEXEC; 1817 } 1818 1819 static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 1820 { 1821 if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 1822 return; 1823 1824 pr_warn("%s: loading module not compiled with retpoline compiler.\n", 1825 mod->name); 1826 } 1827 1828 /* Sets info->hdr and info->len. */ 1829 static int copy_module_from_user(const void __user *umod, unsigned long len, 1830 struct load_info *info) 1831 { 1832 int err; 1833 1834 info->len = len; 1835 if (info->len < sizeof(*(info->hdr))) 1836 return -ENOEXEC; 1837 1838 err = security_kernel_load_data(LOADING_MODULE, true); 1839 if (err) 1840 return err; 1841 1842 /* Suck in entire file: we'll want most of it. 
*/ 1843 info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 1844 if (!info->hdr) 1845 return -ENOMEM; 1846 1847 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 1848 err = -EFAULT; 1849 goto out; 1850 } 1851 1852 err = security_kernel_post_load_data((char *)info->hdr, info->len, 1853 LOADING_MODULE, "init_module"); 1854 out: 1855 if (err) 1856 vfree(info->hdr); 1857 1858 return err; 1859 } 1860 1861 static void free_copy(struct load_info *info, int flags) 1862 { 1863 if (flags & MODULE_INIT_COMPRESSED_FILE) 1864 module_decompress_cleanup(info); 1865 else 1866 vfree(info->hdr); 1867 } 1868 1869 static int rewrite_section_headers(struct load_info *info, int flags) 1870 { 1871 unsigned int i; 1872 1873 /* This should always be true, but let's be sure. */ 1874 info->sechdrs[0].sh_addr = 0; 1875 1876 for (i = 1; i < info->hdr->e_shnum; i++) { 1877 Elf_Shdr *shdr = &info->sechdrs[i]; 1878 1879 /* 1880 * Mark all sections sh_addr with their address in the 1881 * temporary image. 1882 */ 1883 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; 1884 1885 } 1886 1887 /* Track but don't keep modinfo and version sections. */ 1888 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 1889 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 1890 1891 return 0; 1892 } 1893 1894 /* 1895 * Set up our basic convenience variables (pointers to section headers, 1896 * search for module section index etc), and do some basic section 1897 * verification. 1898 * 1899 * Set info->mod to the temporary copy of the module in info->hdr. The final one 1900 * will be allocated in move_module(). 1901 */ 1902 static int setup_load_info(struct load_info *info, int flags) 1903 { 1904 unsigned int i; 1905 1906 /* Try to find a name early so we can log errors with a module name */ 1907 info->index.info = find_sec(info, ".modinfo"); 1908 if (info->index.info) 1909 info->name = get_modinfo(info, "name"); 1910 1911 /* Find internal symbols and strings. */ 1912 for (i = 1; i < info->hdr->e_shnum; i++) { 1913 if (info->sechdrs[i].sh_type == SHT_SYMTAB) { 1914 info->index.sym = i; 1915 info->index.str = info->sechdrs[i].sh_link; 1916 info->strtab = (char *)info->hdr 1917 + info->sechdrs[info->index.str].sh_offset; 1918 break; 1919 } 1920 } 1921 1922 if (info->index.sym == 0) { 1923 pr_warn("%s: module has no symbols (stripped?)\n", 1924 info->name ?: "(missing .modinfo section or name field)"); 1925 return -ENOEXEC; 1926 } 1927 1928 info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); 1929 if (!info->index.mod) { 1930 pr_warn("%s: No module found in object\n", 1931 info->name ?: "(missing .modinfo section or name field)"); 1932 return -ENOEXEC; 1933 } 1934 /* This is temporary: point mod into copy of data. */ 1935 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 1936 1937 /* 1938 * If we didn't load the .modinfo 'name' field earlier, fall back to 1939 * on-disk struct mod 'name' field. 1940 */ 1941 if (!info->name) 1942 info->name = info->mod->name; 1943 1944 if (flags & MODULE_INIT_IGNORE_MODVERSIONS) 1945 info->index.vers = 0; /* Pretend no __versions section! 
*/ 1946 else 1947 info->index.vers = find_sec(info, "__versions"); 1948 1949 info->index.pcpu = find_pcpusec(info); 1950 1951 return 0; 1952 } 1953 1954 static int check_modinfo(struct module *mod, struct load_info *info, int flags) 1955 { 1956 const char *modmagic = get_modinfo(info, "vermagic"); 1957 int err; 1958 1959 if (flags & MODULE_INIT_IGNORE_VERMAGIC) 1960 modmagic = NULL; 1961 1962 /* This is allowed: modprobe --force will invalidate it. */ 1963 if (!modmagic) { 1964 err = try_to_force_load(mod, "bad vermagic"); 1965 if (err) 1966 return err; 1967 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 1968 pr_err("%s: version magic '%s' should be '%s'\n", 1969 info->name, modmagic, vermagic); 1970 return -ENOEXEC; 1971 } 1972 1973 if (!get_modinfo(info, "intree")) { 1974 if (!test_taint(TAINT_OOT_MODULE)) 1975 pr_warn("%s: loading out-of-tree module taints kernel.\n", 1976 mod->name); 1977 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 1978 } 1979 1980 check_modinfo_retpoline(mod, info); 1981 1982 if (get_modinfo(info, "staging")) { 1983 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 1984 pr_warn("%s: module is from the staging directory, the quality " 1985 "is unknown, you have been warned.\n", mod->name); 1986 } 1987 1988 err = check_modinfo_livepatch(mod, info); 1989 if (err) 1990 return err; 1991 1992 /* Set up license info based on the info section */ 1993 set_license(mod, get_modinfo(info, "license")); 1994 1995 if (get_modinfo(info, "test")) { 1996 if (!test_taint(TAINT_TEST)) 1997 pr_warn("%s: loading test module taints kernel.\n", 1998 mod->name); 1999 add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK); 2000 } 2001 2002 return 0; 2003 } 2004 2005 static int find_module_sections(struct module *mod, struct load_info *info) 2006 { 2007 mod->kp = section_objs(info, "__param", 2008 sizeof(*mod->kp), &mod->num_kp); 2009 mod->syms = section_objs(info, "__ksymtab", 2010 sizeof(*mod->syms), &mod->num_syms); 2011 mod->crcs = section_addr(info, "__kcrctab"); 2012 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 2013 sizeof(*mod->gpl_syms), 2014 &mod->num_gpl_syms); 2015 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 2016 2017 #ifdef CONFIG_CONSTRUCTORS 2018 mod->ctors = section_objs(info, ".ctors", 2019 sizeof(*mod->ctors), &mod->num_ctors); 2020 if (!mod->ctors) 2021 mod->ctors = section_objs(info, ".init_array", 2022 sizeof(*mod->ctors), &mod->num_ctors); 2023 else if (find_sec(info, ".init_array")) { 2024 /* 2025 * This shouldn't happen with same compiler and binutils 2026 * building all parts of the module. 
2027 */ 2028 pr_warn("%s: has both .ctors and .init_array.\n", 2029 mod->name); 2030 return -EINVAL; 2031 } 2032 #endif 2033 2034 mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 2035 &mod->noinstr_text_size); 2036 2037 #ifdef CONFIG_TRACEPOINTS 2038 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 2039 sizeof(*mod->tracepoints_ptrs), 2040 &mod->num_tracepoints); 2041 #endif 2042 #ifdef CONFIG_TREE_SRCU 2043 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 2044 sizeof(*mod->srcu_struct_ptrs), 2045 &mod->num_srcu_structs); 2046 #endif 2047 #ifdef CONFIG_BPF_EVENTS 2048 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 2049 sizeof(*mod->bpf_raw_events), 2050 &mod->num_bpf_raw_events); 2051 #endif 2052 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2053 mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 2054 #endif 2055 #ifdef CONFIG_JUMP_LABEL 2056 mod->jump_entries = section_objs(info, "__jump_table", 2057 sizeof(*mod->jump_entries), 2058 &mod->num_jump_entries); 2059 #endif 2060 #ifdef CONFIG_EVENT_TRACING 2061 mod->trace_events = section_objs(info, "_ftrace_events", 2062 sizeof(*mod->trace_events), 2063 &mod->num_trace_events); 2064 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 2065 sizeof(*mod->trace_evals), 2066 &mod->num_trace_evals); 2067 #endif 2068 #ifdef CONFIG_TRACING 2069 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2070 sizeof(*mod->trace_bprintk_fmt_start), 2071 &mod->num_trace_bprintk_fmt); 2072 #endif 2073 #ifdef CONFIG_FTRACE_MCOUNT_RECORD 2074 /* sechdrs[0].sh_size is always zero */ 2075 mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 2076 sizeof(*mod->ftrace_callsites), 2077 &mod->num_ftrace_callsites); 2078 #endif 2079 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 2080 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 2081 sizeof(*mod->ei_funcs), 2082 &mod->num_ei_funcs); 2083 #endif 2084 #ifdef CONFIG_KPROBES 2085 mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 2086 &mod->kprobes_text_size); 2087 mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", 2088 sizeof(unsigned long), 2089 &mod->num_kprobe_blacklist); 2090 #endif 2091 #ifdef CONFIG_PRINTK_INDEX 2092 mod->printk_index_start = section_objs(info, ".printk_index", 2093 sizeof(*mod->printk_index_start), 2094 &mod->printk_index_size); 2095 #endif 2096 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 2097 mod->static_call_sites = section_objs(info, ".static_call_sites", 2098 sizeof(*mod->static_call_sites), 2099 &mod->num_static_call_sites); 2100 #endif 2101 #if IS_ENABLED(CONFIG_KUNIT) 2102 mod->kunit_suites = section_objs(info, ".kunit_test_suites", 2103 sizeof(*mod->kunit_suites), 2104 &mod->num_kunit_suites); 2105 #endif 2106 2107 mod->extable = section_objs(info, "__ex_table", 2108 sizeof(*mod->extable), &mod->num_exentries); 2109 2110 if (section_addr(info, "__obsparm")) 2111 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 2112 2113 info->dyndbg.descs = section_objs(info, "__dyndbg", 2114 sizeof(*info->dyndbg.descs), &info->dyndbg.num_descs); 2115 info->dyndbg.classes = section_objs(info, "__dyndbg_classes", 2116 sizeof(*info->dyndbg.classes), &info->dyndbg.num_classes); 2117 2118 return 0; 2119 } 2120 2121 static int move_module(struct module *mod, struct load_info *info) 2122 { 2123 int i; 2124 void *ptr; 2125 2126 /* Do the allocs. 
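 * Core and (if present) init each get their own module_alloc() region, plus a separate vzalloc() data region when CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC is set; every SHF_ALLOC section is then copied into its final slot below.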
*/ 2127 ptr = module_alloc(mod->core_layout.size); 2128 /* 2129 * The pointer to this block is stored in the module structure 2130 * which is inside the block. Just mark it as not being a 2131 * leak. 2132 */ 2133 kmemleak_not_leak(ptr); 2134 if (!ptr) 2135 return -ENOMEM; 2136 2137 memset(ptr, 0, mod->core_layout.size); 2138 mod->core_layout.base = ptr; 2139 2140 if (mod->init_layout.size) { 2141 ptr = module_alloc(mod->init_layout.size); 2142 /* 2143 * The pointer to this block is stored in the module structure 2144 * which is inside the block. This block doesn't need to be 2145 * scanned as it contains data and code that will be freed 2146 * after the module is initialized. 2147 */ 2148 kmemleak_ignore(ptr); 2149 if (!ptr) { 2150 module_memfree(mod->core_layout.base); 2151 return -ENOMEM; 2152 } 2153 memset(ptr, 0, mod->init_layout.size); 2154 mod->init_layout.base = ptr; 2155 } else 2156 mod->init_layout.base = NULL; 2157 2158 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 2159 /* Do the allocs. */ 2160 ptr = vzalloc(mod->data_layout.size); 2161 /* 2162 * The pointer to this block is stored in the module structure 2163 * which is inside the block. Just mark it as not being a 2164 * leak. 2165 */ 2166 kmemleak_not_leak(ptr); 2167 if (!ptr) { 2168 module_memfree(mod->core_layout.base); 2169 module_memfree(mod->init_layout.base); 2170 return -ENOMEM; 2171 } 2172 2173 mod->data_layout.base = ptr; 2174 #endif 2175 /* Transfer each section which specifies SHF_ALLOC */ 2176 pr_debug("final section addresses:\n"); 2177 for (i = 0; i < info->hdr->e_shnum; i++) { 2178 void *dest; 2179 Elf_Shdr *shdr = &info->sechdrs[i]; 2180 2181 if (!(shdr->sh_flags & SHF_ALLOC)) 2182 continue; 2183 2184 if (shdr->sh_entsize & INIT_OFFSET_MASK) 2185 dest = mod->init_layout.base 2186 + (shdr->sh_entsize & ~INIT_OFFSET_MASK); 2187 else if (!(shdr->sh_flags & SHF_EXECINSTR)) 2188 dest = mod->data_layout.base + shdr->sh_entsize; 2189 else 2190 dest = mod->core_layout.base + shdr->sh_entsize; 2191 2192 if (shdr->sh_type != SHT_NOBITS) 2193 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2194 /* Update sh_addr to point to copy in image. */ 2195 shdr->sh_addr = (unsigned long)dest; 2196 pr_debug("\t0x%lx %s\n", 2197 (long)shdr->sh_addr, info->secstrings + shdr->sh_name); 2198 } 2199 2200 return 0; 2201 } 2202 2203 static int check_module_license_and_versions(struct module *mod) 2204 { 2205 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); 2206 2207 /* 2208 * ndiswrapper is under GPL by itself, but loads proprietary modules. 2209 * Don't use add_taint_module(), as it would prevent ndiswrapper from 2210 * using GPL-only symbols it needs. 
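 * (resolve_symbol() decides GPL-only access from the per-module TAINT_PROPRIETARY_MODULE bit in mod->taints, so setting only the global taint flag here leaves ndiswrapper's symbol lookups alone.)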
2211 */ 2212 if (strcmp(mod->name, "ndiswrapper") == 0) 2213 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 2214 2215 /* driverloader was caught wrongly pretending to be under GPL */ 2216 if (strcmp(mod->name, "driverloader") == 0) 2217 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2218 LOCKDEP_NOW_UNRELIABLE); 2219 2220 /* lve claims to be GPL but upstream won't provide source */ 2221 if (strcmp(mod->name, "lve") == 0) 2222 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2223 LOCKDEP_NOW_UNRELIABLE); 2224 2225 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 2226 pr_warn("%s: module license taints kernel.\n", mod->name); 2227 2228 #ifdef CONFIG_MODVERSIONS 2229 if ((mod->num_syms && !mod->crcs) || 2230 (mod->num_gpl_syms && !mod->gpl_crcs)) { 2231 return try_to_force_load(mod, 2232 "no versions for exported symbols"); 2233 } 2234 #endif 2235 return 0; 2236 } 2237 2238 static void flush_module_icache(const struct module *mod) 2239 { 2240 /* 2241 * Flush the instruction cache, since we've played with text. 2242 * Do it before processing of module parameters, so the module 2243 * can provide parameter accessor functions of its own. 2244 */ 2245 if (mod->init_layout.base) 2246 flush_icache_range((unsigned long)mod->init_layout.base, 2247 (unsigned long)mod->init_layout.base 2248 + mod->init_layout.size); 2249 flush_icache_range((unsigned long)mod->core_layout.base, 2250 (unsigned long)mod->core_layout.base + mod->core_layout.size); 2251 } 2252 2253 bool __weak module_elf_check_arch(Elf_Ehdr *hdr) 2254 { 2255 return true; 2256 } 2257 2258 int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 2259 Elf_Shdr *sechdrs, 2260 char *secstrings, 2261 struct module *mod) 2262 { 2263 return 0; 2264 } 2265 2266 /* module_blacklist is a comma-separated list of module names */ 2267 static char *module_blacklist; 2268 static bool blacklisted(const char *module_name) 2269 { 2270 const char *p; 2271 size_t len; 2272 2273 if (!module_blacklist) 2274 return false; 2275 2276 for (p = module_blacklist; *p; p += len) { 2277 len = strcspn(p, ","); 2278 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 2279 return true; 2280 if (p[len] == ',') 2281 len++; 2282 } 2283 return false; 2284 } 2285 core_param(module_blacklist, module_blacklist, charp, 0400); 2286 2287 static struct module *layout_and_allocate(struct load_info *info, int flags) 2288 { 2289 struct module *mod; 2290 unsigned int ndx; 2291 int err; 2292 2293 err = check_modinfo(info->mod, info, flags); 2294 if (err) 2295 return ERR_PTR(err); 2296 2297 /* Allow arches to frob section contents and sizes. */ 2298 err = module_frob_arch_sections(info->hdr, info->sechdrs, 2299 info->secstrings, info->mod); 2300 if (err < 0) 2301 return ERR_PTR(err); 2302 2303 err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 2304 info->secstrings, info->mod); 2305 if (err < 0) 2306 return ERR_PTR(err); 2307 2308 /* We will do a special allocation for per-cpu sections later. */ 2309 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 2310 2311 /* 2312 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that 2313 * layout_sections() can put it in the right place. 2314 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 
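 * The section stays writable while the module runs its init; module_enable_ro(mod, true) in do_init_module() is what finally write-protects it.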
2315 */ 2316 ndx = find_sec(info, ".data..ro_after_init"); 2317 if (ndx) 2318 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2319 /* 2320 * Mark the __jump_table section as ro_after_init as well: these data 2321 * structures are never modified, with the exception of entries that 2322 * refer to code in the __init section, which are annotated as such 2323 * at module load time. 2324 */ 2325 ndx = find_sec(info, "__jump_table"); 2326 if (ndx) 2327 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2328 2329 /* 2330 * Determine total sizes, and put offsets in sh_entsize. For now 2331 * this is done generically; there doesn't appear to be any 2332 * special cases for the architectures. 2333 */ 2334 layout_sections(info->mod, info); 2335 layout_symtab(info->mod, info); 2336 2337 /* Allocate and move to the final place */ 2338 err = move_module(info->mod, info); 2339 if (err) 2340 return ERR_PTR(err); 2341 2342 /* Module has been copied to its final place now: return it. */ 2343 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 2344 kmemleak_load_module(mod, info); 2345 return mod; 2346 } 2347 2348 /* mod is no longer valid after this! */ 2349 static void module_deallocate(struct module *mod, struct load_info *info) 2350 { 2351 percpu_modfree(mod); 2352 module_arch_freeing_init(mod); 2353 module_memfree(mod->init_layout.base); 2354 module_memfree(mod->core_layout.base); 2355 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 2356 vfree(mod->data_layout.base); 2357 #endif 2358 } 2359 2360 int __weak module_finalize(const Elf_Ehdr *hdr, 2361 const Elf_Shdr *sechdrs, 2362 struct module *me) 2363 { 2364 return 0; 2365 } 2366 2367 static int post_relocation(struct module *mod, const struct load_info *info) 2368 { 2369 /* Sort exception table now relocations are done. */ 2370 sort_extable(mod->extable, mod->extable + mod->num_exentries); 2371 2372 /* Copy relocated percpu area over. */ 2373 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, 2374 info->sechdrs[info->index.pcpu].sh_size); 2375 2376 /* Setup kallsyms-specific fields. */ 2377 add_kallsyms(mod, info); 2378 2379 /* Arch-specific module finalizing. */ 2380 return module_finalize(info->hdr, info->sechdrs, mod); 2381 } 2382 2383 /* Is this module of this name done loading? No locks held. */ 2384 static bool finished_loading(const char *name) 2385 { 2386 struct module *mod; 2387 bool ret; 2388 2389 /* 2390 * The module_mutex should not be a heavily contended lock; 2391 * if we get the occasional sleep here, we'll go an extra iteration 2392 * in the wait_event_interruptible(), which is harmless. 2393 */ 2394 sched_annotate_sleep(); 2395 mutex_lock(&module_mutex); 2396 mod = find_module_all(name, strlen(name), true); 2397 ret = !mod || mod->state == MODULE_STATE_LIVE 2398 || mod->state == MODULE_STATE_GOING; 2399 mutex_unlock(&module_mutex); 2400 2401 return ret; 2402 } 2403 2404 /* Call module constructors. 
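 * These are the .ctors/.init_array entries gathered by find_module_sections(); do_init_module() runs them just before calling the module's own init function.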
*/ 2405 static void do_mod_ctors(struct module *mod) 2406 { 2407 #ifdef CONFIG_CONSTRUCTORS 2408 unsigned long i; 2409 2410 for (i = 0; i < mod->num_ctors; i++) 2411 mod->ctors[i](); 2412 #endif 2413 } 2414 2415 /* For freeing module_init on success, in case kallsyms traversing */ 2416 struct mod_initfree { 2417 struct llist_node node; 2418 void *module_init; 2419 }; 2420 2421 static void do_free_init(struct work_struct *w) 2422 { 2423 struct llist_node *pos, *n, *list; 2424 struct mod_initfree *initfree; 2425 2426 list = llist_del_all(&init_free_list); 2427 2428 synchronize_rcu(); 2429 2430 llist_for_each_safe(pos, n, list) { 2431 initfree = container_of(pos, struct mod_initfree, node); 2432 module_memfree(initfree->module_init); 2433 kfree(initfree); 2434 } 2435 } 2436 2437 #undef MODULE_PARAM_PREFIX 2438 #define MODULE_PARAM_PREFIX "module." 2439 /* Default value for module->async_probe_requested */ 2440 static bool async_probe; 2441 module_param(async_probe, bool, 0644); 2442 2443 /* 2444 * This is where the real work happens. 2445 * 2446 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb 2447 * helper command 'lx-symbols'. 2448 */ 2449 static noinline int do_init_module(struct module *mod) 2450 { 2451 int ret = 0; 2452 struct mod_initfree *freeinit; 2453 2454 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 2455 if (!freeinit) { 2456 ret = -ENOMEM; 2457 goto fail; 2458 } 2459 freeinit->module_init = mod->init_layout.base; 2460 2461 do_mod_ctors(mod); 2462 /* Start the module */ 2463 if (mod->init != NULL) 2464 ret = do_one_initcall(mod->init); 2465 if (ret < 0) { 2466 goto fail_free_freeinit; 2467 } 2468 if (ret > 0) { 2469 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 2470 "follow 0/-E convention\n" 2471 "%s: loading module anyway...\n", 2472 __func__, mod->name, ret, __func__); 2473 dump_stack(); 2474 } 2475 2476 /* Now it's a first class citizen! */ 2477 mod->state = MODULE_STATE_LIVE; 2478 blocking_notifier_call_chain(&module_notify_list, 2479 MODULE_STATE_LIVE, mod); 2480 2481 /* Delay uevent until module has finished its init routine */ 2482 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 2483 2484 /* 2485 * We need to finish all async code before the module init sequence 2486 * is done. This has potential to deadlock if synchronous module 2487 * loading is requested from async (which is not allowed!). 2488 * 2489 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 2490 * request_module() from async workers") for more details. 2491 */ 2492 if (!mod->async_probe_requested) 2493 async_synchronize_full(); 2494 2495 ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + 2496 mod->init_layout.size); 2497 mutex_lock(&module_mutex); 2498 /* Drop initial reference. */ 2499 module_put(mod); 2500 trim_init_extable(mod); 2501 #ifdef CONFIG_KALLSYMS 2502 /* Switch to core kallsyms now init is done: kallsyms may be walking! 
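 * The full symbol table was laid out in the init region that is about to be freed, so publish the trimmed core copy first; lockless readers dereference mod->kallsyms under RCU, hence rcu_assign_pointer().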
*/ 2503 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 2504 #endif 2505 module_enable_ro(mod, true); 2506 mod_tree_remove_init(mod); 2507 module_arch_freeing_init(mod); 2508 mod->init_layout.base = NULL; 2509 mod->init_layout.size = 0; 2510 mod->init_layout.ro_size = 0; 2511 mod->init_layout.ro_after_init_size = 0; 2512 mod->init_layout.text_size = 0; 2513 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2514 /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ 2515 mod->btf_data = NULL; 2516 #endif 2517 /* 2518 * We want to free module_init, but be aware that kallsyms may be 2519 * walking this with preempt disabled. In all the failure paths, we 2520 * call synchronize_rcu(), but we don't want to slow down the success 2521 * path. module_memfree() cannot be called in an interrupt, so do the 2522 * work and call synchronize_rcu() in a work queue. 2523 * 2524 * Note that module_alloc() on most architectures creates W+X page 2525 * mappings which won't be cleaned up until do_free_init() runs. Any 2526 * code such as mark_rodata_ro() which depends on those mappings to 2527 * be cleaned up needs to sync with the queued work - ie 2528 * rcu_barrier() 2529 */ 2530 if (llist_add(&freeinit->node, &init_free_list)) 2531 schedule_work(&init_free_wq); 2532 2533 mutex_unlock(&module_mutex); 2534 wake_up_all(&module_wq); 2535 2536 return 0; 2537 2538 fail_free_freeinit: 2539 kfree(freeinit); 2540 fail: 2541 /* Try to protect us from buggy refcounters. */ 2542 mod->state = MODULE_STATE_GOING; 2543 synchronize_rcu(); 2544 module_put(mod); 2545 blocking_notifier_call_chain(&module_notify_list, 2546 MODULE_STATE_GOING, mod); 2547 klp_module_going(mod); 2548 ftrace_release_mod(mod); 2549 free_module(mod); 2550 wake_up_all(&module_wq); 2551 return ret; 2552 } 2553 2554 static int may_init_module(void) 2555 { 2556 if (!capable(CAP_SYS_MODULE) || modules_disabled) 2557 return -EPERM; 2558 2559 return 0; 2560 } 2561 2562 /* 2563 * We try to place it in the list now to make sure it's unique before 2564 * we dedicate too many resources. In particular, temporary percpu 2565 * memory exhaustion. 2566 */ 2567 static int add_unformed_module(struct module *mod) 2568 { 2569 int err; 2570 struct module *old; 2571 2572 mod->state = MODULE_STATE_UNFORMED; 2573 2574 mutex_lock(&module_mutex); 2575 old = find_module_all(mod->name, strlen(mod->name), true); 2576 if (old != NULL) { 2577 if (old->state == MODULE_STATE_COMING 2578 || old->state == MODULE_STATE_UNFORMED) { 2579 /* Wait in case it fails to load. */ 2580 mutex_unlock(&module_mutex); 2581 err = wait_event_interruptible(module_wq, 2582 finished_loading(mod->name)); 2583 if (err) 2584 goto out_unlocked; 2585 2586 /* The module might have gone in the meantime. */ 2587 mutex_lock(&module_mutex); 2588 old = find_module_all(mod->name, strlen(mod->name), 2589 true); 2590 } 2591 2592 /* 2593 * We are here only when the same module was being loaded. Do 2594 * not try to load it again right now. It prevents long delays 2595 * caused by serialized module load failures. It might happen 2596 * when more devices of the same type trigger load of 2597 * a particular module. 
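 * Report -EEXIST when the earlier copy is already live, so the caller treats the module as loaded; anything else (still loading, load failed, or it vanished while we waited) gets -EBUSY and the caller may retry later.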
2598 */ 2599 if (old && old->state == MODULE_STATE_LIVE) 2600 err = -EEXIST; 2601 else 2602 err = -EBUSY; 2603 goto out; 2604 } 2605 mod_update_bounds(mod); 2606 list_add_rcu(&mod->list, &modules); 2607 mod_tree_insert(mod); 2608 err = 0; 2609 2610 out: 2611 mutex_unlock(&module_mutex); 2612 out_unlocked: 2613 return err; 2614 } 2615 2616 static int complete_formation(struct module *mod, struct load_info *info) 2617 { 2618 int err; 2619 2620 mutex_lock(&module_mutex); 2621 2622 /* Find duplicate symbols (must be called under lock). */ 2623 err = verify_exported_symbols(mod); 2624 if (err < 0) 2625 goto out; 2626 2627 /* These rely on module_mutex for list integrity. */ 2628 module_bug_finalize(info->hdr, info->sechdrs, mod); 2629 module_cfi_finalize(info->hdr, info->sechdrs, mod); 2630 2631 if (module_check_misalignment(mod)) 2632 goto out_misaligned; 2633 2634 module_enable_ro(mod, false); 2635 module_enable_nx(mod); 2636 module_enable_x(mod); 2637 2638 /* 2639 * Mark state as coming so strong_try_module_get() ignores us, 2640 * but kallsyms etc. can see us. 2641 */ 2642 mod->state = MODULE_STATE_COMING; 2643 mutex_unlock(&module_mutex); 2644 2645 return 0; 2646 2647 out_misaligned: 2648 err = -EINVAL; 2649 out: 2650 mutex_unlock(&module_mutex); 2651 return err; 2652 } 2653 2654 static int prepare_coming_module(struct module *mod) 2655 { 2656 int err; 2657 2658 ftrace_module_enable(mod); 2659 err = klp_module_coming(mod); 2660 if (err) 2661 return err; 2662 2663 err = blocking_notifier_call_chain_robust(&module_notify_list, 2664 MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 2665 err = notifier_to_errno(err); 2666 if (err) 2667 klp_module_going(mod); 2668 2669 return err; 2670 } 2671 2672 static int unknown_module_param_cb(char *param, char *val, const char *modname, 2673 void *arg) 2674 { 2675 struct module *mod = arg; 2676 int ret; 2677 2678 if (strcmp(param, "async_probe") == 0) { 2679 if (kstrtobool(val, &mod->async_probe_requested)) 2680 mod->async_probe_requested = true; 2681 return 0; 2682 } 2683 2684 /* Check for magic 'dyndbg' arg */ 2685 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 2686 if (ret != 0) 2687 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 2688 return 0; 2689 } 2690 2691 /* 2692 * Allocate and load the module: note that size of section 0 is always 2693 * zero, and we rely on this for optional sections. 2694 */ 2695 static int load_module(struct load_info *info, const char __user *uargs, 2696 int flags) 2697 { 2698 struct module *mod; 2699 long err = 0; 2700 char *after_dashes; 2701 2702 /* 2703 * Do the signature check (if any) first. All that 2704 * the signature check needs is info->len, it does 2705 * not need any of the section info. That can be 2706 * set up later. This will minimize the chances 2707 * of a corrupt module causing problems before 2708 * we even get to the signature check. 2709 * 2710 * The check will also adjust info->len by stripping 2711 * off the sig length at the end of the module, making 2712 * checks against info->len more correct. 2713 */ 2714 err = module_sig_check(info, flags); 2715 if (err) 2716 goto free_copy; 2717 2718 /* 2719 * Do basic sanity checks against the ELF header and 2720 * sections. 2721 */ 2722 err = elf_validity_check(info); 2723 if (err) 2724 goto free_copy; 2725 2726 /* 2727 * Everything checks out, so set up the section info 2728 * in the info structure. 
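 * setup_load_info() records the section indices for .modinfo, the symbol and string tables, .gnu.linkonce.this_module, __versions and the per-cpu area, and points info->mod at the temporary struct module inside the copied image.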
2729 */ 2730 err = setup_load_info(info, flags); 2731 if (err) 2732 goto free_copy; 2733 2734 /* 2735 * Now that we know we have the correct module name, check 2736 * if it's blacklisted. 2737 */ 2738 if (blacklisted(info->name)) { 2739 err = -EPERM; 2740 pr_err("Module %s is blacklisted\n", info->name); 2741 goto free_copy; 2742 } 2743 2744 err = rewrite_section_headers(info, flags); 2745 if (err) 2746 goto free_copy; 2747 2748 /* Check module struct version now, before we try to use module. */ 2749 if (!check_modstruct_version(info, info->mod)) { 2750 err = -ENOEXEC; 2751 goto free_copy; 2752 } 2753 2754 /* Figure out module layout, and allocate all the memory. */ 2755 mod = layout_and_allocate(info, flags); 2756 if (IS_ERR(mod)) { 2757 err = PTR_ERR(mod); 2758 goto free_copy; 2759 } 2760 2761 audit_log_kern_module(mod->name); 2762 2763 /* Reserve our place in the list. */ 2764 err = add_unformed_module(mod); 2765 if (err) 2766 goto free_module; 2767 2768 #ifdef CONFIG_MODULE_SIG 2769 mod->sig_ok = info->sig_ok; 2770 if (!mod->sig_ok) { 2771 pr_notice_once("%s: module verification failed: signature " 2772 "and/or required key missing - tainting " 2773 "kernel\n", mod->name); 2774 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); 2775 } 2776 #endif 2777 2778 /* To avoid stressing percpu allocator, do this once we're unique. */ 2779 err = percpu_modalloc(mod, info); 2780 if (err) 2781 goto unlink_mod; 2782 2783 /* Now module is in final location, initialize linked lists, etc. */ 2784 err = module_unload_init(mod); 2785 if (err) 2786 goto unlink_mod; 2787 2788 init_param_lock(mod); 2789 2790 /* 2791 * Now we've got everything in the final locations, we can 2792 * find optional sections. 2793 */ 2794 err = find_module_sections(mod, info); 2795 if (err) 2796 goto free_unload; 2797 2798 err = check_module_license_and_versions(mod); 2799 if (err) 2800 goto free_unload; 2801 2802 /* Set up MODINFO_ATTR fields */ 2803 setup_modinfo(mod, info); 2804 2805 /* Fix up syms, so that st_value is a pointer to location. */ 2806 err = simplify_symbols(mod, info); 2807 if (err < 0) 2808 goto free_modinfo; 2809 2810 err = apply_relocations(mod, info); 2811 if (err < 0) 2812 goto free_modinfo; 2813 2814 err = post_relocation(mod, info); 2815 if (err < 0) 2816 goto free_modinfo; 2817 2818 flush_module_icache(mod); 2819 2820 /* Now copy in args */ 2821 mod->args = strndup_user(uargs, ~0UL >> 1); 2822 if (IS_ERR(mod->args)) { 2823 err = PTR_ERR(mod->args); 2824 goto free_arch_cleanup; 2825 } 2826 2827 init_build_id(mod, info); 2828 dynamic_debug_setup(mod, &info->dyndbg); 2829 2830 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 2831 ftrace_module_init(mod); 2832 2833 /* Finally it's fully formed, ready to start executing. */ 2834 err = complete_formation(mod, info); 2835 if (err) 2836 goto ddebug_cleanup; 2837 2838 err = prepare_coming_module(mod); 2839 if (err) 2840 goto bug_cleanup; 2841 2842 mod->async_probe_requested = async_probe; 2843 2844 /* Module is ready to execute: parsing args may do that. */ 2845 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 2846 -32768, 32767, mod, 2847 unknown_module_param_cb); 2848 if (IS_ERR(after_dashes)) { 2849 err = PTR_ERR(after_dashes); 2850 goto coming_cleanup; 2851 } else if (after_dashes) { 2852 pr_warn("%s: parameters '%s' after `--' ignored\n", 2853 mod->name, after_dashes); 2854 } 2855 2856 /* Link in to sysfs. 
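 * mod_sysfs_setup() creates /sys/module/<name>/ with its parameter and section attributes; any failure from this point onwards has to unwind through mod_sysfs_teardown().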
*/ 2857 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); 2858 if (err < 0) 2859 goto coming_cleanup; 2860 2861 if (is_livepatch_module(mod)) { 2862 err = copy_module_elf(mod, info); 2863 if (err < 0) 2864 goto sysfs_cleanup; 2865 } 2866 2867 /* Get rid of temporary copy. */ 2868 free_copy(info, flags); 2869 2870 /* Done! */ 2871 trace_module_load(mod); 2872 2873 return do_init_module(mod); 2874 2875 sysfs_cleanup: 2876 mod_sysfs_teardown(mod); 2877 coming_cleanup: 2878 mod->state = MODULE_STATE_GOING; 2879 destroy_params(mod->kp, mod->num_kp); 2880 blocking_notifier_call_chain(&module_notify_list, 2881 MODULE_STATE_GOING, mod); 2882 klp_module_going(mod); 2883 bug_cleanup: 2884 mod->state = MODULE_STATE_GOING; 2885 /* module_bug_cleanup needs module_mutex protection */ 2886 mutex_lock(&module_mutex); 2887 module_bug_cleanup(mod); 2888 mutex_unlock(&module_mutex); 2889 2890 ddebug_cleanup: 2891 ftrace_release_mod(mod); 2892 dynamic_debug_remove(mod, &info->dyndbg); 2893 synchronize_rcu(); 2894 kfree(mod->args); 2895 free_arch_cleanup: 2896 module_arch_cleanup(mod); 2897 free_modinfo: 2898 free_modinfo(mod); 2899 free_unload: 2900 module_unload_free(mod); 2901 unlink_mod: 2902 mutex_lock(&module_mutex); 2903 /* Unlink carefully: kallsyms could be walking list. */ 2904 list_del_rcu(&mod->list); 2905 mod_tree_remove(mod); 2906 wake_up_all(&module_wq); 2907 /* Wait for RCU-sched synchronizing before releasing mod->list. */ 2908 synchronize_rcu(); 2909 mutex_unlock(&module_mutex); 2910 free_module: 2911 /* Free lock-classes; relies on the preceding sync_rcu() */ 2912 lockdep_free_key_range(mod->data_layout.base, mod->data_layout.size); 2913 2914 module_deallocate(mod, info); 2915 free_copy: 2916 free_copy(info, flags); 2917 return err; 2918 } 2919 2920 SYSCALL_DEFINE3(init_module, void __user *, umod, 2921 unsigned long, len, const char __user *, uargs) 2922 { 2923 int err; 2924 struct load_info info = { }; 2925 2926 err = may_init_module(); 2927 if (err) 2928 return err; 2929 2930 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", 2931 umod, len, uargs); 2932 2933 err = copy_module_from_user(umod, len, &info); 2934 if (err) 2935 return err; 2936 2937 return load_module(&info, uargs, 0); 2938 } 2939 2940 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) 2941 { 2942 struct load_info info = { }; 2943 void *buf = NULL; 2944 int len; 2945 int err; 2946 2947 err = may_init_module(); 2948 if (err) 2949 return err; 2950 2951 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); 2952 2953 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS 2954 |MODULE_INIT_IGNORE_VERMAGIC 2955 |MODULE_INIT_COMPRESSED_FILE)) 2956 return -EINVAL; 2957 2958 len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL, 2959 READING_MODULE); 2960 if (len < 0) 2961 return len; 2962 2963 if (flags & MODULE_INIT_COMPRESSED_FILE) { 2964 err = module_decompress(&info, buf, len); 2965 vfree(buf); /* compressed data is no longer needed */ 2966 if (err) 2967 return err; 2968 } else { 2969 info.hdr = buf; 2970 info.len = len; 2971 } 2972 2973 return load_module(&info, uargs, flags); 2974 } 2975 2976 static inline int within(unsigned long addr, void *start, unsigned long size) 2977 { 2978 return ((void *)addr >= start && (void *)addr < start + size); 2979 } 2980 2981 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! 
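 * Worst case output is '(', one character per taint flag, a '+' or '-' load-state marker and ')', plus the terminating NUL; e.g. an unsigned out-of-tree module that is still coming up would typically show up as something like "(OE+)".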
*/ 2982 char *module_flags(struct module *mod, char *buf, bool show_state) 2983 { 2984 int bx = 0; 2985 2986 BUG_ON(mod->state == MODULE_STATE_UNFORMED); 2987 if (!mod->taints && !show_state) 2988 goto out; 2989 if (mod->taints || 2990 mod->state == MODULE_STATE_GOING || 2991 mod->state == MODULE_STATE_COMING) { 2992 buf[bx++] = '('; 2993 bx += module_flags_taint(mod->taints, buf + bx); 2994 /* Show a - for module-is-being-unloaded */ 2995 if (mod->state == MODULE_STATE_GOING && show_state) 2996 buf[bx++] = '-'; 2997 /* Show a + for module-is-being-loaded */ 2998 if (mod->state == MODULE_STATE_COMING && show_state) 2999 buf[bx++] = '+'; 3000 buf[bx++] = ')'; 3001 } 3002 out: 3003 buf[bx] = '\0'; 3004 3005 return buf; 3006 } 3007 3008 /* Given an address, look for it in the module exception tables. */ 3009 const struct exception_table_entry *search_module_extables(unsigned long addr) 3010 { 3011 const struct exception_table_entry *e = NULL; 3012 struct module *mod; 3013 3014 preempt_disable(); 3015 mod = __module_address(addr); 3016 if (!mod) 3017 goto out; 3018 3019 if (!mod->num_exentries) 3020 goto out; 3021 3022 e = search_extable(mod->extable, 3023 mod->num_exentries, 3024 addr); 3025 out: 3026 preempt_enable(); 3027 3028 /* 3029 * Now, if we found one, we are running inside it now, hence 3030 * we cannot unload the module, hence no refcnt needed. 3031 */ 3032 return e; 3033 } 3034 3035 /** 3036 * is_module_address() - is this address inside a module? 3037 * @addr: the address to check. 3038 * 3039 * See is_module_text_address() if you simply want to see if the address 3040 * is code (not data). 3041 */ 3042 bool is_module_address(unsigned long addr) 3043 { 3044 bool ret; 3045 3046 preempt_disable(); 3047 ret = __module_address(addr) != NULL; 3048 preempt_enable(); 3049 3050 return ret; 3051 } 3052 3053 /** 3054 * __module_address() - get the module which contains an address. 3055 * @addr: the address. 3056 * 3057 * Must be called with preempt disabled or module mutex held so that 3058 * module doesn't get freed during this. 3059 */ 3060 struct module *__module_address(unsigned long addr) 3061 { 3062 struct module *mod; 3063 struct mod_tree_root *tree; 3064 3065 if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max) 3066 tree = &mod_tree; 3067 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC 3068 else if (addr >= mod_data_tree.addr_min && addr <= mod_data_tree.addr_max) 3069 tree = &mod_data_tree; 3070 #endif 3071 else 3072 return NULL; 3073 3074 module_assert_mutex_or_preempt(); 3075 3076 mod = mod_find(addr, tree); 3077 if (mod) { 3078 BUG_ON(!within_module(addr, mod)); 3079 if (mod->state == MODULE_STATE_UNFORMED) 3080 mod = NULL; 3081 } 3082 return mod; 3083 } 3084 3085 /** 3086 * is_module_text_address() - is this address inside module code? 3087 * @addr: the address to check. 3088 * 3089 * See is_module_address() if you simply want to see if the address is 3090 * anywhere in a module. See kernel_text_address() for testing if an 3091 * address corresponds to kernel or module code. 3092 */ 3093 bool is_module_text_address(unsigned long addr) 3094 { 3095 bool ret; 3096 3097 preempt_disable(); 3098 ret = __module_text_address(addr) != NULL; 3099 preempt_enable(); 3100 3101 return ret; 3102 } 3103 3104 /** 3105 * __module_text_address() - get the module whose code contains an address. 3106 * @addr: the address. 3107 * 3108 * Must be called with preempt disabled or module mutex held so that 3109 * module doesn't get freed during this. 
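 * is_module_text_address() above shows the usual pattern: wrap the call in preempt_disable()/preempt_enable() when no stronger protection is held.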
3110 */ 3111 struct module *__module_text_address(unsigned long addr) 3112 { 3113 struct module *mod = __module_address(addr); 3114 if (mod) { 3115 /* Make sure it's within the text section. */ 3116 if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) 3117 && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) 3118 mod = NULL; 3119 } 3120 return mod; 3121 } 3122 3123 /* Don't grab lock, we're oopsing. */ 3124 void print_modules(void) 3125 { 3126 struct module *mod; 3127 char buf[MODULE_FLAGS_BUF_SIZE]; 3128 3129 printk(KERN_DEFAULT "Modules linked in:"); 3130 /* Most callers should already have preempt disabled, but make sure */ 3131 preempt_disable(); 3132 list_for_each_entry_rcu(mod, &modules, list) { 3133 if (mod->state == MODULE_STATE_UNFORMED) 3134 continue; 3135 pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); 3136 } 3137 3138 print_unloaded_tainted_modules(); 3139 preempt_enable(); 3140 if (last_unloaded_module.name[0]) 3141 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, 3142 last_unloaded_module.taints); 3143 pr_cont("\n"); 3144 } 3145
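/*
 * Illustrative sketch, not part of the kernel proper: a minimal user-space
 * loader that drives the finit_module() system call defined above through
 * the raw syscall interface, assuming <sys/syscall.h> defines
 * SYS_finit_module. Real loaders go through libkmod/modprobe, which also
 * handles dependencies, decompression and the MODULE_INIT_* flags.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		int fd;
 *
 *		if (argc != 2) {
 *			fprintf(stderr, "usage: %s <module.ko>\n", argv[0]);
 *			return 1;
 *		}
 *
 *		// The kernel reads the whole file itself (kernel_read_file_from_fd).
 *		fd = open(argv[1], O_RDONLY | O_CLOEXEC);
 *		if (fd < 0 || syscall(SYS_finit_module, fd, "", 0) != 0) {
 *			perror("finit_module");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */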