// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/codetag.h>
#include <linux/debugfs.h>
#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) mod_tree.addr_min/mod_tree.addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
LIST_HEAD(modules);
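
/*
 * Illustrative sketch (not part of this file's logic): the two legal
 * ways to walk the list above, per the locking comment. The
 * inspect_module() helper is hypothetical.
 *
 *	// Writer-style access: take the mutex.
 *	mutex_lock(&module_mutex);
 *	list_for_each_entry(mod, &modules, list)
 *		inspect_module(mod);
 *	mutex_unlock(&module_mutex);
 *
 *	// Read-only access: disabling preemption is enough, because
 *	// delete and add use RCU list operations.
 *	preempt_disable();
 *	list_for_each_entry_rcu(mod, &modules, list)
 *		inspect_module(mod);
 *	preempt_enable();
 */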

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);

struct mod_tree_root mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const s32 *crcs;
	enum mod_license license;
};

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
				unsigned int size, struct mod_tree_root *tree)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	if (mod_mem_type_is_core_data(type)) {
		if (min < tree->data_addr_min)
			tree->data_addr_min = min;
		if (max > tree->data_addr_max)
			tree->data_addr_max = max;
		return;
	}
#endif
	if (min < tree->addr_min)
		tree->addr_min = min;
	if (max > tree->addr_max)
		tree->addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (mod_mem->size)
			__mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
	}
}

/* Block module loading/unloading? */
int modules_disabled;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
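
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * subsystem reacting to module state transitions registers a
 * notifier_block; the callback receives the MODULE_STATE_* value and
 * the struct module pointer, matching the call sites below.
 *
 *	static int my_module_event(struct notifier_block *nb,
 *				   unsigned long state, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (state == MODULE_STATE_COMING)
 *			pr_info("%s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_module_event,
 *	};
 *
 *	register_module_notifier(&my_nb);
 */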

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/**
 * find_any_unique_sec() - Find a unique section index by name
 * @info: Load info for the module to scan
 * @name: Name of the section we're looking for
 *
 * Locates a unique section by name. Ignores SHF_ALLOC.
 *
 * Return: Section index if found uniquely, zero if absent, negative count
 * of total instances if multiple were found.
 */
static int find_any_unique_sec(const struct load_info *info, const char *name)
{
	unsigned int idx;
	unsigned int count = 0;
	int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (strcmp(info->secstrings + info->sechdrs[i].sh_name,
			   name) == 0) {
			count++;
			idx = i;
		}
	}
	if (count == 1) {
		return idx;
	} else if (count == 0) {
		return 0;
	} else {
		return -count;
	}
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
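
/*
 * Usage sketch: this is the shape in which the loader pulls typed
 * tables out of the image elsewhere (section name and fields here are
 * illustrative):
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * A missing section degrades gracefully: find_sec() returns 0, and
 * section 0 yields a NULL pointer with a count of 0.
 */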

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
		return NULL;
	return offset_to_ptr(&sym->namespace_offset);
#else
	return sym->namespace;
#endif
}

int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    struct find_symbol_arg *fsa)
{
	struct kernel_symbol *sym;

	if (!fsa->gplok && syms->license == GPL_ONLY)
		return false;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);
	if (!sym)
		return false;

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, sym - syms->start);
	fsa->sym = sym;
	fsa->license = syms->license;

	return true;
}

/*
 * Find an exported symbol and return it, along with (optional) crc and
 * (optional) module which owns it. Needs preempt disabled or module_mutex.
 */
bool find_symbol(struct find_symbol_arg *fsa)
{
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY },
	};
	struct module *mod;
	unsigned int i;

	module_assert_mutex_or_preempt();

	for (i = 0; i < ARRAY_SIZE(arr); i++)
		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
			return true;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY },
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		for (i = 0; i < ARRAY_SIZE(arr); i++)
			if (find_exported_symbol_in_section(&arr[i], mod, fsa))
				return true;
	}

	pr_debug("Failed to find symbol %s\n", fsa->name);
	return false;
}
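
/*
 * Usage sketch (hypothetical caller): look up an exported symbol and
 * read the results back out of the argument block. Callers must hold
 * module_mutex or have preemption disabled, as noted above.
 *
 *	struct find_symbol_arg fsa = {
 *		.name	= "printk",
 *		.gplok	= true,
 *	};
 *
 *	preempt_disable();
 *	if (find_symbol(&fsa))
 *		pr_debug("owner: %s\n", module_name(fsa.owner));
 *	preempt_enable();
 */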

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
struct module *find_module_all(const char *name, size_t len,
			       bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
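
/*
 * For reference, MODINFO_ATTR(version) above expands (roughly) to a
 * setup_modinfo_version()/show_modinfo_version()/... set of helpers
 * plus this attribute definition:
 *
 *	static struct module_attribute modinfo_version = {
 *		.attr	= { .name = "version", .mode = 0444 },
 *		.show	= show_modinfo_version,
 *		.setup	= setup_modinfo_version,
 *		.test	= modinfo_version_exists,
 *		.free	= free_modinfo_version,
 *	};
 *
 * The .setup hook is fed the raw "version=..." value parsed out of the
 * .modinfo section by setup_modinfo() further down.
 */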

static struct {
	char name[MODULE_NAME_LEN + 1];
	char taints[MODULE_FLAGS_BUF_SIZE];
} last_unloaded_module;

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a)
			return 1;
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
static int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;

		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);

	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod:	the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
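
/*
 * Worked example of the refcnt arithmetic above: a freshly loaded,
 * unused module sits at the base count, so
 *
 *	atomic_read(&mod->refcnt) == 1	// MODULE_REF_BASE, no users
 *	module_refcount(mod) == 0	// what callers and sysfs see
 *
 * try_release_module_ref() subtracts MODULE_REF_BASE; if the result is
 * non-zero someone still holds a reference, and the atomic_add_unless()
 * restores the base count unless the module already hit zero (i.e. it
 * is going away).
 */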

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	char buf[MODULE_FLAGS_BUF_SIZE];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name and taints of the last unloaded module for diagnostic purposes */
	strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
	strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));

	free_module(mod);
	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

void __symbol_put(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
	};

	preempt_disable();
	BUG_ON(!find_symbol(&fsa));
	module_put(fsa.owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module, we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
	}
}
EXPORT_SYMBOL(module_put);
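
/*
 * Canonical usage sketch for the pair above (hypothetical caller):
 * take a reference before relying on module code or data, and drop it
 * when done. try_module_get() can fail while the module is COMING or
 * GOING, so the return value must be checked.
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	ret = do_something_with(owner);
 *	module_put(owner);
 */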

#else /* !CONFIG_MODULE_UNLOAD */
static inline void module_unload_free(struct module *mod)
{
}

static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

size_t module_flags_taint(unsigned long taints, char *buf)
{
	size_t l = 0;
	int i;

	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &taints))
			buf[l++] = taint_flags[i].c_true;
	}

	return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = mk->mod->mem[MOD_TEXT].size;

	if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
		for_class_mod_mem_type(type, core_data)
			size += mk->mod->mem[type].size;
	}
	return sprintf(buffer, "%u\n", size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
static ssize_t show_datasize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, core_data)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static struct module_attribute modinfo_datasize =
	__ATTR(datasize, 0444, show_datasize, NULL);
#endif

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	unsigned int size = 0;

	for_class_mod_mem_type(type, init)
		size += mk->mod->mem[type].size;
	return sprintf(buffer, "%u\n", size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod->taints, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
	&modinfo_datasize,
#endif
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);

static const char vermagic[] = VERMAGIC_STRING;

int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

/* Parse tag=value strings from .modinfo section */
char *module_next_tag_pair(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	/*
	 * get_modinfo() calls made before rewrite_section_headers()
	 * must use sh_offset, as sh_addr isn't set!
	 */
	char *modinfo = (char *)info->hdr + infosec->sh_offset;

	if (prev) {
		size -= prev - modinfo;
		modinfo = module_next_tag_pair(prev, &size);
	}

	for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

static char *get_modinfo(const struct load_info *info, const char *tag)
{
	return get_next_modinfo(info, tag, NULL);
}
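
/*
 * For reference, the .modinfo section walked above is a sequence of
 * NUL-terminated "tag=value" pairs, e.g. (contents illustrative):
 *
 *	"license=GPL\0author=Jane Doe\0vermagic=6.9.0 SMP ...\0"
 *
 * so get_modinfo(info, "license") would return a pointer to "GPL".
 */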

static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace && namespace[0]) {
		for_each_modinfo_entry(imported_namespace, info, "import_ns") {
			if (strcmp(namespace, imported_namespace) == 0)
				return 0;
		}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		pr_warn(
#else
		pr_err(
#endif
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		return -EINVAL;
#endif
	}
	return 0;
}

static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
{
	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
		return true;

	if (mod->using_gplonly_symbols) {
		pr_err("%s: module using GPL-only symbols uses symbol %s from proprietary module %s.\n",
			mod->name, name, owner->name);
		return false;
	}

	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
		pr_warn("%s: module uses symbol %s from proprietary module %s, inheriting taint.\n",
			mod->name, name, owner->name);
		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
	}
	return true;
}

/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct find_symbol_arg fsa = {
		.name	= name,
		.gplok	= !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
		.warn	= true,
	};
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	if (!find_symbol(&fsa))
		goto unlock;

	if (fsa.license == GPL_ONLY)
		mod->using_gplonly_symbols = true;

	if (!inherit_taint(mod, fsa.owner, name)) {
		fsa.sym = NULL;
		goto getname;
	}

	if (!check_version(info, name, mod, fsa.crc)) {
		fsa.sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = verify_namespace_is_imported(info, fsa.sym, mod);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

	err = ref_module(mod, fsa.owner);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return fsa.sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
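
/*
 * For reference, the "import_ns" entries checked above are emitted by
 * the MODULE_IMPORT_NS() macro in the consuming module: importing,
 * say, the DMA_BUF namespace adds an "import_ns=DMA_BUF" string to
 * .modinfo, pairing with the exporting side's EXPORT_SYMBOL_NS_GPL()
 * declaration (example namespace chosen for illustration).
 */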

void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}

void *__module_writable_address(struct module *mod, void *loc)
{
	for_class_mod_mem_type(type, text) {
		struct module_memory *mem = &mod->mem[type];

		if (loc >= mem->base && loc < mem->base + mem->size)
			return loc + (mem->rw_copy - mem->base);
	}

	return loc;
}

static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
{
	unsigned int size = PAGE_ALIGN(mod->mem[type].size);
	enum execmem_type execmem_type;
	void *ptr;

	mod->mem[type].size = size;

	if (mod_mem_type_is_data(type))
		execmem_type = EXECMEM_MODULE_DATA;
	else
		execmem_type = EXECMEM_MODULE_TEXT;

	ptr = execmem_alloc(execmem_type, size);
	if (!ptr)
		return -ENOMEM;

	mod->mem[type].base = ptr;

	if (execmem_is_rox(execmem_type)) {
		ptr = vzalloc(size);

		if (!ptr) {
			execmem_free(mod->mem[type].base);
			return -ENOMEM;
		}

		mod->mem[type].rw_copy = ptr;
		mod->mem[type].is_rox = true;
	} else {
		mod->mem[type].rw_copy = mod->mem[type].base;
		memset(mod->mem[type].base, 0, size);
	}

	/*
	 * The pointers to these blocks of memory are stored on the module
	 * structure and we keep that around so long as the module is
	 * around. We only free that memory when we unload the module.
	 * Just mark them as not being a leak then. The .init* ELF
	 * sections *do* get freed after boot so we *could* treat them
	 * slightly differently with kmemleak_ignore() and only grey
	 * them out as they work as typical memory allocations which
	 * *do* eventually get freed, but let's just keep things simple
	 * and avoid *any* false positives.
	 */
	kmemleak_not_leak(ptr);

	return 0;
}

static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
	struct module_memory *mem = &mod->mem[type];

	if (mem->is_rox)
		vfree(mem->rw_copy);

	execmem_free(mem->base);
}

static void free_mod_mem(struct module *mod)
{
	for_each_mod_mem_type(type) {
		struct module_memory *mod_mem = &mod->mem[type];

		if (type == MOD_DATA)
			continue;

		/* Free lock-classes; relies on the preceding sync_rcu(). */
		lockdep_free_key_range(mod_mem->base, mod_mem->size);
		if (mod_mem->size)
			module_memory_free(mod, type);
	}

	/* MOD_DATA hosts mod, so free it at last */
	lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
	module_memory_free(mod, MOD_DATA);
}
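
/*
 * Illustrative sketch of how the rw_copy above is meant to be used
 * while the module is being laid out (the surrounding loader code is
 * elided): writes to text that may already be mapped read-only-
 * executable go through __module_writable_address(), and arch code
 * commits the copy to the final mapping afterwards.
 *
 *	void *dst = __module_writable_address(mod, loc);
 *
 *	memcpy(dst, src, len);	// never write to 'loc' directly
 */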

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	codetag_unload_module(mod);

	mod_sysfs_teardown(mod);

	/*
	 * We leave it in list to prevent duplicate loads, but make sure
	 * that no one uses it while it's being deconstructed.
	 */
	mutex_lock(&module_mutex);
	mod->state = MODULE_STATE_UNFORMED;
	mutex_unlock(&module_mutex);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	if (is_livepatch_module(mod))
		free_module_elf(mod);

	/* Now we can delete it from the lists */
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	/* Remove this module from bug list, this uses list_del_rcu */
	module_bug_cleanup(mod);
	/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
	synchronize_rcu();
	if (try_add_tainted_module(mod))
		pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
		       mod->name);
	mutex_unlock(&module_mutex);

	/* This may be empty, but that's OK */
	module_arch_freeing_init(mod);
	kfree(mod->args);
	percpu_modfree(mod);

	free_mod_mem(mod);
}

void *__symbol_get(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
		.warn	= true,
	};

	preempt_disable();
	if (!find_symbol(&fsa))
		goto fail;
	if (fsa.license != GPL_ONLY) {
		pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
			symbol);
		goto fail;
	}
	if (strong_try_module_get(fsa.owner))
		goto fail;
	preempt_enable();
	return (void *)kernel_symbol_value(fsa.sym);
fail:
	preempt_enable();
	return NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
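
/*
 * Usage sketch (hypothetical caller): __symbol_get() is the backend of
 * the symbol_get() macro; on success the caller owns a module
 * reference and must pair it with symbol_put().
 *
 *	some_fn_t *fn = symbol_get(some_gpl_only_function);
 *
 *	if (fn) {
 *		fn();
 *		symbol_put(some_gpl_only_function);
 *	}
 */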

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_exported_symbols(struct module *mod)
{
	unsigned int i;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			struct find_symbol_arg fsa = {
				.name	= kernel_symbol_name(s),
				.gplok	= true,
			};
			if (find_symbol(&fsa)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, kernel_symbol_name(s),
				       module_name(fsa.owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}

static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
{
	/*
	 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
	 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
	 * i386 has a similar problem but may not deserve a fix.
	 *
	 * If we ever have to ignore many symbols, consider refactoring the code to
	 * only warn if referenced by a relocation.
	 */
	if (emachine == EM_386 || emachine == EM_X86_64)
		return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
	return false;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* Ignore common symbols */
			if (!strncmp(name, "__gnu_lto", 9))
				break;

			/*
			 * We compiled with -fno-common. These are not
			 * supposed to happen.
			 */
			pr_debug("Common symbol: %s\n", name);
			pr_warn("%s: please compile with -fno-common\n",
				mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx %s\n",
				 (long)sym[i].st_value, name);
			break;

		case SHN_LIVEPATCH:
			/* Livepatch symbols are resolved by livepatch */
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = kernel_symbol_value(ksym);
				break;
			}

			/* Ok if weak or ignored.  */
			if (!ksym &&
			    (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
			     ignore_undef_symbol(info->hdr->e_machine, name)))
				break;

			ret = PTR_ERR(ksym) ?: -ENOENT;
			pr_warn("%s: Unknown symbol %s (err %d)\n",
				mod->name, name, ret);
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
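
/*
 * Worked example for the default case above: a symbol defined in
 * section 5 at offset 0x40, once section 5 has been moved to its final
 * address (say sh_addr == 0xffffffffc0001000), ends up with
 *
 *	st_value = 0x40 + 0xffffffffc0001000 = 0xffffffffc0001040
 *
 * i.e. st_value now encodes the pointer directly, as the function's
 * header comment promises.
 */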

static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
			err = klp_apply_section_relocs(mod, info->sechdrs,
						       info->secstrings,
						       info->strtab,
						       info->index.sym, i,
						       NULL);
		else if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
				Elf_Shdr *sechdr, unsigned int section)
{
	long offset;
	long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;

	mod->mem[type].size += arch_mod_section_prepend(mod, section);
	offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
	mod->mem[type].size = offset + sechdr->sh_size;

	WARN_ON_ONCE(offset & mask);
	return offset | mask;
}
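
/*
 * Worked example of the encoding above: for a MOD_INIT_TEXT section
 * placed at offset 0x200, module_get_offset_and_type() returns
 *
 *	0x200 | ((unsigned long)MOD_INIT_TEXT << SH_ENTSIZE_TYPE_SHIFT)
 *
 * so a single sh_entsize value carries both where the section lands
 * and which module_memory region it belongs to; consumers mask the
 * type back out with SH_ENTSIZE_TYPE_MASK.
 */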

bool module_init_layout_section(const char *sname)
{
#ifndef CONFIG_MODULE_UNLOAD
	if (module_exit_section(sname))
		return true;
#endif
	return module_init_section(sname);
}

static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
{
	unsigned int m, i;

	static const unsigned long masks[][2] = {
		/*
		 * NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below
		 */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	static const int core_m_to_mem_type[] = {
		MOD_TEXT,
		MOD_RODATA,
		MOD_RO_AFTER_INIT,
		MOD_DATA,
		MOD_DATA,
	};
	static const int init_m_to_mem_type[] = {
		MOD_INIT_TEXT,
		MOD_INIT_RODATA,
		MOD_INVALID,
		MOD_INIT_DATA,
		MOD_INIT_DATA,
	};

	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];

		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || is_init != module_init_layout_section(sname))
				continue;

			if (WARN_ON_ONCE(type == MOD_INVALID))
				continue;

			/*
			 * Do not allocate codetag memory as we load it into
			 * preallocated contiguous memory.
			 */
			if (codetag_needs_module_section(mod, sname, s->sh_size)) {
				/*
				 * s->sh_entsize won't be used but populate the
				 * type field to avoid confusion.
				 */
				s->sh_entsize = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK)
						<< SH_ENTSIZE_TYPE_SHIFT;
				continue;
			}

			s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
			pr_debug("\t%s\n", sname);
		}
	}
}

/*
 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
 * might -- code, read-only data, read-write data, small data. Tally
 * sizes, and place the offsets into sh_entsize fields: high bit means it
 * belongs in init.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{
	unsigned int i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, false);

	pr_debug("Init section allocation order for %s:\n", mod->name);
	__layout_sections(mod, info, true);
}

static void module_license_taint_check(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}

static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}

static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}

bool __weak module_init_section(const char *name)
{
	return strstarts(name, ".init");
}

bool __weak module_exit_section(const char *name)
{
	return strstarts(name, ".exit");
}

static int validate_section_offset(const struct load_info *info, Elf_Shdr *shdr)
{
#if defined(CONFIG_64BIT)
	unsigned long long secend;
#else
	unsigned long secend;
#endif

	/*
	 * Check for both overflow and offset/size being
	 * too large.
	 */
	secend = shdr->sh_offset + shdr->sh_size;
	if (secend < shdr->sh_offset || secend > info->len)
		return -ENOEXEC;

	return 0;
}
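
/*
 * Worked example of the overflow check above: on a 32-bit build, a
 * malformed image with sh_offset == 0xfffff000 and sh_size == 0x2000
 * makes secend wrap to 0xfff, which is smaller than sh_offset, so the
 * first half of the condition catches it; the image cannot smuggle a
 * section past the info->len bound by overflowing the sum.
 */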

/**
 * elf_validity_ehdr() - Checks an ELF header for module validity
 * @info: Load info containing the ELF header to check
 *
 * Checks whether an ELF header could belong to a valid module. Checks:
 *
 * * ELF header is within the data the user provided
 * * ELF magic is present
 * * It is relocatable (not final linked, not core file, etc.)
 * * The header's machine type matches what the architecture expects.
 * * Optional arch-specific hook for other properties
 *   - module_elf_check_arch() is currently only used by PPC to check
 *   ELF ABI version, but may be used by others in the future.
 *
 * Return: %0 if valid, %-ENOEXEC on failure.
 */
static int elf_validity_ehdr(const struct load_info *info)
{
	if (info->len < sizeof(*(info->hdr))) {
		pr_err("Invalid ELF header len %lu\n", info->len);
		return -ENOEXEC;
	}
	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
		pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
		return -ENOEXEC;
	}
	if (info->hdr->e_type != ET_REL) {
		pr_err("Invalid ELF header type: %u != %u\n",
		       info->hdr->e_type, ET_REL);
		return -ENOEXEC;
	}
	if (!elf_check_arch(info->hdr)) {
		pr_err("Invalid architecture in ELF header: %u\n",
		       info->hdr->e_machine);
		return -ENOEXEC;
	}
	if (!module_elf_check_arch(info->hdr)) {
		pr_err("Invalid module architecture in ELF header: %u\n",
		       info->hdr->e_machine);
		return -ENOEXEC;
	}
	return 0;
}

/**
 * elf_validity_cache_sechdrs() - Cache section headers if valid
 * @info: Load info to compute section headers from
 *
 * Checks:
 *
 * * ELF header is valid (see elf_validity_ehdr())
 * * Section headers are the size we expect
 * * Section array fits in the user provided data
 * * Section index 0 is NULL
 * * Section contents are inbounds
 *
 * Then updates @info with a &load_info->sechdrs pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_sechdrs(struct load_info *info)
{
	Elf_Shdr *sechdrs;
	Elf_Shdr *shdr;
	int i;
	int err;

	err = elf_validity_ehdr(info);
	if (err < 0)
		return err;

	if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
		pr_err("Invalid ELF section header size\n");
		return -ENOEXEC;
	}

	/*
	 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
	 * known and small. So e_shnum * sizeof(Elf_Shdr)
	 * will not overflow unsigned long on any platform.
	 */
	if (info->hdr->e_shoff >= info->len
	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
		info->len - info->hdr->e_shoff)) {
		pr_err("Invalid ELF section header overflow\n");
		return -ENOEXEC;
	}

	sechdrs = (void *)info->hdr + info->hdr->e_shoff;

	/*
	 * The code assumes that section 0 has a length of zero and
	 * an addr of zero, so check for it.
	 */
	if (sechdrs[0].sh_type != SHT_NULL
	    || sechdrs[0].sh_size != 0
	    || sechdrs[0].sh_addr != 0) {
		pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
		       sechdrs[0].sh_type);
		return -ENOEXEC;
	}

	/* Validate contents are inbounds */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		shdr = &sechdrs[i];
		switch (shdr->sh_type) {
		case SHT_NULL:
		case SHT_NOBITS:
			/* No contents, offset/size don't mean anything */
			continue;
		default:
			err = validate_section_offset(info, shdr);
			if (err < 0) {
				pr_err("Invalid ELF section in module (section %u type %u)\n",
				       i, shdr->sh_type);
				return err;
			}
		}
	}

	info->sechdrs = sechdrs;

	return 0;
}

/**
 * elf_validity_cache_secstrings() - Caches section names if valid
 * @info: Load info to cache section names from. Must have valid sechdrs.
 *
 * Specifically checks:
 *
 * * Section name table index is inbounds of section headers
 * * Section name table is not empty
 * * Section name table is NUL terminated
 * * All section name offsets are inbounds of the section
 *
 * Then updates @info with a &load_info->secstrings pointer if valid.
 *
 * Return: %0 if valid, negative error code if validation failed.
 */
static int elf_validity_cache_secstrings(struct load_info *info)
{
	Elf_Shdr *strhdr, *shdr;
	char *secstrings;
	int i;

	/*
	 * Verify if the section name table index is valid.
	 */
	if (info->hdr->e_shstrndx == SHN_UNDEF
	    || info->hdr->e_shstrndx >= info->hdr->e_shnum) {
		pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
		       info->hdr->e_shstrndx, info->hdr->e_shstrndx,
		       info->hdr->e_shnum);
		return -ENOEXEC;
	}

	strhdr = &info->sechdrs[info->hdr->e_shstrndx];

	/*
	 * The section name table must be NUL-terminated, as required
	 * by the spec. This makes strcmp and pr_* calls that access
	 * strings in the section safe.
	 */
	secstrings = (void *)info->hdr + strhdr->sh_offset;
	if (strhdr->sh_size == 0) {
		pr_err("empty section name table\n");
		return -ENOEXEC;
	}
	if (secstrings[strhdr->sh_size - 1] != '\0') {
		pr_err("ELF Spec violation: section name table isn't null terminated\n");
		return -ENOEXEC;
	}

	for (i = 0; i < info->hdr->e_shnum; i++) {
		shdr = &info->sechdrs[i];
		/* SHT_NULL means sh_name has an undefined value */
		if (shdr->sh_type == SHT_NULL)
			continue;
		if (shdr->sh_name >= strhdr->sh_size) {
			pr_err("Invalid ELF section name in module (section %u type %u)\n",
			       i, shdr->sh_type);
			return -ENOEXEC;
		}
	}

	info->secstrings = secstrings;
	return 0;
}

/**
 * elf_validity_cache_index_info() - Validate and cache modinfo section
 * @info: Load info to populate the modinfo index on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * Checks that if there is a .modinfo section, it is unique.
 * Then, it caches its index in &load_info->index.info.
 * Finally, it tries to populate the name to improve error messages.
 *
 * Return: %0 if valid, %-ENOEXEC if multiple modinfo sections were found.
 */
static int elf_validity_cache_index_info(struct load_info *info)
{
	int info_idx;

	info_idx = find_any_unique_sec(info, ".modinfo");

	if (info_idx == 0)
		/* Early return, no .modinfo */
		return 0;

	if (info_idx < 0) {
		pr_err("Only one .modinfo section must exist.\n");
		return -ENOEXEC;
	}

	info->index.info = info_idx;
	/* Try to find a name early so we can log errors with a module name */
	info->name = get_modinfo(info, "name");

	return 0;
}

/**
 * elf_validity_cache_index_mod() - Validates and caches this_module section
 * @info: Load info to cache this_module on.
 *        Must have &load_info->sechdrs and &load_info->secstrings populated
 *
 * The ".gnu.linkonce.this_module" ELF section is special. It is what modpost
 * uses to refer to __this_module and lets us rely on THIS_MODULE to point
 * to &__this_module properly. The kernel's modpost declares it on each
 * module's *.mod.c file. If the struct module of the kernel changes a full
 * kernel rebuild is required.
 *
 * We have a few expectations for this special section, this function
 * validates all this for us:
 *
 * * The section has contents
 * * The section is unique
 * * We expect the kernel to always have to allocate it: SHF_ALLOC
 * * The section size must match the kernel's run time's struct module
 *   size
 *
 * If all checks pass, the index will be cached in &load_info->index.mod
 *
 * Return: %0 on validation success, %-ENOEXEC on failure
 */
static int elf_validity_cache_index_mod(struct load_info *info)
{
	Elf_Shdr *shdr;
	int mod_idx;

	mod_idx = find_any_unique_sec(info, ".gnu.linkonce.this_module");
	if (mod_idx <= 0) {
		pr_err("module %s: Exactly one .gnu.linkonce.this_module section must exist.\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	shdr = &info->sechdrs[mod_idx];

	if (shdr->sh_type == SHT_NOBITS) {
		pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	if (!(shdr->sh_flags & SHF_ALLOC)) {
		pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	if (shdr->sh_size != sizeof(struct module)) {
		pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
		       info->name ?: "(missing .modinfo section or name field)");
		return -ENOEXEC;
	}

	info->index.mod = mod_idx;

	return 0;
}
2061 */
2062 static int elf_validity_cache_index_str(struct load_info *info)
2063 {
2064 unsigned int str_idx = info->sechdrs[info->index.sym].sh_link;
2065
2066 if (str_idx == SHN_UNDEF || str_idx >= info->hdr->e_shnum) {
2067 pr_err("Invalid ELF sh_link (%d): must not be SHN_UNDEF and must be < e_shnum (%d)\n",
2068 str_idx, info->hdr->e_shnum);
2069 return -ENOEXEC;
2070 }
2071
2072 info->index.str = str_idx;
2073 return 0;
2074 }
2075
2076 /**
2077 * elf_validity_cache_index() - Resolve, validate, cache section indices
2078 * @info: Load info to read from and update.
2079 * &load_info->sechdrs and &load_info->secstrings must be populated.
2080 * @flags: Load flags used to suppress version loading; see
2081 * uapi/linux/module.h
2082 *
2083 * Populates &load_info->index, validating as it goes.
2084 * See child functions for per-field validation:
2085 *
2086 * * elf_validity_cache_index_info()
2087 * * elf_validity_cache_index_mod()
2088 * * elf_validity_cache_index_sym()
2089 * * elf_validity_cache_index_str()
2090 *
2091 * If versioning is not suppressed via flags, load the version index from
2092 * a section called "__versions" with no validation.
2093 *
2094 * If CONFIG_SMP is enabled, load the percpu section by name with no
2095 * validation.
2096 *
2097 * Return: 0 on success, negative error code if an index failed validation.
2098 */
2099 static int elf_validity_cache_index(struct load_info *info, int flags)
2100 {
2101 int err;
2102
2103 err = elf_validity_cache_index_info(info);
2104 if (err < 0)
2105 return err;
2106 err = elf_validity_cache_index_mod(info);
2107 if (err < 0)
2108 return err;
2109 err = elf_validity_cache_index_sym(info);
2110 if (err < 0)
2111 return err;
2112 err = elf_validity_cache_index_str(info);
2113 if (err < 0)
2114 return err;
2115
2116 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2117 info->index.vers = 0; /* Pretend no __versions section! */
2118 else
2119 info->index.vers = find_sec(info, "__versions");
2120
2121 info->index.pcpu = find_pcpusec(info);
2122
2123 return 0;
2124 }
2125
2126 /**
2127 * elf_validity_cache_strtab() - Validate and cache symbol string table
2128 * @info: Load info to read from and update.
2129 * Must have &load_info->sechdrs and &load_info->secstrings populated.
2130 * Must have &load_info->index populated.
2131 *
2132 * Checks:
2133 *
2134 * * The string table is not empty.
2135 * * The string table starts and ends with NUL (required by ELF spec).
2136 * * Every &Elf_Sym->st_name offset in the symbol table is in bounds of the
2137 * string table.
2138 *
2139 * And caches the pointer as &load_info->strtab in @info.
2140 *
2141 * Return: 0 on success, negative error code if a check failed.
2142 */
2143 static int elf_validity_cache_strtab(struct load_info *info)
2144 {
2145 Elf_Shdr *str_shdr = &info->sechdrs[info->index.str];
2146 Elf_Shdr *sym_shdr = &info->sechdrs[info->index.sym];
2147 char *strtab = (char *)info->hdr + str_shdr->sh_offset;
2148 Elf_Sym *syms = (void *)info->hdr + sym_shdr->sh_offset;
2149 int i;
2150
2151 if (str_shdr->sh_size == 0) {
2152 pr_err("empty symbol string table\n");
2153 return -ENOEXEC;
2154 }
2155 if (strtab[0] != '\0') {
2156 pr_err("symbol string table missing leading NUL\n");
2157 return -ENOEXEC;
2158 }
2159 if (strtab[str_shdr->sh_size - 1] != '\0') {
2160 pr_err("symbol string table isn't NUL terminated\n");
2161 return -ENOEXEC;
2162 }
2163
2164 /*
2165 * Now that we know strtab is correctly structured, check that symbol
2166 * name offsets are in bounds before they're used later.
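* For example, a crafted image could set an st_name just past the end of
* .strtab; dereferencing strtab + st_name later would then read out of
* bounds, which is exactly what the loop below rejects.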
2167 */
2168 for (i = 0; i < sym_shdr->sh_size / sizeof(*syms); i++) {
2169 if (syms[i].st_name >= str_shdr->sh_size) {
2170 pr_err("symbol name out of bounds in string table\n");
2171 return -ENOEXEC;
2172 }
2173 }
2174
2175 info->strtab = strtab;
2176 return 0;
2177 }
2178
2179 /*
2180 * Check the userspace-passed ELF module against our expectations, and cache
2181 * useful variables for further processing as we go.
2182 *
2183 * This does basic validity checks against section offsets and sizes, the
2184 * section name string table, and the indices used for it (sh_name).
2185 *
2186 * Since we're already checking the ELF sections, we also cache
2187 * useful variables which will be used later for our convenience:
2188 *
2189 * o pointers to section headers
2190 * o the modinfo section index
2191 * o the symbol string section index
2192 * o the module section index
2193 *
2194 * Finally, we set info->mod to the temporary copy of the module in
2195 * info->hdr. The final one will be allocated in move_module(). Any
2196 * modifications we make to our copy of the module will be carried over
2197 * to the final minted module.
2198 */
2199 static int elf_validity_cache_copy(struct load_info *info, int flags)
2200 {
2201 int err;
2202
2203 err = elf_validity_cache_sechdrs(info);
2204 if (err < 0)
2205 return err;
2206 err = elf_validity_cache_secstrings(info);
2207 if (err < 0)
2208 return err;
2209 err = elf_validity_cache_index(info, flags);
2210 if (err < 0)
2211 return err;
2212 err = elf_validity_cache_strtab(info);
2213 if (err < 0)
2214 return err;
2215
2216 /* This is temporary: point mod into copy of data. */
2217 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
2218
2219 /*
2220 * If we didn't load the .modinfo 'name' field earlier, fall back to
2221 * the on-disk struct module's 'name' field.
2222 */
2223 if (!info->name)
2224 info->name = info->mod->name;
2225
2226 return 0;
2227 }
2228
2229 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2230
2231 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2232 {
2233 do {
2234 unsigned long n = min(len, COPY_CHUNK_SIZE);
2235
2236 if (copy_from_user(dst, usrc, n) != 0)
2237 return -EFAULT;
2238 cond_resched();
2239 dst += n;
2240 usrc += n;
2241 len -= n;
2242 } while (len);
2243 return 0;
2244 }
2245
2246 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2247 {
2248 if (!get_modinfo(info, "livepatch"))
2249 /* Nothing more to do */
2250 return 0;
2251
2252 if (set_livepatch_module(mod))
2253 return 0;
2254
2255 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
2256 mod->name);
2257 return -ENOEXEC;
2258 }
2259
2260 static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2261 {
2262 if (retpoline_module_ok(get_modinfo(info, "retpoline")))
2263 return;
2264
2265 pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2266 mod->name);
2267 }
2268
2269 /* Sets info->hdr and info->len. */
2270 static int copy_module_from_user(const void __user *umod, unsigned long len,
2271 struct load_info *info)
2272 {
2273 int err;
2274
2275 info->len = len;
2276 if (info->len < sizeof(*(info->hdr)))
2277 return -ENOEXEC;
2278
2279 err = security_kernel_load_data(LOADING_MODULE, true);
2280 if (err)
2281 return err;
2282
2283 /* Suck in entire file: we'll want most of it.
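* A minimal userspace sketch of how we typically get here (illustrative
* only, error handling omitted):
*
*	int fd = open("foo.ko", O_RDONLY);
*	struct stat st;
*	fstat(fd, &st);
*	void *image = malloc(st.st_size);
*	read(fd, image, st.st_size);
*	syscall(SYS_init_module, image, st.st_size, "");
*
* Modern tooling prefers finit_module(2), handled further below.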
*/
2284 info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
2285 if (!info->hdr)
2286 return -ENOMEM;
2287
2288 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2289 err = -EFAULT;
2290 goto out;
2291 }
2292
2293 err = security_kernel_post_load_data((char *)info->hdr, info->len,
2294 LOADING_MODULE, "init_module");
2295 out:
2296 if (err)
2297 vfree(info->hdr);
2298
2299 return err;
2300 }
2301
2302 static void free_copy(struct load_info *info, int flags)
2303 {
2304 if (flags & MODULE_INIT_COMPRESSED_FILE)
2305 module_decompress_cleanup(info);
2306 else
2307 vfree(info->hdr);
2308 }
2309
2310 static int rewrite_section_headers(struct load_info *info, int flags)
2311 {
2312 unsigned int i;
2313
2314 /* This should always be true, but let's be sure. */
2315 info->sechdrs[0].sh_addr = 0;
2316
2317 for (i = 1; i < info->hdr->e_shnum; i++) {
2318 Elf_Shdr *shdr = &info->sechdrs[i];
2319
2320 /*
2321 * Mark all sections sh_addr with their address in the
2322 * temporary image.
2323 */
2324 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2325
2326 }
2327
2328 /* Track but don't keep modinfo and version sections. */
2329 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2330 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2331
2332 return 0;
2333 }
2334
2335 /*
2336 * These calls taint the kernel depending on certain module circumstances */
2337 static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
2338 {
2339 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
2340
2341 if (!get_modinfo(info, "intree")) {
2342 if (!test_taint(TAINT_OOT_MODULE))
2343 pr_warn("%s: loading out-of-tree module taints kernel.\n",
2344 mod->name);
2345 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2346 }
2347
2348 check_modinfo_retpoline(mod, info);
2349
2350 if (get_modinfo(info, "staging")) {
2351 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2352 pr_warn("%s: module is from the staging directory, the quality "
2353 "is unknown, you have been warned.\n", mod->name);
2354 }
2355
2356 if (is_livepatch_module(mod)) {
2357 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2358 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2359 mod->name);
2360 }
2361
2362 module_license_taint_check(mod, get_modinfo(info, "license"));
2363
2364 if (get_modinfo(info, "test")) {
2365 if (!test_taint(TAINT_TEST))
2366 pr_warn("%s: loading test module taints kernel.\n",
2367 mod->name);
2368 add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
2369 }
2370 #ifdef CONFIG_MODULE_SIG
2371 mod->sig_ok = info->sig_ok;
2372 if (!mod->sig_ok) {
2373 pr_notice_once("%s: module verification failed: signature "
2374 "and/or required key missing - tainting "
2375 "kernel\n", mod->name);
2376 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
2377 }
2378 #endif
2379
2380 /*
2381 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2382 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2383 * using GPL-only symbols it needs.
2384 */ 2385 if (strcmp(mod->name, "ndiswrapper") == 0) 2386 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 2387 2388 /* driverloader was caught wrongly pretending to be under GPL */ 2389 if (strcmp(mod->name, "driverloader") == 0) 2390 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2391 LOCKDEP_NOW_UNRELIABLE); 2392 2393 /* lve claims to be GPL but upstream won't provide source */ 2394 if (strcmp(mod->name, "lve") == 0) 2395 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2396 LOCKDEP_NOW_UNRELIABLE); 2397 2398 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 2399 pr_warn("%s: module license taints kernel.\n", mod->name); 2400 2401 } 2402 2403 static int check_modinfo(struct module *mod, struct load_info *info, int flags) 2404 { 2405 const char *modmagic = get_modinfo(info, "vermagic"); 2406 int err; 2407 2408 if (flags & MODULE_INIT_IGNORE_VERMAGIC) 2409 modmagic = NULL; 2410 2411 /* This is allowed: modprobe --force will invalidate it. */ 2412 if (!modmagic) { 2413 err = try_to_force_load(mod, "bad vermagic"); 2414 if (err) 2415 return err; 2416 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 2417 pr_err("%s: version magic '%s' should be '%s'\n", 2418 info->name, modmagic, vermagic); 2419 return -ENOEXEC; 2420 } 2421 2422 err = check_modinfo_livepatch(mod, info); 2423 if (err) 2424 return err; 2425 2426 return 0; 2427 } 2428 2429 static int find_module_sections(struct module *mod, struct load_info *info) 2430 { 2431 mod->kp = section_objs(info, "__param", 2432 sizeof(*mod->kp), &mod->num_kp); 2433 mod->syms = section_objs(info, "__ksymtab", 2434 sizeof(*mod->syms), &mod->num_syms); 2435 mod->crcs = section_addr(info, "__kcrctab"); 2436 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 2437 sizeof(*mod->gpl_syms), 2438 &mod->num_gpl_syms); 2439 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 2440 2441 #ifdef CONFIG_CONSTRUCTORS 2442 mod->ctors = section_objs(info, ".ctors", 2443 sizeof(*mod->ctors), &mod->num_ctors); 2444 if (!mod->ctors) 2445 mod->ctors = section_objs(info, ".init_array", 2446 sizeof(*mod->ctors), &mod->num_ctors); 2447 else if (find_sec(info, ".init_array")) { 2448 /* 2449 * This shouldn't happen with same compiler and binutils 2450 * building all parts of the module. 
2451 */ 2452 pr_warn("%s: has both .ctors and .init_array.\n", 2453 mod->name); 2454 return -EINVAL; 2455 } 2456 #endif 2457 2458 mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 2459 &mod->noinstr_text_size); 2460 2461 #ifdef CONFIG_TRACEPOINTS 2462 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 2463 sizeof(*mod->tracepoints_ptrs), 2464 &mod->num_tracepoints); 2465 #endif 2466 #ifdef CONFIG_TREE_SRCU 2467 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 2468 sizeof(*mod->srcu_struct_ptrs), 2469 &mod->num_srcu_structs); 2470 #endif 2471 #ifdef CONFIG_BPF_EVENTS 2472 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 2473 sizeof(*mod->bpf_raw_events), 2474 &mod->num_bpf_raw_events); 2475 #endif 2476 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2477 mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 2478 mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, 2479 &mod->btf_base_data_size); 2480 #endif 2481 #ifdef CONFIG_JUMP_LABEL 2482 mod->jump_entries = section_objs(info, "__jump_table", 2483 sizeof(*mod->jump_entries), 2484 &mod->num_jump_entries); 2485 #endif 2486 #ifdef CONFIG_EVENT_TRACING 2487 mod->trace_events = section_objs(info, "_ftrace_events", 2488 sizeof(*mod->trace_events), 2489 &mod->num_trace_events); 2490 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 2491 sizeof(*mod->trace_evals), 2492 &mod->num_trace_evals); 2493 #endif 2494 #ifdef CONFIG_TRACING 2495 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2496 sizeof(*mod->trace_bprintk_fmt_start), 2497 &mod->num_trace_bprintk_fmt); 2498 #endif 2499 #ifdef CONFIG_FTRACE_MCOUNT_RECORD 2500 /* sechdrs[0].sh_size is always zero */ 2501 mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 2502 sizeof(*mod->ftrace_callsites), 2503 &mod->num_ftrace_callsites); 2504 #endif 2505 #ifdef CONFIG_FUNCTION_ERROR_INJECTION 2506 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 2507 sizeof(*mod->ei_funcs), 2508 &mod->num_ei_funcs); 2509 #endif 2510 #ifdef CONFIG_KPROBES 2511 mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 2512 &mod->kprobes_text_size); 2513 mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", 2514 sizeof(unsigned long), 2515 &mod->num_kprobe_blacklist); 2516 #endif 2517 #ifdef CONFIG_PRINTK_INDEX 2518 mod->printk_index_start = section_objs(info, ".printk_index", 2519 sizeof(*mod->printk_index_start), 2520 &mod->printk_index_size); 2521 #endif 2522 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE 2523 mod->static_call_sites = section_objs(info, ".static_call_sites", 2524 sizeof(*mod->static_call_sites), 2525 &mod->num_static_call_sites); 2526 #endif 2527 #if IS_ENABLED(CONFIG_KUNIT) 2528 mod->kunit_suites = section_objs(info, ".kunit_test_suites", 2529 sizeof(*mod->kunit_suites), 2530 &mod->num_kunit_suites); 2531 mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites", 2532 sizeof(*mod->kunit_init_suites), 2533 &mod->num_kunit_init_suites); 2534 #endif 2535 2536 mod->extable = section_objs(info, "__ex_table", 2537 sizeof(*mod->extable), &mod->num_exentries); 2538 2539 if (section_addr(info, "__obsparm")) 2540 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 2541 2542 #ifdef CONFIG_DYNAMIC_DEBUG_CORE 2543 mod->dyndbg_info.descs = section_objs(info, "__dyndbg", 2544 sizeof(*mod->dyndbg_info.descs), 2545 &mod->dyndbg_info.num_descs); 2546 mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes", 2547 sizeof(*mod->dyndbg_info.classes), 2548 
&mod->dyndbg_info.num_classes); 2549 #endif 2550 2551 return 0; 2552 } 2553 2554 static int move_module(struct module *mod, struct load_info *info) 2555 { 2556 int i; 2557 enum mod_mem_type t = 0; 2558 int ret = -ENOMEM; 2559 bool codetag_section_found = false; 2560 2561 for_each_mod_mem_type(type) { 2562 if (!mod->mem[type].size) { 2563 mod->mem[type].base = NULL; 2564 mod->mem[type].rw_copy = NULL; 2565 continue; 2566 } 2567 2568 ret = module_memory_alloc(mod, type); 2569 if (ret) { 2570 t = type; 2571 goto out_err; 2572 } 2573 } 2574 2575 /* Transfer each section which specifies SHF_ALLOC */ 2576 pr_debug("Final section addresses for %s:\n", mod->name); 2577 for (i = 0; i < info->hdr->e_shnum; i++) { 2578 void *dest; 2579 Elf_Shdr *shdr = &info->sechdrs[i]; 2580 const char *sname; 2581 unsigned long addr; 2582 2583 if (!(shdr->sh_flags & SHF_ALLOC)) 2584 continue; 2585 2586 sname = info->secstrings + shdr->sh_name; 2587 /* 2588 * Load codetag sections separately as they might still be used 2589 * after module unload. 2590 */ 2591 if (codetag_needs_module_section(mod, sname, shdr->sh_size)) { 2592 dest = codetag_alloc_module_section(mod, sname, shdr->sh_size, 2593 arch_mod_section_prepend(mod, i), shdr->sh_addralign); 2594 if (WARN_ON(!dest)) { 2595 ret = -EINVAL; 2596 goto out_err; 2597 } 2598 if (IS_ERR(dest)) { 2599 ret = PTR_ERR(dest); 2600 goto out_err; 2601 } 2602 addr = (unsigned long)dest; 2603 codetag_section_found = true; 2604 } else { 2605 enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT; 2606 unsigned long offset = shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK; 2607 2608 addr = (unsigned long)mod->mem[type].base + offset; 2609 dest = mod->mem[type].rw_copy + offset; 2610 } 2611 2612 if (shdr->sh_type != SHT_NOBITS) { 2613 /* 2614 * Our ELF checker already validated this, but let's 2615 * be pedantic and make the goal clearer. We actually 2616 * end up copying over all modifications made to the 2617 * userspace copy of the entire struct module. 2618 */ 2619 if (i == info->index.mod && 2620 (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) { 2621 ret = -ENOEXEC; 2622 goto out_err; 2623 } 2624 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 2625 } 2626 /* 2627 * Update the userspace copy's ELF section address to point to 2628 * our newly allocated memory as a pure convenience so that 2629 * users of info can keep taking advantage and using the newly 2630 * minted official memory area. 2631 */ 2632 shdr->sh_addr = addr; 2633 pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr, 2634 (long)shdr->sh_size, info->secstrings + shdr->sh_name); 2635 } 2636 2637 return 0; 2638 out_err: 2639 for (t--; t >= 0; t--) 2640 module_memory_free(mod, t); 2641 if (codetag_section_found) 2642 codetag_free_module_sections(mod); 2643 2644 return ret; 2645 } 2646 2647 static int check_export_symbol_versions(struct module *mod) 2648 { 2649 #ifdef CONFIG_MODVERSIONS 2650 if ((mod->num_syms && !mod->crcs) || 2651 (mod->num_gpl_syms && !mod->gpl_crcs)) { 2652 return try_to_force_load(mod, 2653 "no versions for exported symbols"); 2654 } 2655 #endif 2656 return 0; 2657 } 2658 2659 static void flush_module_icache(const struct module *mod) 2660 { 2661 /* 2662 * Flush the instruction cache, since we've played with text. 2663 * Do it before processing of module parameters, so the module 2664 * can provide parameter accessor functions of its own. 
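* (For instance, a hypothetical parameter registered with
*
*	module_param_call(foo, my_set, my_get, &foo, 0644);
*
* has its my_set()/my_get() handlers in the module's freshly written
* text, so the icache must be coherent before they can safely run.)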
2665 */ 2666 for_each_mod_mem_type(type) { 2667 const struct module_memory *mod_mem = &mod->mem[type]; 2668 2669 if (mod_mem->size) { 2670 flush_icache_range((unsigned long)mod_mem->base, 2671 (unsigned long)mod_mem->base + mod_mem->size); 2672 } 2673 } 2674 } 2675 2676 bool __weak module_elf_check_arch(Elf_Ehdr *hdr) 2677 { 2678 return true; 2679 } 2680 2681 int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 2682 Elf_Shdr *sechdrs, 2683 char *secstrings, 2684 struct module *mod) 2685 { 2686 return 0; 2687 } 2688 2689 /* module_blacklist is a comma-separated list of module names */ 2690 static char *module_blacklist; 2691 static bool blacklisted(const char *module_name) 2692 { 2693 const char *p; 2694 size_t len; 2695 2696 if (!module_blacklist) 2697 return false; 2698 2699 for (p = module_blacklist; *p; p += len) { 2700 len = strcspn(p, ","); 2701 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 2702 return true; 2703 if (p[len] == ',') 2704 len++; 2705 } 2706 return false; 2707 } 2708 core_param(module_blacklist, module_blacklist, charp, 0400); 2709 2710 static struct module *layout_and_allocate(struct load_info *info, int flags) 2711 { 2712 struct module *mod; 2713 unsigned int ndx; 2714 int err; 2715 2716 /* Allow arches to frob section contents and sizes. */ 2717 err = module_frob_arch_sections(info->hdr, info->sechdrs, 2718 info->secstrings, info->mod); 2719 if (err < 0) 2720 return ERR_PTR(err); 2721 2722 err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 2723 info->secstrings, info->mod); 2724 if (err < 0) 2725 return ERR_PTR(err); 2726 2727 /* We will do a special allocation for per-cpu sections later. */ 2728 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 2729 2730 /* 2731 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that 2732 * layout_sections() can put it in the right place. 2733 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 2734 */ 2735 ndx = find_sec(info, ".data..ro_after_init"); 2736 if (ndx) 2737 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2738 /* 2739 * Mark the __jump_table section as ro_after_init as well: these data 2740 * structures are never modified, with the exception of entries that 2741 * refer to code in the __init section, which are annotated as such 2742 * at module load time. 2743 */ 2744 ndx = find_sec(info, "__jump_table"); 2745 if (ndx) 2746 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 2747 2748 /* 2749 * Determine total sizes, and put offsets in sh_entsize. For now 2750 * this is done generically; there doesn't appear to be any 2751 * special cases for the architectures. 2752 */ 2753 layout_sections(info->mod, info); 2754 layout_symtab(info->mod, info); 2755 2756 /* Allocate and move to the final place */ 2757 err = move_module(info->mod, info); 2758 if (err) 2759 return ERR_PTR(err); 2760 2761 /* Module has been copied to its final place now: return it. */ 2762 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 2763 kmemleak_load_module(mod, info); 2764 codetag_module_replaced(info->mod, mod); 2765 2766 return mod; 2767 } 2768 2769 /* mod is no longer valid after this! 
*/
2770 static void module_deallocate(struct module *mod, struct load_info *info)
2771 {
2772 percpu_modfree(mod);
2773 module_arch_freeing_init(mod);
2774
2775 free_mod_mem(mod);
2776 }
2777
2778 int __weak module_finalize(const Elf_Ehdr *hdr,
2779 const Elf_Shdr *sechdrs,
2780 struct module *me)
2781 {
2782 return 0;
2783 }
2784
2785 int __weak module_post_finalize(const Elf_Ehdr *hdr,
2786 const Elf_Shdr *sechdrs,
2787 struct module *me)
2788 {
2789 return 0;
2790 }
2791
2792 static int post_relocation(struct module *mod, const struct load_info *info)
2793 {
2794 int ret;
2795
2796 /* Sort exception table now that relocations are done. */
2797 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2798
2799 /* Copy relocated percpu area over. */
2800 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2801 info->sechdrs[info->index.pcpu].sh_size);
2802
2803 /* Setup kallsyms-specific fields. */
2804 add_kallsyms(mod, info);
2805
2806 /* Arch-specific module finalizing. */
2807 ret = module_finalize(info->hdr, info->sechdrs, mod);
2808 if (ret)
2809 return ret;
2810
2811 for_each_mod_mem_type(type) {
2812 struct module_memory *mem = &mod->mem[type];
2813
2814 if (mem->is_rox) {
2815 if (!execmem_update_copy(mem->base, mem->rw_copy,
2816 mem->size))
2817 return -ENOMEM;
2818
2819 vfree(mem->rw_copy);
2820 mem->rw_copy = NULL;
2821 }
2822 }
2823
2824 return module_post_finalize(info->hdr, info->sechdrs, mod);
2825 }
2826
2827 /* Call module constructors. */
2828 static void do_mod_ctors(struct module *mod)
2829 {
2830 #ifdef CONFIG_CONSTRUCTORS
2831 unsigned long i;
2832
2833 for (i = 0; i < mod->num_ctors; i++)
2834 mod->ctors[i]();
2835 #endif
2836 }
2837
2838 /* For freeing module_init on success, in case kallsyms is still traversing it */
2839 struct mod_initfree {
2840 struct llist_node node;
2841 void *init_text;
2842 void *init_data;
2843 void *init_rodata;
2844 };
2845
2846 static void do_free_init(struct work_struct *w)
2847 {
2848 struct llist_node *pos, *n, *list;
2849 struct mod_initfree *initfree;
2850
2851 list = llist_del_all(&init_free_list);
2852
2853 synchronize_rcu();
2854
2855 llist_for_each_safe(pos, n, list) {
2856 initfree = container_of(pos, struct mod_initfree, node);
2857 execmem_free(initfree->init_text);
2858 execmem_free(initfree->init_data);
2859 execmem_free(initfree->init_rodata);
2860 kfree(initfree);
2861 }
2862 }
2863
2864 void flush_module_init_free_work(void)
2865 {
2866 flush_work(&init_free_wq);
2867 }
2868
2869 #undef MODULE_PARAM_PREFIX
2870 #define MODULE_PARAM_PREFIX "module."
2871 /* Default value for module->async_probe_requested */
2872 static bool async_probe;
2873 module_param(async_probe, bool, 0644);
2874
2875 /*
2876 * This is where the real work happens.
2877 *
2878 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
2879 * helper command 'lx-symbols'.
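*
* An illustrative gdb session (commands only, output elided):
*
*	(gdb) b do_init_module
*	(gdb) c
*	(gdb) lx-symbols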
2880 */ 2881 static noinline int do_init_module(struct module *mod) 2882 { 2883 int ret = 0; 2884 struct mod_initfree *freeinit; 2885 #if defined(CONFIG_MODULE_STATS) 2886 unsigned int text_size = 0, total_size = 0; 2887 2888 for_each_mod_mem_type(type) { 2889 const struct module_memory *mod_mem = &mod->mem[type]; 2890 if (mod_mem->size) { 2891 total_size += mod_mem->size; 2892 if (type == MOD_TEXT || type == MOD_INIT_TEXT) 2893 text_size += mod_mem->size; 2894 } 2895 } 2896 #endif 2897 2898 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 2899 if (!freeinit) { 2900 ret = -ENOMEM; 2901 goto fail; 2902 } 2903 freeinit->init_text = mod->mem[MOD_INIT_TEXT].base; 2904 freeinit->init_data = mod->mem[MOD_INIT_DATA].base; 2905 freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base; 2906 2907 do_mod_ctors(mod); 2908 /* Start the module */ 2909 if (mod->init != NULL) 2910 ret = do_one_initcall(mod->init); 2911 if (ret < 0) { 2912 goto fail_free_freeinit; 2913 } 2914 if (ret > 0) { 2915 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 2916 "follow 0/-E convention\n" 2917 "%s: loading module anyway...\n", 2918 __func__, mod->name, ret, __func__); 2919 dump_stack(); 2920 } 2921 2922 /* Now it's a first class citizen! */ 2923 mod->state = MODULE_STATE_LIVE; 2924 blocking_notifier_call_chain(&module_notify_list, 2925 MODULE_STATE_LIVE, mod); 2926 2927 /* Delay uevent until module has finished its init routine */ 2928 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 2929 2930 /* 2931 * We need to finish all async code before the module init sequence 2932 * is done. This has potential to deadlock if synchronous module 2933 * loading is requested from async (which is not allowed!). 2934 * 2935 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 2936 * request_module() from async workers") for more details. 2937 */ 2938 if (!mod->async_probe_requested) 2939 async_synchronize_full(); 2940 2941 ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base, 2942 mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size); 2943 mutex_lock(&module_mutex); 2944 /* Drop initial reference. */ 2945 module_put(mod); 2946 trim_init_extable(mod); 2947 #ifdef CONFIG_KALLSYMS 2948 /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 2949 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 2950 #endif 2951 ret = module_enable_rodata_ro(mod, true); 2952 if (ret) 2953 goto fail_mutex_unlock; 2954 mod_tree_remove_init(mod); 2955 module_arch_freeing_init(mod); 2956 for_class_mod_mem_type(type, init) { 2957 mod->mem[type].base = NULL; 2958 mod->mem[type].size = 0; 2959 } 2960 2961 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES 2962 /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ 2963 mod->btf_data = NULL; 2964 mod->btf_base_data = NULL; 2965 #endif 2966 /* 2967 * We want to free module_init, but be aware that kallsyms may be 2968 * walking this with preempt disabled. In all the failure paths, we 2969 * call synchronize_rcu(), but we don't want to slow down the success 2970 * path. execmem_free() cannot be called in an interrupt, so do the 2971 * work and call synchronize_rcu() in a work queue. 2972 * 2973 * Note that execmem_alloc() on most architectures creates W+X page 2974 * mappings which won't be cleaned up until do_free_init() runs. Any 2975 * code such as mark_rodata_ro() which depends on those mappings to 2976 * be cleaned up needs to sync with the queued work by invoking 2977 * flush_module_init_free_work(). 
2978 */ 2979 if (llist_add(&freeinit->node, &init_free_list)) 2980 schedule_work(&init_free_wq); 2981 2982 mutex_unlock(&module_mutex); 2983 wake_up_all(&module_wq); 2984 2985 mod_stat_add_long(text_size, &total_text_size); 2986 mod_stat_add_long(total_size, &total_mod_size); 2987 2988 mod_stat_inc(&modcount); 2989 2990 return 0; 2991 2992 fail_mutex_unlock: 2993 mutex_unlock(&module_mutex); 2994 fail_free_freeinit: 2995 kfree(freeinit); 2996 fail: 2997 /* Try to protect us from buggy refcounters. */ 2998 mod->state = MODULE_STATE_GOING; 2999 synchronize_rcu(); 3000 module_put(mod); 3001 blocking_notifier_call_chain(&module_notify_list, 3002 MODULE_STATE_GOING, mod); 3003 klp_module_going(mod); 3004 ftrace_release_mod(mod); 3005 free_module(mod); 3006 wake_up_all(&module_wq); 3007 3008 return ret; 3009 } 3010 3011 static int may_init_module(void) 3012 { 3013 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3014 return -EPERM; 3015 3016 return 0; 3017 } 3018 3019 /* Is this module of this name done loading? No locks held. */ 3020 static bool finished_loading(const char *name) 3021 { 3022 struct module *mod; 3023 bool ret; 3024 3025 /* 3026 * The module_mutex should not be a heavily contended lock; 3027 * if we get the occasional sleep here, we'll go an extra iteration 3028 * in the wait_event_interruptible(), which is harmless. 3029 */ 3030 sched_annotate_sleep(); 3031 mutex_lock(&module_mutex); 3032 mod = find_module_all(name, strlen(name), true); 3033 ret = !mod || mod->state == MODULE_STATE_LIVE 3034 || mod->state == MODULE_STATE_GOING; 3035 mutex_unlock(&module_mutex); 3036 3037 return ret; 3038 } 3039 3040 /* Must be called with module_mutex held */ 3041 static int module_patient_check_exists(const char *name, 3042 enum fail_dup_mod_reason reason) 3043 { 3044 struct module *old; 3045 int err = 0; 3046 3047 old = find_module_all(name, strlen(name), true); 3048 if (old == NULL) 3049 return 0; 3050 3051 if (old->state == MODULE_STATE_COMING || 3052 old->state == MODULE_STATE_UNFORMED) { 3053 /* Wait in case it fails to load. */ 3054 mutex_unlock(&module_mutex); 3055 err = wait_event_interruptible(module_wq, 3056 finished_loading(name)); 3057 mutex_lock(&module_mutex); 3058 if (err) 3059 return err; 3060 3061 /* The module might have gone in the meantime. */ 3062 old = find_module_all(name, strlen(name), true); 3063 } 3064 3065 if (try_add_failed_module(name, reason)) 3066 pr_warn("Could not add fail-tracking for module: %s\n", name); 3067 3068 /* 3069 * We are here only when the same module was being loaded. Do 3070 * not try to load it again right now. It prevents long delays 3071 * caused by serialized module load failures. It might happen 3072 * when more devices of the same type trigger load of 3073 * a particular module. 3074 */ 3075 if (old && old->state == MODULE_STATE_LIVE) 3076 return -EEXIST; 3077 return -EBUSY; 3078 } 3079 3080 /* 3081 * We try to place it in the list now to make sure it's unique before 3082 * we dedicate too many resources. In particular, temporary percpu 3083 * memory exhaustion. 
3084 */ 3085 static int add_unformed_module(struct module *mod) 3086 { 3087 int err; 3088 3089 mod->state = MODULE_STATE_UNFORMED; 3090 3091 mutex_lock(&module_mutex); 3092 err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD); 3093 if (err) 3094 goto out; 3095 3096 mod_update_bounds(mod); 3097 list_add_rcu(&mod->list, &modules); 3098 mod_tree_insert(mod); 3099 err = 0; 3100 3101 out: 3102 mutex_unlock(&module_mutex); 3103 return err; 3104 } 3105 3106 static int complete_formation(struct module *mod, struct load_info *info) 3107 { 3108 int err; 3109 3110 mutex_lock(&module_mutex); 3111 3112 /* Find duplicate symbols (must be called under lock). */ 3113 err = verify_exported_symbols(mod); 3114 if (err < 0) 3115 goto out; 3116 3117 /* These rely on module_mutex for list integrity. */ 3118 module_bug_finalize(info->hdr, info->sechdrs, mod); 3119 module_cfi_finalize(info->hdr, info->sechdrs, mod); 3120 3121 err = module_enable_rodata_ro(mod, false); 3122 if (err) 3123 goto out_strict_rwx; 3124 err = module_enable_data_nx(mod); 3125 if (err) 3126 goto out_strict_rwx; 3127 err = module_enable_text_rox(mod); 3128 if (err) 3129 goto out_strict_rwx; 3130 3131 /* 3132 * Mark state as coming so strong_try_module_get() ignores us, 3133 * but kallsyms etc. can see us. 3134 */ 3135 mod->state = MODULE_STATE_COMING; 3136 mutex_unlock(&module_mutex); 3137 3138 return 0; 3139 3140 out_strict_rwx: 3141 module_bug_cleanup(mod); 3142 out: 3143 mutex_unlock(&module_mutex); 3144 return err; 3145 } 3146 3147 static int prepare_coming_module(struct module *mod) 3148 { 3149 int err; 3150 3151 ftrace_module_enable(mod); 3152 err = klp_module_coming(mod); 3153 if (err) 3154 return err; 3155 3156 err = blocking_notifier_call_chain_robust(&module_notify_list, 3157 MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 3158 err = notifier_to_errno(err); 3159 if (err) 3160 klp_module_going(mod); 3161 3162 return err; 3163 } 3164 3165 static int unknown_module_param_cb(char *param, char *val, const char *modname, 3166 void *arg) 3167 { 3168 struct module *mod = arg; 3169 int ret; 3170 3171 if (strcmp(param, "async_probe") == 0) { 3172 if (kstrtobool(val, &mod->async_probe_requested)) 3173 mod->async_probe_requested = true; 3174 return 0; 3175 } 3176 3177 /* Check for magic 'dyndbg' arg */ 3178 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3179 if (ret != 0) 3180 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3181 return 0; 3182 } 3183 3184 /* Module within temporary copy, this doesn't do any allocation */ 3185 static int early_mod_check(struct load_info *info, int flags) 3186 { 3187 int err; 3188 3189 /* 3190 * Now that we know we have the correct module name, check 3191 * if it's blacklisted. 3192 */ 3193 if (blacklisted(info->name)) { 3194 pr_err("Module %s is blacklisted\n", info->name); 3195 return -EPERM; 3196 } 3197 3198 err = rewrite_section_headers(info, flags); 3199 if (err) 3200 return err; 3201 3202 /* Check module struct version now, before we try to use module. */ 3203 if (!check_modstruct_version(info, info->mod)) 3204 return -ENOEXEC; 3205 3206 err = check_modinfo(info->mod, info, flags); 3207 if (err) 3208 return err; 3209 3210 mutex_lock(&module_mutex); 3211 err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING); 3212 mutex_unlock(&module_mutex); 3213 3214 return err; 3215 } 3216 3217 /* 3218 * Allocate and load the module: note that size of section 0 is always 3219 * zero, and we rely on this for optional sections. 
3220 */ 3221 static int load_module(struct load_info *info, const char __user *uargs, 3222 int flags) 3223 { 3224 struct module *mod; 3225 bool module_allocated = false; 3226 long err = 0; 3227 char *after_dashes; 3228 3229 /* 3230 * Do the signature check (if any) first. All that 3231 * the signature check needs is info->len, it does 3232 * not need any of the section info. That can be 3233 * set up later. This will minimize the chances 3234 * of a corrupt module causing problems before 3235 * we even get to the signature check. 3236 * 3237 * The check will also adjust info->len by stripping 3238 * off the sig length at the end of the module, making 3239 * checks against info->len more correct. 3240 */ 3241 err = module_sig_check(info, flags); 3242 if (err) 3243 goto free_copy; 3244 3245 /* 3246 * Do basic sanity checks against the ELF header and 3247 * sections. Cache useful sections and set the 3248 * info->mod to the userspace passed struct module. 3249 */ 3250 err = elf_validity_cache_copy(info, flags); 3251 if (err) 3252 goto free_copy; 3253 3254 err = early_mod_check(info, flags); 3255 if (err) 3256 goto free_copy; 3257 3258 /* Figure out module layout, and allocate all the memory. */ 3259 mod = layout_and_allocate(info, flags); 3260 if (IS_ERR(mod)) { 3261 err = PTR_ERR(mod); 3262 goto free_copy; 3263 } 3264 3265 module_allocated = true; 3266 3267 audit_log_kern_module(mod->name); 3268 3269 /* Reserve our place in the list. */ 3270 err = add_unformed_module(mod); 3271 if (err) 3272 goto free_module; 3273 3274 /* 3275 * We are tainting your kernel if your module gets into 3276 * the modules linked list somehow. 3277 */ 3278 module_augment_kernel_taints(mod, info); 3279 3280 /* To avoid stressing percpu allocator, do this once we're unique. */ 3281 err = percpu_modalloc(mod, info); 3282 if (err) 3283 goto unlink_mod; 3284 3285 /* Now module is in final location, initialize linked lists, etc. */ 3286 err = module_unload_init(mod); 3287 if (err) 3288 goto unlink_mod; 3289 3290 init_param_lock(mod); 3291 3292 /* 3293 * Now we've got everything in the final locations, we can 3294 * find optional sections. 3295 */ 3296 err = find_module_sections(mod, info); 3297 if (err) 3298 goto free_unload; 3299 3300 err = check_export_symbol_versions(mod); 3301 if (err) 3302 goto free_unload; 3303 3304 /* Set up MODINFO_ATTR fields */ 3305 setup_modinfo(mod, info); 3306 3307 /* Fix up syms, so that st_value is a pointer to location. */ 3308 err = simplify_symbols(mod, info); 3309 if (err < 0) 3310 goto free_modinfo; 3311 3312 err = apply_relocations(mod, info); 3313 if (err < 0) 3314 goto free_modinfo; 3315 3316 err = post_relocation(mod, info); 3317 if (err < 0) 3318 goto free_modinfo; 3319 3320 flush_module_icache(mod); 3321 3322 /* Now copy in args */ 3323 mod->args = strndup_user(uargs, ~0UL >> 1); 3324 if (IS_ERR(mod->args)) { 3325 err = PTR_ERR(mod->args); 3326 goto free_arch_cleanup; 3327 } 3328 3329 init_build_id(mod, info); 3330 3331 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 3332 ftrace_module_init(mod); 3333 3334 /* Finally it's fully formed, ready to start executing. */ 3335 err = complete_formation(mod, info); 3336 if (err) 3337 goto ddebug_cleanup; 3338 3339 err = prepare_coming_module(mod); 3340 if (err) 3341 goto bug_cleanup; 3342 3343 mod->async_probe_requested = async_probe; 3344 3345 /* Module is ready to execute: parsing args may do that. 
*/
3346 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3347 -32768, 32767, mod,
3348 unknown_module_param_cb);
3349 if (IS_ERR(after_dashes)) {
3350 err = PTR_ERR(after_dashes);
3351 goto coming_cleanup;
3352 } else if (after_dashes) {
3353 pr_warn("%s: parameters '%s' after `--' ignored\n",
3354 mod->name, after_dashes);
3355 }
3356
3357 /* Link in to sysfs. */
3358 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3359 if (err < 0)
3360 goto coming_cleanup;
3361
3362 if (is_livepatch_module(mod)) {
3363 err = copy_module_elf(mod, info);
3364 if (err < 0)
3365 goto sysfs_cleanup;
3366 }
3367
3368 /* Get rid of temporary copy. */
3369 free_copy(info, flags);
3370
3371 codetag_load_module(mod);
3372
3373 /* Done! */
3374 trace_module_load(mod);
3375
3376 return do_init_module(mod);
3377
3378 sysfs_cleanup:
3379 mod_sysfs_teardown(mod);
3380 coming_cleanup:
3381 mod->state = MODULE_STATE_GOING;
3382 destroy_params(mod->kp, mod->num_kp);
3383 blocking_notifier_call_chain(&module_notify_list,
3384 MODULE_STATE_GOING, mod);
3385 klp_module_going(mod);
3386 bug_cleanup:
3387 mod->state = MODULE_STATE_GOING;
3388 /* module_bug_cleanup needs module_mutex protection */
3389 mutex_lock(&module_mutex);
3390 module_bug_cleanup(mod);
3391 mutex_unlock(&module_mutex);
3392
3393 ddebug_cleanup:
3394 ftrace_release_mod(mod);
3395 synchronize_rcu();
3396 kfree(mod->args);
3397 free_arch_cleanup:
3398 module_arch_cleanup(mod);
3399 free_modinfo:
3400 free_modinfo(mod);
3401 free_unload:
3402 module_unload_free(mod);
3403 unlink_mod:
3404 mutex_lock(&module_mutex);
3405 /* Unlink carefully: kallsyms could be walking the list. */
3406 list_del_rcu(&mod->list);
3407 mod_tree_remove(mod);
3408 wake_up_all(&module_wq);
3409 /* Wait for RCU-sched synchronization before releasing mod->list. */
3410 synchronize_rcu();
3411 mutex_unlock(&module_mutex);
3412 free_module:
3413 mod_stat_bump_invalid(info, flags);
3414 /* Free lock-classes; relies on the preceding sync_rcu() */
3415 for_class_mod_mem_type(type, core_data) {
3416 lockdep_free_key_range(mod->mem[type].base,
3417 mod->mem[type].size);
3418 }
3419
3420 module_deallocate(mod, info);
3421 free_copy:
3422 /*
3423 * info->len is always set at this point. We distinguish between
3424 * failures that happened after the module was allocated and
3425 * those that happened before that.
3426 */ 3427 if (!module_allocated) 3428 mod_stat_bump_becoming(info, flags); 3429 free_copy(info, flags); 3430 return err; 3431 } 3432 3433 SYSCALL_DEFINE3(init_module, void __user *, umod, 3434 unsigned long, len, const char __user *, uargs) 3435 { 3436 int err; 3437 struct load_info info = { }; 3438 3439 err = may_init_module(); 3440 if (err) 3441 return err; 3442 3443 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", 3444 umod, len, uargs); 3445 3446 err = copy_module_from_user(umod, len, &info); 3447 if (err) { 3448 mod_stat_inc(&failed_kreads); 3449 mod_stat_add_long(len, &invalid_kread_bytes); 3450 return err; 3451 } 3452 3453 return load_module(&info, uargs, 0); 3454 } 3455 3456 struct idempotent { 3457 const void *cookie; 3458 struct hlist_node entry; 3459 struct completion complete; 3460 int ret; 3461 }; 3462 3463 #define IDEM_HASH_BITS 8 3464 static struct hlist_head idem_hash[1 << IDEM_HASH_BITS]; 3465 static DEFINE_SPINLOCK(idem_lock); 3466 3467 static bool idempotent(struct idempotent *u, const void *cookie) 3468 { 3469 int hash = hash_ptr(cookie, IDEM_HASH_BITS); 3470 struct hlist_head *head = idem_hash + hash; 3471 struct idempotent *existing; 3472 bool first; 3473 3474 u->ret = -EINTR; 3475 u->cookie = cookie; 3476 init_completion(&u->complete); 3477 3478 spin_lock(&idem_lock); 3479 first = true; 3480 hlist_for_each_entry(existing, head, entry) { 3481 if (existing->cookie != cookie) 3482 continue; 3483 first = false; 3484 break; 3485 } 3486 hlist_add_head(&u->entry, idem_hash + hash); 3487 spin_unlock(&idem_lock); 3488 3489 return !first; 3490 } 3491 3492 /* 3493 * We were the first one with 'cookie' on the list, and we ended 3494 * up completing the operation. We now need to walk the list, 3495 * remove everybody - which includes ourselves - fill in the return 3496 * value, and then complete the operation. 3497 */ 3498 static int idempotent_complete(struct idempotent *u, int ret) 3499 { 3500 const void *cookie = u->cookie; 3501 int hash = hash_ptr(cookie, IDEM_HASH_BITS); 3502 struct hlist_head *head = idem_hash + hash; 3503 struct hlist_node *next; 3504 struct idempotent *pos; 3505 3506 spin_lock(&idem_lock); 3507 hlist_for_each_entry_safe(pos, next, head, entry) { 3508 if (pos->cookie != cookie) 3509 continue; 3510 hlist_del_init(&pos->entry); 3511 pos->ret = ret; 3512 complete(&pos->complete); 3513 } 3514 spin_unlock(&idem_lock); 3515 return ret; 3516 } 3517 3518 /* 3519 * Wait for the idempotent worker. 3520 * 3521 * If we get interrupted, we need to remove ourselves from the 3522 * the idempotent list, and the completion may still come in. 3523 * 3524 * The 'idem_lock' protects against the race, and 'idem.ret' was 3525 * initialized to -EINTR and is thus always the right return 3526 * value even if the idempotent work then completes between 3527 * the wait_for_completion and the cleanup. 
3528 */ 3529 static int idempotent_wait_for_completion(struct idempotent *u) 3530 { 3531 if (wait_for_completion_interruptible(&u->complete)) { 3532 spin_lock(&idem_lock); 3533 if (!hlist_unhashed(&u->entry)) 3534 hlist_del(&u->entry); 3535 spin_unlock(&idem_lock); 3536 } 3537 return u->ret; 3538 } 3539 3540 static int init_module_from_file(struct file *f, const char __user * uargs, int flags) 3541 { 3542 struct load_info info = { }; 3543 void *buf = NULL; 3544 int len; 3545 3546 len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE); 3547 if (len < 0) { 3548 mod_stat_inc(&failed_kreads); 3549 return len; 3550 } 3551 3552 if (flags & MODULE_INIT_COMPRESSED_FILE) { 3553 int err = module_decompress(&info, buf, len); 3554 vfree(buf); /* compressed data is no longer needed */ 3555 if (err) { 3556 mod_stat_inc(&failed_decompress); 3557 mod_stat_add_long(len, &invalid_decompress_bytes); 3558 return err; 3559 } 3560 } else { 3561 info.hdr = buf; 3562 info.len = len; 3563 } 3564 3565 return load_module(&info, uargs, flags); 3566 } 3567 3568 static int idempotent_init_module(struct file *f, const char __user * uargs, int flags) 3569 { 3570 struct idempotent idem; 3571 3572 if (!(f->f_mode & FMODE_READ)) 3573 return -EBADF; 3574 3575 /* Are we the winners of the race and get to do this? */ 3576 if (!idempotent(&idem, file_inode(f))) { 3577 int ret = init_module_from_file(f, uargs, flags); 3578 return idempotent_complete(&idem, ret); 3579 } 3580 3581 /* 3582 * Somebody else won the race and is loading the module. 3583 */ 3584 return idempotent_wait_for_completion(&idem); 3585 } 3586 3587 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) 3588 { 3589 int err = may_init_module(); 3590 if (err) 3591 return err; 3592 3593 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); 3594 3595 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS 3596 |MODULE_INIT_IGNORE_VERMAGIC 3597 |MODULE_INIT_COMPRESSED_FILE)) 3598 return -EINVAL; 3599 3600 CLASS(fd, f)(fd); 3601 if (fd_empty(f)) 3602 return -EBADF; 3603 return idempotent_init_module(fd_file(f), uargs, flags); 3604 } 3605 3606 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ 3607 char *module_flags(struct module *mod, char *buf, bool show_state) 3608 { 3609 int bx = 0; 3610 3611 BUG_ON(mod->state == MODULE_STATE_UNFORMED); 3612 if (!mod->taints && !show_state) 3613 goto out; 3614 if (mod->taints || 3615 mod->state == MODULE_STATE_GOING || 3616 mod->state == MODULE_STATE_COMING) { 3617 buf[bx++] = '('; 3618 bx += module_flags_taint(mod->taints, buf + bx); 3619 /* Show a - for module-is-being-unloaded */ 3620 if (mod->state == MODULE_STATE_GOING && show_state) 3621 buf[bx++] = '-'; 3622 /* Show a + for module-is-being-loaded */ 3623 if (mod->state == MODULE_STATE_COMING && show_state) 3624 buf[bx++] = '+'; 3625 buf[bx++] = ')'; 3626 } 3627 out: 3628 buf[bx] = '\0'; 3629 3630 return buf; 3631 } 3632 3633 /* Given an address, look for it in the module exception tables. 
*/
3634 const struct exception_table_entry *search_module_extables(unsigned long addr)
3635 {
3636 const struct exception_table_entry *e = NULL;
3637 struct module *mod;
3638
3639 preempt_disable();
3640 mod = __module_address(addr);
3641 if (!mod)
3642 goto out;
3643
3644 if (!mod->num_exentries)
3645 goto out;
3646
3647 e = search_extable(mod->extable,
3648 mod->num_exentries,
3649 addr);
3650 out:
3651 preempt_enable();
3652
3653 /*
3654 * If we found one, we are currently running inside it, hence
3655 * the module cannot be unloaded and no refcount is needed.
3656 */
3657 return e;
3658 }
3659
3660 /**
3661 * is_module_address() - is this address inside a module?
3662 * @addr: the address to check.
3663 *
3664 * See is_module_text_address() if you simply want to see if the address
3665 * is code (not data).
3666 */
3667 bool is_module_address(unsigned long addr)
3668 {
3669 bool ret;
3670
3671 preempt_disable();
3672 ret = __module_address(addr) != NULL;
3673 preempt_enable();
3674
3675 return ret;
3676 }
3677
3678 /**
3679 * __module_address() - get the module which contains an address.
3680 * @addr: the address.
3681 *
3682 * Must be called with preempt disabled or module mutex held so that
3683 * the module doesn't get freed during this.
3684 */
3685 struct module *__module_address(unsigned long addr)
3686 {
3687 struct module *mod;
3688
3689 if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
3690 goto lookup;
3691
3692 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
3693 if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
3694 goto lookup;
3695 #endif
3696
3697 return NULL;
3698
3699 lookup:
3700 module_assert_mutex_or_preempt();
3701
3702 mod = mod_find(addr, &mod_tree);
3703 if (mod) {
3704 BUG_ON(!within_module(addr, mod));
3705 if (mod->state == MODULE_STATE_UNFORMED)
3706 mod = NULL;
3707 }
3708 return mod;
3709 }
3710
3711 /**
3712 * is_module_text_address() - is this address inside module code?
3713 * @addr: the address to check.
3714 *
3715 * See is_module_address() if you simply want to see if the address is
3716 * anywhere in a module. See kernel_text_address() for testing if an
3717 * address corresponds to kernel or module code.
3718 */
3719 bool is_module_text_address(unsigned long addr)
3720 {
3721 bool ret;
3722
3723 preempt_disable();
3724 ret = __module_text_address(addr) != NULL;
3725 preempt_enable();
3726
3727 return ret;
3728 }
3729
3730 /**
3731 * __module_text_address() - get the module whose code contains an address.
3732 * @addr: the address.
3733 *
3734 * Must be called with preempt disabled or module mutex held so that
3735 * the module doesn't get freed during this.
3736 */
3737 struct module *__module_text_address(unsigned long addr)
3738 {
3739 struct module *mod = __module_address(addr);
3740 if (mod) {
3741 /* Make sure it's within the text section. */
3742 if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
3743 !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
3744 mod = NULL;
3745 }
3746 return mod;
3747 }
3748
3749 /* Don't grab the lock; we're oopsing.
*/ 3750 void print_modules(void) 3751 { 3752 struct module *mod; 3753 char buf[MODULE_FLAGS_BUF_SIZE]; 3754 3755 printk(KERN_DEFAULT "Modules linked in:"); 3756 /* Most callers should already have preempt disabled, but make sure */ 3757 preempt_disable(); 3758 list_for_each_entry_rcu(mod, &modules, list) { 3759 if (mod->state == MODULE_STATE_UNFORMED) 3760 continue; 3761 pr_cont(" %s%s", mod->name, module_flags(mod, buf, true)); 3762 } 3763 3764 print_unloaded_tainted_modules(); 3765 preempt_enable(); 3766 if (last_unloaded_module.name[0]) 3767 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, 3768 last_unloaded_module.taints); 3769 pr_cont("\n"); 3770 } 3771 3772 #ifdef CONFIG_MODULE_DEBUGFS 3773 struct dentry *mod_debugfs_root; 3774 3775 static int module_debugfs_init(void) 3776 { 3777 mod_debugfs_root = debugfs_create_dir("modules", NULL); 3778 return 0; 3779 } 3780 module_init(module_debugfs_init); 3781 #endif 3782