--- main.c (bc6b94d3ea062454ca889884db99e145efffcb93)
+++ main.c (12af2b83d0b17ec8b379b721dd4a8fbcd5d791f3)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
 */

#define INCLUDE_VERMAGIC

--- 43 unchanged lines hidden ---

#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <linux/cfi.h>
#include <linux/debugfs.h>
+#include <linux/execmem.h>
#include <uapi/linux/module.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

/*
 * Mutex protects:

--- 1106 unchanged lines hidden ---

                            || PTR_ERR(ksym) != -EBUSY,
                            30 * HZ) <= 0) {
                pr_warn("%s: gave up waiting for init of module %s.\n",
                        mod->name, owner);
        }
        return ksym;
}

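/*
 * Illustrative sketch of the wait idiom used above, with hypothetical demo_*
 * names: wait_event_interruptible_timeout() returns the remaining jiffies
 * (> 0) once the condition holds, 0 on timeout and a negative value when
 * interrupted, so a result of "<= 0" means the condition never came true
 * while we waited. Assumes <linux/wait.h> and <linux/jiffies.h> are pulled in.
 */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

static int demo_wait_ready(void)
{
        if (wait_event_interruptible_timeout(demo_wq, demo_ready, 30 * HZ) <= 0) {
                pr_warn("demo: gave up waiting\n");
                return -ETIMEDOUT;      /* timed out or interrupted */
        }
        return 0;
}

static void demo_mark_ready(void)
{
        demo_ready = true;
        wake_up_all(&demo_wq);          /* wake any demo_wait_ready() sleeper */
}
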
-void __weak module_memfree(void *module_region)
-{
-        /*
-         * This memory may be RO, and freeing RO memory in an interrupt is not
-         * supported by vmalloc.
-         */
-        WARN_ON(in_interrupt());
-        vfree(module_region);
-}
-
void __weak module_arch_cleanup(struct module *mod)
{
}

void __weak module_arch_freeing_init(struct module *mod)
{
}


--- 8 unchanged lines hidden ---

        unsigned int size = PAGE_ALIGN(mod->mem[type].size);
        void *ptr;

        mod->mem[type].size = size;

        if (mod_mem_use_vmalloc(type))
                ptr = vmalloc(size);
        else
-                ptr = module_alloc(size);
+                ptr = execmem_alloc(EXECMEM_MODULE_TEXT, size);

        if (!ptr)
                return -ENOMEM;

        /*
         * The pointer to these blocks of memory are stored on the module
         * structure and we keep that around so long as the module is
         * around. We only free that memory when we unload the module.

--- 14 unchanged lines hidden ---


static void module_memory_free(struct module *mod, enum mod_mem_type type)
{
        void *ptr = mod->mem[type].base;

        if (mod_mem_use_vmalloc(type))
                vfree(ptr);
        else
-                module_memfree(ptr);
+                execmem_free(ptr);
}

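/*
 * Illustrative sketch of the <linux/execmem.h> pairing now used above, with
 * hypothetical demo_* helpers: execmem_alloc() returns memory suitable for
 * the requested use (module text here), execmem_free() releases it, and, as
 * with the old module_memfree(), the free side must not run in interrupt
 * context.
 */
static void *demo_text_alloc(size_t size)
{
        void *p = execmem_alloc(EXECMEM_MODULE_TEXT, size);

        if (!p)
                return NULL;
        /* caller writes code into p and adjusts permissions as needed */
        return p;
}

static void demo_text_free(void *p)
{
        WARN_ON(in_interrupt());        /* mirrors the old module_memfree() check */
        execmem_free(p);
}
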
static void free_mod_mem(struct module *mod)
{
        for_each_mod_mem_type(type) {
                struct module_memory *mod_mem = &mod->mem[type];

                if (type == MOD_DATA)

--- 1235 unchanged lines hidden ---

        struct mod_initfree *initfree;

        list = llist_del_all(&init_free_list);

        synchronize_rcu();

        llist_for_each_safe(pos, n, list) {
                initfree = container_of(pos, struct mod_initfree, node);
-                module_memfree(initfree->init_text);
-                module_memfree(initfree->init_data);
-                module_memfree(initfree->init_rodata);
+                execmem_free(initfree->init_text);
+                execmem_free(initfree->init_data);
+                execmem_free(initfree->init_rodata);
                kfree(initfree);
        }
}

void flush_module_init_free_work(void)
{
        flush_work(&init_free_wq);
}
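
/*
 * Illustrative sketch of the deferred-free pattern implemented above, with
 * hypothetical demo_* names (the real code uses init_free_list, init_free_wq
 * and do_free_init()): producers push nodes onto a lock-free llist and kick a
 * workqueue; the worker detaches the whole list with llist_del_all(), waits
 * out an RCU grace period, then walks it with llist_for_each_safe() and frees
 * each entry.
 */
struct demo_deferred {
        void *buf;
        struct llist_node node;
};

static LLIST_HEAD(demo_free_list);

static void demo_free_work_fn(struct work_struct *work)
{
        struct llist_node *pos, *n, *list = llist_del_all(&demo_free_list);
        struct demo_deferred *d;

        synchronize_rcu();
        llist_for_each_safe(pos, n, list) {
                d = container_of(pos, struct demo_deferred, node);
                execmem_free(d->buf);
                kfree(d);
        }
}
static DECLARE_WORK(demo_free_work, demo_free_work_fn);

static void demo_defer_free(struct demo_deferred *d)
{
        /* llist_add() returns true only when the list was previously empty */
        if (llist_add(&d->node, &demo_free_list))
                schedule_work(&demo_free_work);
}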

--- 93 unchanged lines hidden ---

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
        /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */
        mod->btf_data = NULL;
#endif
        /*
         * We want to free module_init, but be aware that kallsyms may be
         * walking this with preempt disabled. In all the failure paths, we
         * call synchronize_rcu(), but we don't want to slow down the success
-         * path. module_memfree() cannot be called in an interrupt, so do the
+         * path. execmem_free() cannot be called in an interrupt, so do the
         * work and call synchronize_rcu() in a work queue.
         *
-         * Note that module_alloc() on most architectures creates W+X page
+         * Note that execmem_alloc() on most architectures creates W+X page
         * mappings which won't be cleaned up until do_free_init() runs. Any
         * code such as mark_rodata_ro() which depends on those mappings to
         * be cleaned up needs to sync with the queued work by invoking
         * flush_module_init_free_work().
         */
        if (llist_add(&freeinit->node, &init_free_list))
                schedule_work(&init_free_wq);

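/*
 * Illustrative sketch of the synchronisation the comment above asks for, with
 * a hypothetical demo_harden_mappings() wrapper: code that is about to
 * tighten kernel mappings (such as the mark_rodata_ro() path late in boot)
 * first drains the queued frees so no stale W+X init mappings are left behind.
 */
static void demo_harden_mappings(void)
{
        flush_module_init_free_work();  /* wait for do_free_init() to run */
        mark_rodata_ro();               /* then flip protections, as described */
}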

--- 778 unchanged lines hidden ---