xref: /linux/arch/um/kernel/skas/mmu.c (revision 55d0969c451159cff86949b38c39171cab962069)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>
#include <stub-data.h>

/* Ensure the stub_data struct covers the allocated area */
static_assert(sizeof(struct stub_data) == STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);

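/*
 * Set up the SKAS address space for a new mm: allocate the stub data
 * pages and spawn the userspace stub process that will host it.
 */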
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_id *new_id = &mm->context.id;
	unsigned long stack = 0;
	int ret = -ENOMEM;

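	/*
	 * Allocate zeroed, contiguous pages for the stub data; the order
	 * is ilog2(STUB_DATA_PAGES), so STUB_DATA_PAGES must be a power
	 * of two. __get_free_pages() returns 0 on failure.
	 */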
	stack = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ilog2(STUB_DATA_PAGES));
	if (stack == 0)
		goto out;

	new_id->stack = stack;

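	/*
	 * Start the userspace stub process with signals blocked across
	 * the clone; start_userspace() returns the stub's pid or a
	 * negative error code, which is propagated below.
	 */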
	block_signals_trace();
	new_id->pid = start_userspace(stack);
	unblock_signals_trace();

	if (new_id->pid < 0) {
		ret = new_id->pid;
		goto out_free;
	}

	/*
	 * Ensure the new MM is clean and nothing unwanted is mapped.
	 *
	 * TODO: We should clear the memory up to STUB_START to ensure there is
	 * nothing mapped there, i.e. we (currently) have:
	 *
	 * |- user memory -|- unused        -|- stub        -|- unused    -|
	 *                 ^ TASK_SIZE      ^ STUB_START
	 *
	 * Meaning we have two unused areas where we may still have valid
	 * mappings from our internal clone(). That isn't really a problem as
	 * userspace is not going to access them, but it is definitely not
	 * correct.
	 *
	 * However, we are "lucky" and if rseq is configured, then on 32 bit
	 * it will fall into the first empty range while on 64 bit it is going
	 * to use an anonymous mapping in the second range. As such, things
	 * continue to work for now as long as we don't start unmapping these
	 * areas.
	 *
	 * Change this to STUB_START once we have a clean userspace.
	 */
	unmap(new_id, 0, TASK_SIZE);

	return 0;

 out_free:
	if (new_id->stack != 0)
		free_pages(new_id->stack, ilog2(STUB_DATA_PAGES));
 out:
	return ret;
}

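/*
 * Tear down the SKAS address space: kill the stub process backing this
 * mm and free its stub data pages.
 */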
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context() wasn't called, the pid will be zero,
	 * resulting in a kill(0) that would take down the whole UML
	 * instance. Also reject negative pids and pid 1, since they
	 * shouldn't happen either.
	 */
	if (mmu->id.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.pid);
		return;
	}
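	/* Kill the ptraced stub process; the second argument asks for it
	 * to be reaped as well. */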
	os_kill_ptraced_process(mmu->id.pid, 1);

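	/* Release the stub data pages allocated in init_new_context(). */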
	free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));
}