xref: /linux/arch/x86/kernel/ldt.c (revision b85d45947951d23cb22d90caecf4c1eb81342c96)
/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
	mm_context_t *pc;

	if (current->active_mm != current_mm)
		return;

	pc = &current->active_mm->context;
	set_ldt(pc->ldt->entries, pc->ldt->size);
}

/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(int size)
{
	struct ldt_struct *new_ldt;
	int alloc_size;

	if (size > LDT_ENTRIES)
		return NULL;

	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
	if (!new_ldt)
		return NULL;

	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
	alloc_size = size * LDT_ENTRY_SIZE;

	/*
	 * Xen is very picky: it requires a page-aligned LDT that has no
	 * trailing nonzero bytes in any page that contains LDT descriptors.
	 * Keep it simple: zero the whole allocation and never allocate less
	 * than PAGE_SIZE.
	 */
	if (alloc_size > PAGE_SIZE)
		new_ldt->entries = vzalloc(alloc_size);
	else
		new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);

	if (!new_ldt->entries) {
		kfree(new_ldt);
		return NULL;
	}

	new_ldt->size = size;
	return new_ldt;
}

/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
	paravirt_alloc_ldt(ldt->entries, ldt->size);
}

/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
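
/*
 * For reference, a rough sketch of the reader that this release pairs
 * with, load_mm_ldt() in asm/mmu_context.h (details elided; see that
 * header for the real thing):
 *
 *	struct ldt_struct *ldt;
 *
 *	ldt = lockless_dereference(mm->context.ldt);
 *	if (unlikely(ldt))
 *		set_ldt(ldt->entries, ldt->size);
 *	else
 *		clear_LDT();
 */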
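/*
 * Undo alloc_ldt_struct() and finalize_ldt_struct(): release the
 * paravirt hooks' view of the LDT, then free the entries with the
 * allocator that matches the size-based choice made at allocation time.
 */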
static void free_ldt_struct(struct ldt_struct *ldt)
{
	if (likely(!ldt))
		return;

	paravirt_free_ldt(ldt->entries, ldt->size);
	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
		vfree(ldt->entries);
	else
		free_page((unsigned long)ldt->entries);
	kfree(ldt);
}

/*
 * We do not have to muck with descriptors here; that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct ldt_struct *new_ldt;
	struct mm_struct *old_mm;
	int retval = 0;

	mutex_init(&mm->context.lock);
	old_mm = current->mm;
	if (!old_mm) {
		mm->context.ldt = NULL;
		return 0;
	}

	mutex_lock(&old_mm->context.lock);
	if (!old_mm->context.ldt) {
		mm->context.ldt = NULL;
		goto out_unlock;
	}

	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
	if (!new_ldt) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
	       new_ldt->size * LDT_ENTRY_SIZE);
	finalize_ldt_struct(new_ldt);

	mm->context.ldt = new_ldt;

out_unlock:
	mutex_unlock(&old_mm->context.lock);
	return retval;
}

/*
 * No need to lock the MM as we are the last user.
 *
 * 64bit: Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	free_ldt_struct(mm->context.ldt);
	mm->context.ldt = NULL;
}

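/*
 * Copy the current mm's LDT out to userspace. Short LDTs are
 * zero-filled up to bytecount; returns the number of bytes "read",
 * or 0 if the mm has no LDT.
 */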
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int retval;
	unsigned long size;
	struct mm_struct *mm = current->mm;

	mutex_lock(&mm->context.lock);

	if (!mm->context.ldt) {
		retval = 0;
		goto out_unlock;
	}

	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
		retval = -EFAULT;
		goto out_unlock;
	}

	if (size != bytecount) {
		/* Zero-fill the rest and pretend we read bytecount bytes. */
		if (clear_user(ptr + size, bytecount - size)) {
			retval = -EFAULT;
			goto out_unlock;
		}
	}
	retval = bytecount;

out_unlock:
	mutex_unlock(&mm->context.lock);
	return retval;
}

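/*
 * func 2: the "default" LDT is empty, so the user buffer is simply
 * zeroed (capped at a small, historical size).
 */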
static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	/* CHECKME: Can we use _one_ random number? */
#ifdef CONFIG_X86_32
	unsigned long size = 5 * sizeof(struct desc_struct);
#else
	unsigned long size = 128;
#endif
	if (bytecount > size)
		bytecount = size;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}

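/*
 * Install one LDT entry (or clear it). A finalized LDT is immutable,
 * so this allocates a complete new ldt_struct, copies the old entries
 * across, installs the new table and frees the old one. oldmode
 * selects the legacy func-1 semantics: base == 0 && limit == 0 clears
 * the entry and the 'useable' (AVL) bit is forced off.
 */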
static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct *mm = current->mm;
	struct desc_struct ldt;
	int error;
	struct user_desc ldt_info;
	int oldsize, newsize;
	struct ldt_struct *new_ldt, *old_ldt;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
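	/*
	 * contents == 3 selects a conforming code segment (fill_ldt()
	 * copies 'contents' straight into the descriptor type bits).
	 * Such an entry may only be installed not-present, and not at
	 * all through the legacy oldmode interface.
	 */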
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
	    LDT_empty(&ldt_info)) {
		/* The user wants to clear the entry. */
		memset(&ldt, 0, sizeof(ldt));
	} else {
		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
			error = -EINVAL;
			goto out;
		}

		fill_ldt(&ldt, &ldt_info);
		if (oldmode)
			ldt.avl = 0;
	}

	mutex_lock(&mm->context.lock);

	old_ldt = mm->context.ldt;
	oldsize = old_ldt ? old_ldt->size : 0;
	newsize = max((int)(ldt_info.entry_number + 1), oldsize);

	error = -ENOMEM;
	new_ldt = alloc_ldt_struct(newsize);
	if (!new_ldt)
		goto out_unlock;

	if (old_ldt)
		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
	new_ldt->entries[ldt_info.entry_number] = ldt;
	finalize_ldt_struct(new_ldt);

	install_ldt(mm, new_ldt);
	free_ldt_struct(old_ldt);
	error = 0;

out_unlock:
	mutex_unlock(&mm->context.lock);
out:
	return error;
}

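/*
 * The modify_ldt(2) entry point. func selects the operation: 0 reads
 * the LDT, 2 reads the (empty) default LDT, and 1/0x11 write one
 * entry, with func 1 using the legacy semantics (see write_ldt()).
 */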
asmlinkage int sys_modify_ldt(int func, void __user *ptr,
			      unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}
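
/*
 * Illustrative userspace usage, not part of this file: a minimal,
 * untested sketch that installs one 32-bit data segment covering a
 * page at "buf" (a hypothetical page-aligned buffer). glibc provides
 * no wrapper, so the call goes through syscall(2):
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc desc = {
 *		.entry_number    = 0,
 *		.base_addr       = (unsigned int)(unsigned long)buf,
 *		.limit           = 0,		// one page, since...
 *		.seg_32bit       = 1,
 *		.contents        = 0,		// data, expand-up
 *		.read_exec_only  = 0,
 *		.limit_in_pages  = 1,		// ...limit is in 4KiB units
 *		.seg_not_present = 0,
 *		.useable         = 1,
 *	};
 *
 *	if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0)
 *		perror("modify_ldt");
 */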