xref: /linux/io_uring/memmap.c (revision 663a917475530feff868a4f2bda286ea4171f420)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"
#include "rsrc.h"

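/*
 * Try to satisfy the allocation with a single allocation of the required
 * order (a compound page for order > 0), recording each page in @pages.
 */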
static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
				   size_t size, gfp_t gfp)
{
	struct page *page;
	int i, order;

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-ENOMEM);
	else if (order)
		gfp |= __GFP_COMP;

	page = alloc_pages(gfp, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}

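/*
 * Fallback for when the compound allocation fails: allocate each page
 * individually and stitch them into one contiguous kernel mapping with vmap().
 */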
static void *io_mem_alloc_single(struct page **pages, int nr_pages, size_t size,
				 gfp_t gfp)
{
	void *ret;
	int i;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto err;
	}

	ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (ret)
		return ret;
err:
	while (i--)
		put_page(pages[i]);
	return ERR_PTR(-ENOMEM);
}

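/*
 * Allocate and map kernel memory for a ring region. A compound allocation is
 * attempted first; failing that, individual pages are allocated and vmap()'ed.
 * On success, *out_pages and *npages describe the backing pages.
 */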
void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
	struct page **pages;
	int nr_pages;
	void *ret;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pages = kvmalloc_array(nr_pages, sizeof(struct page *), gfp);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret))
		goto done;

	ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret)) {
done:
		*out_pages = pages;
		*npages = nr_pages;
		return ret;
	}

	kvfree(pages);
	*out_pages = NULL;
	*npages = 0;
	return ret;
}

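/*
 * Undo io_pages_map(): drop the page references if @put_pages is set, remove
 * a vmap'ed mapping if one was created, and free the page array.
 */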
void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages)
{
	bool do_vunmap = false;

	if (!ptr)
		return;

	if (put_pages && *npages) {
		struct page **to_free = *pages;
		int i;

		/*
		 * Only did vmap for the non-compound multiple page case.
		 * For the compound page, we just need to put the head.
		 */
		if (PageCompound(to_free[0]))
			*npages = 1;
		else if (*npages > 1)
			do_vunmap = true;
		for (i = 0; i < *npages; i++)
			put_page(to_free[i]);
	}
	if (do_vunmap)
		vunmap(ptr);
	kvfree(*pages);
	*pages = NULL;
	*npages = 0;
}

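/*
 * Unpin a previously pinned user page array and free the array itself.
 */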
void io_pages_free(struct page ***pages, int npages)
{
	struct page **page_array = *pages;

	if (!page_array)
		return;

	unpin_user_pages(page_array, npages);
	kvfree(page_array);
	*pages = NULL;
}

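/*
 * Pin the user memory range [uaddr, uaddr + len) for long-term kernel use.
 * Returns the pinned page array and its size via @npages, or an ERR_PTR.
 */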
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;
	if (WARN_ON_ONCE(!nr_pages))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(nr_pages > INT_MAX))
		return ERR_PTR(-EOVERFLOW);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
					pages);
	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

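/*
 * Pin a user-provided, page-aligned memory range and vmap() it so the kernel
 * can address it directly; used when the ring memory is supplied by the
 * application rather than allocated by the kernel.
 */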
void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
		     unsigned long uaddr, size_t size)
{
	struct page **page_array;
	unsigned int nr_pages;
	void *page_addr;

	*npages = 0;

	if (uaddr & (PAGE_SIZE - 1) || !size)
		return ERR_PTR(-EINVAL);

	nr_pages = 0;
	page_array = io_pin_pages(uaddr, size, &nr_pages);
	if (IS_ERR(page_array))
		return page_array;

	page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
	if (page_addr) {
		*pages = page_array;
		*npages = nr_pages;
		return page_addr;
	}

	io_pages_free(&page_array, nr_pages);
	return ERR_PTR(-ENOMEM);
}

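/*
 * Release a mapped region: unpin its pages, remove the vmap, return any
 * accounted memory to the user, and clear the descriptor.
 */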
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
	if (mr->pages) {
		unpin_user_pages(mr->pages, mr->nr_pages);
		kvfree(mr->pages);
	}
	if (mr->vmap_ptr)
		vunmap(mr->vmap_ptr);
	if (mr->nr_pages && ctx->user)
		__io_unaccount_mem(ctx->user, mr->nr_pages);

	memset(mr, 0, sizeof(*mr));
}

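/*
 * Validate a user supplied region descriptor, then pin, account, and vmap()
 * the memory it describes so the kernel can access the region directly.
 */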
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg)
{
	int pages_accounted = 0;
	struct page **pages;
	int nr_pages, ret;
	void *vptr;
	u64 end;

	if (WARN_ON_ONCE(mr->pages || mr->vmap_ptr || mr->nr_pages))
		return -EFAULT;
	if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
		return -EINVAL;
	if (reg->flags != IORING_MEM_REGION_TYPE_USER)
		return -EINVAL;
	if (!reg->user_addr)
		return -EFAULT;
	if (!reg->size || reg->mmap_offset || reg->id)
		return -EINVAL;
	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
		return -E2BIG;
	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
		return -EINVAL;
	if (check_add_overflow(reg->user_addr, reg->size, &end))
		return -EOVERFLOW;

	pages = io_pin_pages(reg->user_addr, reg->size, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			goto out_free;
		pages_accounted = nr_pages;
	}

	vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vptr) {
		ret = -ENOMEM;
		goto out_free;
	}

	mr->pages = pages;
	mr->vmap_ptr = vptr;
	mr->nr_pages = nr_pages;
	return 0;
out_free:
	if (pages_accounted)
		__io_unaccount_mem(ctx->user, pages_accounted);
	io_pages_free(&pages, nr_pages);
	return ret;
}

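/*
 * Translate an mmap offset into the kernel pointer backing that region:
 * the SQ/CQ rings, the SQE array, or a provided buffer ring. Returns an
 * ERR_PTR if the request is not valid for this ring.
 */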
static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
					    size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;

	switch ((pgoff << PAGE_SHIFT) & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		/* Don't allow mmap if the ring was setup without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		if (!ctx->rings)
			return ERR_PTR(-EFAULT);
		return ctx->rings;
	case IORING_OFF_SQES:
		/* Don't allow mmap if the ring was setup without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		if (!ctx->sq_sqes)
			return ERR_PTR(-EFAULT);
		return ctx->sq_sqes;
	case IORING_OFF_PBUF_RING: {
		struct io_buffer_list *bl;
		unsigned int bgid;
		void *ptr;

		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
		bl = io_pbuf_get_bl(ctx, bgid);
		if (IS_ERR(bl))
			return bl;
		ptr = bl->buf_ring;
		io_put_bl(ctx, bl);
		return ptr;
		}
	}

	return ERR_PTR(-EINVAL);
}

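/*
 * Insert the pre-allocated ring pages into the user's VMA.
 */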
int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages)
{
	unsigned long nr_pages = npages;

	vm_flags_set(vma, VM_DONTEXPAND);
	return vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
}

#ifdef CONFIG_MMU

__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int npages;
	void *ptr;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
	case IORING_OFF_SQES:
		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
						ctx->n_sqe_pages);
	case IORING_OFF_PBUF_RING:
		return io_pbuf_mmap(file, vma);
	}

	return -EINVAL;
}

unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = filp->private_data;
	void *ptr;

	/*
	 * Do not allow mapping to a user-provided address, to avoid breaking
	 * the aliasing rules. Userspace is not able to guess the offset
	 * address of the kernel kmalloc()ed memory area.
	 */
	if (addr)
		return -EINVAL;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
	if (IS_ERR(ptr))
		return -ENOMEM;

	/*
	 * Some architectures have strong cache aliasing requirements.
	 * For such architectures we need a coherent mapping which aliases
	 * kernel memory *and* userspace memory. To achieve that:
	 * - use a NULL file pointer to reference physical memory, and
	 * - use the kernel virtual address of the shared io_uring context
	 *   (instead of the userspace-provided address, which has to be 0UL
	 *   anyway).
	 * - use the same pgoff which the get_unmapped_area() uses to
	 *   calculate the page colouring.
	 * For architectures without such aliasing requirements, any suitable
	 * mapping will be returned because addr is 0.
	 */
	filp = NULL;
	flags |= MAP_SHARED;
	pgoff = 0;	/* has been translated to ptr above */
#ifdef SHM_COLOUR
	addr = (uintptr_t) ptr;
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;
#endif
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}

#else /* !CONFIG_MMU */

int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = file->private_data;
	void *ptr;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */