// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"
#include "rsrc.h"

static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
				   size_t size, gfp_t gfp)
{
	struct page *page;
	int i, order;

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-ENOMEM);
	else if (order)
		gfp |= __GFP_COMP;

	page = alloc_pages(gfp, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}
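
/*
 * Note: for order > 0 the allocation is made a compound page (__GFP_COMP),
 * which is what lets io_pages_unmap() below recognize this case via
 * PageCompound() and put only the head page. Orders beyond MAX_PAGE_ORDER
 * cannot be satisfied by the page allocator, so they are rejected up front.
 */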

static void *io_mem_alloc_single(struct page **pages, int nr_pages, size_t size,
				 gfp_t gfp)
{
	void *ret;
	int i;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto err;
	}

	ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (ret)
		return ret;
err:
	while (i--)
		put_page(pages[i]);
	return ERR_PTR(-ENOMEM);
}
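
/*
 * Fallback for when a physically contiguous allocation isn't available:
 * allocate each page individually and stitch them together with vmap().
 * On the error path, "i" indexes the first slot that failed to allocate
 * (or equals nr_pages if vmap() itself failed), so "while (i--)" unwinds
 * exactly the pages that were obtained.
 */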

void *io_pages_map(struct page ***out_pages, unsigned short *npages,
		   size_t size)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
	struct page **pages;
	int nr_pages;
	void *ret;

	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pages = kvmalloc_array(nr_pages, sizeof(struct page *), gfp);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = io_mem_alloc_compound(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret))
		goto done;
	if (nr_pages == 1)
		goto fail;

	ret = io_mem_alloc_single(pages, nr_pages, size, gfp);
	if (!IS_ERR(ret)) {
done:
		*out_pages = pages;
		*npages = nr_pages;
		return ret;
	}
fail:
	kvfree(pages);
	*out_pages = NULL;
	*npages = 0;
	return ret;
}
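
/*
 * io_pages_map() prefers the compound (physically contiguous) path and
 * only falls back to per-page allocation plus vmap(). A hypothetical
 * caller might look like this (illustrative sketch only, not code from
 * this file):
 *
 *	struct page **pages;
 *	unsigned short npages;
 *	void *rings;
 *
 *	rings = io_pages_map(&pages, &npages, ring_size);
 *	if (IS_ERR(rings))
 *		return PTR_ERR(rings);
 *	...
 *	io_pages_unmap(rings, &pages, &npages, true);
 */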

void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages,
		    bool put_pages)
{
	bool do_vunmap = false;

	if (!ptr)
		return;

	if (put_pages && *npages) {
		struct page **to_free = *pages;
		int i;

		/*
		 * We only did vmap for the non-compound multiple-page case.
		 * For a compound page, we just need to put the head page.
		 */
		if (PageCompound(to_free[0]))
			*npages = 1;
		else if (*npages > 1)
			do_vunmap = true;
		for (i = 0; i < *npages; i++)
			put_page(to_free[i]);
	}
	if (do_vunmap)
		vunmap(ptr);
	kvfree(*pages);
	*pages = NULL;
	*npages = 0;
}
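
/*
 * When put_pages is false, only the page array is freed and the pointers
 * are cleared; the pages themselves (and any vmap of them) are left for
 * whoever owns the backing memory to tear down.
 */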

void io_pages_free(struct page ***pages, int npages)
{
	struct page **page_array = *pages;

	if (!page_array)
		return;

	unpin_user_pages(page_array, npages);
	kvfree(page_array);
	*pages = NULL;
}
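
/*
 * Counterpart to io_pin_pages() below: drops the pins on user pages and
 * frees the page array. Note the asymmetry with io_pages_unmap() above,
 * which handles kernel-allocated (not pinned) pages.
 */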

struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	if (check_add_overflow(uaddr, len, &end))
		return ERR_PTR(-EOVERFLOW);
	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
		return ERR_PTR(-EOVERFLOW);

	end = end >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;
	if (WARN_ON_ONCE(!nr_pages))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(nr_pages > INT_MAX))
		return ERR_PTR(-EOVERFLOW);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
					pages);
	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}
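
/*
 * The rounding above covers partial pages at both ends: with 4K pages,
 * uaddr = 0x1234 and len = 0x2000 give start = 1 and end = 5? No:
 * (0x3234 + 0xfff) >> 12 = 4 and 0x1234 >> 12 = 1, so nr_pages = 3 even
 * though len is exactly two pages. FOLL_LONGTERM is used because the
 * pins may be held indefinitely, not just for the duration of a syscall.
 */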

void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
		     unsigned long uaddr, size_t size)
{
	struct page **page_array;
	unsigned int nr_pages;
	void *page_addr;

	*npages = 0;

	if (uaddr & (PAGE_SIZE - 1) || !size)
		return ERR_PTR(-EINVAL);

	nr_pages = 0;
	page_array = io_pin_pages(uaddr, size, &nr_pages);
	if (IS_ERR(page_array))
		return page_array;

	page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
	if (page_addr) {
		*pages = page_array;
		*npages = nr_pages;
		return page_addr;
	}

	io_pages_free(&page_array, nr_pages);
	return ERR_PTR(-ENOMEM);
}
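
/*
 * Used when the ring memory is supplied by userspace (e.g. rings created
 * with IORING_SETUP_NO_MMAP): the user pages are pinned and then vmap()ed
 * so the kernel gets its own contiguous view of them. Unlike
 * io_pin_pages(), uaddr here must be page aligned.
 */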

void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
	if (mr->pages) {
		unpin_user_pages(mr->pages, mr->nr_pages);
		kvfree(mr->pages);
	}
	if (mr->vmap_ptr)
		vunmap(mr->vmap_ptr);
	if (mr->nr_pages && ctx->user)
		__io_unaccount_mem(ctx->user, mr->nr_pages);

	memset(mr, 0, sizeof(*mr));
}
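
/*
 * Inverse of io_create_region() below: unpin the user pages, drop the
 * kernel-side vmap, un-account the pages from the user's locked-memory
 * charge, and zero the region so a stale pointer can't be reused.
 */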

int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg)
{
	int pages_accounted = 0;
	struct page **pages;
	int nr_pages, ret;
	void *vptr;
	u64 end;

	if (WARN_ON_ONCE(mr->pages || mr->vmap_ptr || mr->nr_pages))
		return -EFAULT;
	if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
		return -EINVAL;
	if (reg->flags != IORING_MEM_REGION_TYPE_USER)
		return -EINVAL;
	if (!reg->user_addr)
		return -EFAULT;
	if (!reg->size || reg->mmap_offset || reg->id)
		return -EINVAL;
	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
		return -E2BIG;
	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
		return -EINVAL;
	if (check_add_overflow(reg->user_addr, reg->size, &end))
		return -EOVERFLOW;

	pages = io_pin_pages(reg->user_addr, reg->size, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			goto out_free;
		pages_accounted = nr_pages;
	}

	vptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vptr) {
		ret = -ENOMEM;
		goto out_free;
	}

	mr->pages = pages;
	mr->vmap_ptr = vptr;
	mr->nr_pages = nr_pages;
	return 0;
out_free:
	if (pages_accounted)
		__io_unaccount_mem(ctx->user, pages_accounted);
	io_pages_free(&pages, nr_pages);
	return ret;
}
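
/*
 * The validation above pins down the ABI: reserved fields must be zero,
 * the only supported type is IORING_MEM_REGION_TYPE_USER, and both the
 * address and size must be page aligned. A userspace descriptor might
 * look like this (illustrative sketch only):
 *
 *	struct io_uring_region_desc rd = {
 *		.user_addr	= (__u64)(uintptr_t)buf,  /* page aligned */
 *		.size		= region_size,		  /* page aligned */
 *		.flags		= IORING_MEM_REGION_TYPE_USER,
 *	};
 */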

static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
					    size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		/* Don't allow mmap if the ring was set up without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		if (!ctx->rings)
			return ERR_PTR(-EFAULT);
		return ctx->rings;
	case IORING_OFF_SQES:
		/* Don't allow mmap if the ring was set up without it */
		if (ctx->flags & IORING_SETUP_NO_MMAP)
			return ERR_PTR(-EINVAL);
		if (!ctx->sq_sqes)
			return ERR_PTR(-EFAULT);
		return ctx->sq_sqes;
	case IORING_OFF_PBUF_RING: {
		struct io_buffer_list *bl;
		unsigned int bgid;
		void *ptr;

		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
		bl = io_pbuf_get_bl(ctx, bgid);
		if (IS_ERR(bl))
			return bl;
		ptr = bl->buf_ring;
		io_put_bl(ctx, bl);
		return ptr;
		}
	}

	return ERR_PTR(-EINVAL);
}
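
/*
 * The mmap offset doubles as a command: the bits in IORING_OFF_MMAP_MASK
 * select which object is being mapped, and for provided-buffer rings the
 * remaining bits carry the buffer group ID (shifted by
 * IORING_OFF_PBUF_SHIFT). The returned pointer is only used to locate the
 * backing memory; the buffer list reference is dropped again before
 * returning.
 */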

int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma,
			struct page **pages, int npages)
{
	unsigned long nr_pages = npages;

	vm_flags_set(vma, VM_DONTEXPAND);
	return vm_insert_pages(vma, vma->vm_start, pages, &nr_pages);
}
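
/*
 * vm_insert_pages() maps the whole page array in one batched call rather
 * than faulting pages in one at a time, and VM_DONTEXPAND keeps userspace
 * from growing the mapping with mremap() beyond the pages inserted here.
 */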

#ifdef CONFIG_MMU

__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int npages;
	void *ptr;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT);
		return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages);
	case IORING_OFF_SQES:
		return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages,
						ctx->n_sqe_pages);
	case IORING_OFF_PBUF_RING:
		return io_pbuf_mmap(file, vma);
	}

	return -EINVAL;
}
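
/*
 * From userspace, these offsets are the IORING_OFF_* constants passed to
 * mmap() on the ring fd, e.g. (illustrative):
 *
 *	sq_ring = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd,
 *		       IORING_OFF_SQ_RING);
 *
 * npages is clamped to the smaller of the ring allocation and the
 * requested VMA size, so a short mmap() cannot expose extra pages.
 */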

unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = filp->private_data;
	void *ptr;

	/*
	 * Do not allow mapping to a user-provided address, to avoid
	 * breaking the aliasing rules. Userspace cannot guess the offset
	 * address of a kernel kmalloc()ed memory area anyway.
	 */
	if (addr)
		return -EINVAL;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
	if (IS_ERR(ptr))
		return -ENOMEM;

	/*
	 * Some architectures have strong cache aliasing requirements.
	 * For such architectures we need a coherent mapping which aliases
	 * kernel memory *and* userspace memory. To achieve that:
	 * - use a NULL file pointer to reference physical memory, and
	 * - use the kernel virtual address of the shared io_uring context
	 *   (instead of the userspace-provided address, which has to be 0UL
	 *   anyway), and
	 * - use the same pgoff that get_unmapped_area() uses to calculate
	 *   the page colouring.
	 * For architectures without such aliasing requirements, the
	 * architecture will return any suitable mapping because addr is 0.
	 */
	filp = NULL;
	flags |= MAP_SHARED;
	pgoff = 0;	/* has been translated to ptr above */
#ifdef SHM_COLOUR
	addr = (uintptr_t) ptr;
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;
#endif
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}
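
/*
 * On SHM_COLOUR architectures (e.g. parisc), user and kernel mappings of
 * the same memory must share a cache colour, so the search address and
 * pgoff are derived from the kernel virtual address of the object being
 * mapped. Everywhere else, addr stays 0 and the architecture is free to
 * pick any suitable address.
 */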

#else /* !CONFIG_MMU */

int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = file->private_data;
	void *ptr;

	guard(mutex)(&ctx->resize_lock);

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}
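
/*
 * On nommu there is no address translation: the "mapping" is simply the
 * kernel address of the object, returned directly to userspace, and only
 * shared mappings are allowed so the memory is never copied.
 */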

#endif /* !CONFIG_MMU */
428