xref: /linux/io_uring/memmap.c (revision 8838a1a2d219a86ab05e679c73f68dd75a25aca5)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <asm/shmparam.h>

#include "memmap.h"
#include "kbuf.h"
#include "rsrc.h"

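/*
 * Allocate 'size' bytes as a single (compound, if order > 0) allocation and
 * fill 'pages' with pointers to each constituent page. Only the head page
 * holds a reference. Returns the kernel virtual address of the allocation,
 * or an ERR_PTR() on failure.
 */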
static void *io_mem_alloc_compound(struct page **pages, int nr_pages,
				   size_t size, gfp_t gfp)
{
	struct page *page;
	int i, order;

	order = get_order(size);
	if (order > MAX_PAGE_ORDER)
		return ERR_PTR(-ENOMEM);
	else if (order)
		gfp |= __GFP_COMP;

	page = alloc_pages(gfp, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}

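/*
 * Pin the user memory range [uaddr, uaddr + len) for long term use and
 * return a kvmalloc()ed array of the pinned pages, with *npages set to the
 * page count. On failure, any partially pinned pages are released and an
 * ERR_PTR() is returned.
 *
 * Hypothetical caller sketch (names are illustrative only):
 *
 *	int nr;
 *	struct page **pages = io_pin_pages(uaddr, len, &nr);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...
 *	unpin_user_pages(pages, nr);
 *	kvfree(pages);
 */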
struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	if (check_add_overflow(uaddr, len, &end))
		return ERR_PTR(-EOVERFLOW);
	if (check_add_overflow(end, PAGE_SIZE - 1, &end))
		return ERR_PTR(-EOVERFLOW);

	end = end >> PAGE_SHIFT;
	start = uaddr >> PAGE_SHIFT;
	nr_pages = end - start;
	if (WARN_ON_ONCE(!nr_pages))
		return ERR_PTR(-EINVAL);
	if (WARN_ON_ONCE(nr_pages > INT_MAX))
		return ERR_PTR(-EOVERFLOW);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
					pages);
	/* success, mapped all pages */
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;
	}

	/* partial map, or didn't map anything */
	if (ret >= 0) {
		/* if we did partial map, release any pages we did get */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}

enum {
	/* memory was vmap'ed for the kernel, freeing the region vunmap's it */
	IO_REGION_F_VMAP			= 1,
	/* memory is provided by user and pinned by the kernel */
	IO_REGION_F_USER_PROVIDED		= 2,
	/* only the first page in the array is ref'ed */
	IO_REGION_F_SINGLE_REF			= 4,
};

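/*
 * Tear down a region: drop the page references (a single reference for
 * compound allocations, an unpin for user-provided memory), vunmap() the
 * kernel mapping if one was set up, unaccount the memory and clear *mr.
 */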
void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr)
{
	if (mr->pages) {
		long nr_refs = mr->nr_pages;

		if (mr->flags & IO_REGION_F_SINGLE_REF)
			nr_refs = 1;

		if (mr->flags & IO_REGION_F_USER_PROVIDED)
			unpin_user_pages(mr->pages, nr_refs);
		else
			release_pages(mr->pages, nr_refs);

		kvfree(mr->pages);
	}
	if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
		vunmap(mr->ptr);
	if (mr->nr_pages && ctx->user)
		__io_unaccount_mem(ctx->user, mr->nr_pages);

	memset(mr, 0, sizeof(*mr));
}

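/*
 * Set up the kernel-visible pointer for the region. If the pages coalesce
 * into a single folio, the direct mapping of the first page is used,
 * otherwise the pages are vmap()ed and the region is flagged so the mapping
 * gets torn down on free.
 */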
static int io_region_init_ptr(struct io_mapped_region *mr)
{
	struct io_imu_folio_data ifd;
	void *ptr;

	if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
		if (ifd.nr_folios == 1) {
			mr->ptr = page_address(mr->pages[0]);
			return 0;
		}
	}
	ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
	if (!ptr)
		return -ENOMEM;

	mr->ptr = ptr;
	mr->flags |= IO_REGION_F_VMAP;
	return 0;
}

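/* Back the region with user memory by pinning the pages at reg->user_addr. */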
static int io_region_pin_pages(struct io_ring_ctx *ctx,
				struct io_mapped_region *mr,
				struct io_uring_region_desc *reg)
{
	unsigned long size = mr->nr_pages << PAGE_SHIFT;
	struct page **pages;
	int nr_pages;

	pages = io_pin_pages(reg->user_addr, size, &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
		return -EFAULT;

	mr->pages = pages;
	mr->flags |= IO_REGION_F_USER_PROVIDED;
	return 0;
}

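/*
 * Back the region with kernel-allocated memory. A single compound allocation
 * is tried first, falling back to a bulk allocation of individual pages. The
 * mmap offset userspace must use is stored in the region descriptor.
 */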
static int io_region_allocate_pages(struct io_ring_ctx *ctx,
				    struct io_mapped_region *mr,
				    struct io_uring_region_desc *reg,
				    unsigned long mmap_offset)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
	unsigned long size = mr->nr_pages << PAGE_SHIFT;
	unsigned long nr_allocated;
	struct page **pages;
	void *p;

	pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
	if (!pages)
		return -ENOMEM;

	p = io_mem_alloc_compound(pages, mr->nr_pages, size, gfp);
	if (!IS_ERR(p)) {
		mr->flags |= IO_REGION_F_SINGLE_REF;
		goto done;
	}

	nr_allocated = alloc_pages_bulk_array_node(gfp, NUMA_NO_NODE,
						   mr->nr_pages, pages);
	if (nr_allocated != mr->nr_pages) {
		if (nr_allocated)
			release_pages(pages, nr_allocated);
		kvfree(pages);
		return -ENOMEM;
	}
done:
	reg->mmap_offset = mmap_offset;
	mr->pages = pages;
	return 0;
}

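/*
 * Create a mapped region: validate the region descriptor, account the memory
 * against the user, back the region with either pinned user memory or
 * kernel-allocated pages, and set up the kernel-visible mapping. On failure
 * everything is rolled back through io_free_region().
 */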
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg,
		     unsigned long mmap_offset)
{
	int nr_pages, ret;
	u64 end;

	if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
		return -EFAULT;
	if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
		return -EINVAL;
	if (reg->flags & ~IORING_MEM_REGION_TYPE_USER)
		return -EINVAL;
	/* user_addr should be set IFF it's a user memory backed region */
	if ((reg->flags & IORING_MEM_REGION_TYPE_USER) != !!reg->user_addr)
		return -EFAULT;
	if (!reg->size || reg->mmap_offset || reg->id)
		return -EINVAL;
	if ((reg->size >> PAGE_SHIFT) > INT_MAX)
		return -E2BIG;
	if ((reg->user_addr | reg->size) & ~PAGE_MASK)
		return -EINVAL;
	if (check_add_overflow(reg->user_addr, reg->size, &end))
		return -EOVERFLOW;

	nr_pages = reg->size >> PAGE_SHIFT;
	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}
	mr->nr_pages = nr_pages;

	if (reg->flags & IORING_MEM_REGION_TYPE_USER)
		ret = io_region_pin_pages(ctx, mr, reg);
	else
		ret = io_region_allocate_pages(ctx, mr, reg, mmap_offset);
	if (ret)
		goto out_free;

	ret = io_region_init_ptr(mr);
	if (ret)
		goto out_free;
	return 0;
out_free:
	io_free_region(ctx, mr);
	return ret;
}

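/*
 * Same as io_create_region(), but the fully initialised region is published
 * into *mr only while holding ->mmap_lock, so a concurrent mmap() can never
 * observe a half-constructed region.
 */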
int io_create_region_mmap_safe(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
				struct io_uring_region_desc *reg,
				unsigned long mmap_offset)
{
	struct io_mapped_region tmp_mr;
	int ret;

	memcpy(&tmp_mr, mr, sizeof(tmp_mr));
	ret = io_create_region(ctx, &tmp_mr, reg, mmap_offset);
	if (ret)
		return ret;

	/*
	 * Once published, mmap can find the region while holding only
	 * ->mmap_lock and not ->uring_lock.
	 */
	guard(mutex)(&ctx->mmap_lock);
	memcpy(mr, &tmp_mr, sizeof(tmp_mr));
	return 0;
}

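/*
 * Translate an mmap offset into the region it refers to: the SQ/CQ rings,
 * the SQE array, a provided buffer ring (bgid encoded in the offset), or the
 * parameter region.
 */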
static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
						   loff_t pgoff)
{
	loff_t offset = pgoff << PAGE_SHIFT;
	unsigned int bgid;

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		return &ctx->ring_region;
	case IORING_OFF_SQES:
		return &ctx->sq_region;
	case IORING_OFF_PBUF_RING:
		bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
		return io_pbuf_get_region(ctx, bgid);
	case IORING_MAP_OFF_PARAM_REGION:
		return &ctx->param_region;
	}
	return NULL;
}

static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
				     struct io_mapped_region *mr)
{
	lockdep_assert_held(&ctx->mmap_lock);

	if (!io_region_is_set(mr))
		return ERR_PTR(-EINVAL);
	if (mr->flags & IO_REGION_F_USER_PROVIDED)
		return ERR_PTR(-EINVAL);

	return io_region_get_ptr(mr);
}

static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
					    size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	struct io_mapped_region *region;

	region = io_mmap_get_region(ctx, pgoff);
	if (!region)
		return ERR_PTR(-EINVAL);
	return io_region_validate_mmap(ctx, region);
}

#ifdef CONFIG_MMU

static int io_region_mmap(struct io_ring_ctx *ctx,
			  struct io_mapped_region *mr,
			  struct vm_area_struct *vma,
			  unsigned max_pages)
{
	unsigned long nr_pages = min(mr->nr_pages, max_pages);

	vm_flags_set(vma, VM_DONTEXPAND);
	return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
}

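/*
 * mmap handler: look up the region for the requested offset and insert its
 * pages into the VMA. SQ/CQ ring mappings are clamped to the size of the
 * requested mapping.
 */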
__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	size_t sz = vma->vm_end - vma->vm_start;
	long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned int page_limit = UINT_MAX;
	struct io_mapped_region *region;
	void *ptr;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	switch (offset & IORING_OFF_MMAP_MASK) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		break;
	}

	region = io_mmap_get_region(ctx, vma->vm_pgoff);
	return io_region_mmap(ctx, region, vma, page_limit);
}

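/*
 * Pick an address for the mapping. On architectures with cache aliasing
 * requirements (SHM_COLOUR) the address is derived from the kernel virtual
 * address of the region, so that the kernel and user mappings alias
 * coherently.
 */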
unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = filp->private_data;
	void *ptr;

	/*
	 * Do not allow mapping to a user-provided address, to avoid breaking
	 * the aliasing rules. Userspace is not able to guess the offset
	 * address of a kernel kmalloc()ed memory area anyway.
	 */
	if (addr)
		return -EINVAL;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
	if (IS_ERR(ptr))
		return -ENOMEM;

	/*
	 * Some architectures have strong cache aliasing requirements.
	 * For such architectures we need a coherent mapping which aliases
	 * kernel memory *and* userspace memory. To achieve that:
	 * - use a NULL file pointer to reference physical memory, and
	 * - use the kernel virtual address of the shared io_uring context
	 *   (instead of the userspace-provided address, which has to be 0UL
	 *   anyway).
	 * - use the same pgoff which the get_unmapped_area() uses to
	 *   calculate the page colouring.
	 * For architectures without such aliasing requirements, the
	 * architecture will return any suitable mapping because addr is 0.
	 */
	filp = NULL;
	flags |= MAP_SHARED;
	pgoff = 0;	/* has been translated to ptr above */
#ifdef SHM_COLOUR
	addr = (uintptr_t) ptr;
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;
#endif
	return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
}

#else /* !CONFIG_MMU */

int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
}

unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct io_ring_ctx *ctx = file->private_data;
	void *ptr;

	guard(mutex)(&ctx->mmap_lock);

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
417