xref: /linux/io_uring/rsrc.c (revision a3a02a52bcfcbcc4a637d4b68bf1bc391c9fad02)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/nospec.h>
9 #include <linux/hugetlb.h>
10 #include <linux/compat.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "alloc_cache.h"
17 #include "openclose.h"
18 #include "rsrc.h"
19 #include "memmap.h"
20 
21 struct io_rsrc_update {
22 	struct file			*file;
23 	u64				arg;
24 	u32				nr_args;
25 	u32				offset;
26 };
27 
28 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
29 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
30 				  struct io_mapped_ubuf **pimu,
31 				  struct page **last_hpage);
32 
33 /* only define the max values; there are no corresponding minimums */
34 #define IORING_MAX_FIXED_FILES	(1U << 20)
35 #define IORING_MAX_REG_BUFFERS	(1U << 14)
36 
37 static const struct io_mapped_ubuf dummy_ubuf = {
38 	/* set an invalid range so that io_import_fixed() fails when it encounters it */
39 	.ubuf = -1UL,
40 	.ubuf_end = 0,
41 };
42 
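/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK. The accounting is
 * lockless: read ->locked_vm, compute the new total and retry the cmpxchg
 * until it sticks, failing with -ENOMEM if the limit would be exceeded.
 */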
43 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
44 {
45 	unsigned long page_limit, cur_pages, new_pages;
46 
47 	if (!nr_pages)
48 		return 0;
49 
50 	/* Don't allow more pages than we can safely lock */
51 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
52 
53 	cur_pages = atomic_long_read(&user->locked_vm);
54 	do {
55 		new_pages = cur_pages + nr_pages;
56 		if (new_pages > page_limit)
57 			return -ENOMEM;
58 	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
59 					  &cur_pages, new_pages));
60 	return 0;
61 }
62 
63 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
64 {
65 	if (ctx->user)
66 		__io_unaccount_mem(ctx->user, nr_pages);
67 
68 	if (ctx->mm_account)
69 		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
70 }
71 
72 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
73 {
74 	int ret;
75 
76 	if (ctx->user) {
77 		ret = __io_account_mem(ctx->user, nr_pages);
78 		if (ret)
79 			return ret;
80 	}
81 
82 	if (ctx->mm_account)
83 		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
84 
85 	return 0;
86 }
87 
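/*
 * Basic sanity checks on an iovec being registered: a NULL base is only
 * valid for a zero-length (sparse) entry, the length is capped at an
 * arbitrary 1GB, and the page-rounded range must not overflow the address
 * space.
 */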
88 static int io_buffer_validate(struct iovec *iov)
89 {
90 	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
91 
92 	/*
93 	 * Don't impose further size or buffer constraints here; we'll
94 	 * fail with -EINVAL later, when the IO is submitted, if they
95 	 * are wrong.
96 	 */
97 	if (!iov->iov_base)
98 		return iov->iov_len ? -EFAULT : 0;
99 	if (!iov->iov_len)
100 		return -EFAULT;
101 
102 	/* arbitrary limit, but we need something */
103 	if (iov->iov_len > SZ_1G)
104 		return -EFAULT;
105 
106 	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
107 		return -EOVERFLOW;
108 
109 	return 0;
110 }
111 
112 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
113 {
114 	struct io_mapped_ubuf *imu = *slot;
115 	unsigned int i;
116 
117 	if (imu != &dummy_ubuf) {
118 		for (i = 0; i < imu->nr_bvecs; i++)
119 			unpin_user_page(imu->bvec[i].bv_page);
120 		if (imu->acct_pages)
121 			io_unaccount_mem(ctx, imu->acct_pages);
122 		kvfree(imu);
123 	}
124 	*slot = NULL;
125 }
126 
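/*
 * Final teardown for a retired resource: post a CQE for the user-supplied
 * tag, if one was attached, then drop the file or unmap the buffer
 * depending on the node type.
 */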
127 static void io_rsrc_put_work(struct io_rsrc_node *node)
128 {
129 	struct io_rsrc_put *prsrc = &node->item;
130 
131 	if (prsrc->tag)
132 		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);
133 
134 	switch (node->type) {
135 	case IORING_RSRC_FILE:
136 		fput(prsrc->file);
137 		break;
138 	case IORING_RSRC_BUFFER:
139 		io_rsrc_buf_put(node->ctx, prsrc);
140 		break;
141 	default:
142 		WARN_ON_ONCE(1);
143 		break;
144 	}
145 }
146 
147 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
148 {
149 	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
150 		kfree(node);
151 }
152 
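/*
 * Called when a node's reference count drops to zero. Nodes must be retired
 * in the order they were queued, so walk ->rsrc_ref_list from the head and
 * stop at the first node that still has references; wake any quiesce
 * waiters once the list has drained.
 */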
153 void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
154 	__must_hold(&node->ctx->uring_lock)
155 {
156 	struct io_ring_ctx *ctx = node->ctx;
157 
158 	while (!list_empty(&ctx->rsrc_ref_list)) {
159 		node = list_first_entry(&ctx->rsrc_ref_list,
160 					    struct io_rsrc_node, node);
161 		/* recycle ref nodes in order */
162 		if (node->refs)
163 			break;
164 		list_del(&node->node);
165 
166 		if (likely(!node->empty))
167 			io_rsrc_put_work(node);
168 		io_rsrc_node_destroy(ctx, node);
169 	}
170 	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
171 		wake_up_all(&ctx->rsrc_quiesce_wq);
172 }
173 
174 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
175 {
176 	struct io_rsrc_node *ref_node;
177 
178 	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
179 	if (!ref_node) {
180 		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
181 		if (!ref_node)
182 			return NULL;
183 	}
184 
185 	ref_node->ctx = ctx;
186 	ref_node->empty = 0;
187 	ref_node->refs = 1;
188 	return ref_node;
189 }
190 
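/*
 * Park the current rsrc node, install a freshly allocated one, and wait for
 * all previously queued nodes to drop their references. Since ->uring_lock
 * is released while waiting, another task may start its own quiesce in the
 * meantime; the ->quiesce flag below guards against that.
 */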
191 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
192 				      struct io_ring_ctx *ctx)
193 {
194 	struct io_rsrc_node *backup;
195 	DEFINE_WAIT(we);
196 	int ret;
197 
198 	/* As we may drop ->uring_lock, another task may have already started a quiesce */
199 	if (data->quiesce)
200 		return -ENXIO;
201 
202 	backup = io_rsrc_node_alloc(ctx);
203 	if (!backup)
204 		return -ENOMEM;
205 	ctx->rsrc_node->empty = true;
206 	ctx->rsrc_node->type = -1;
207 	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
208 	io_put_rsrc_node(ctx, ctx->rsrc_node);
209 	ctx->rsrc_node = backup;
210 
211 	if (list_empty(&ctx->rsrc_ref_list))
212 		return 0;
213 
214 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
215 		atomic_set(&ctx->cq_wait_nr, 1);
216 		smp_mb();
217 	}
218 
219 	ctx->rsrc_quiesce++;
220 	data->quiesce = true;
221 	do {
222 		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
223 		mutex_unlock(&ctx->uring_lock);
224 
225 		ret = io_run_task_work_sig(ctx);
226 		if (ret < 0) {
227 			finish_wait(&ctx->rsrc_quiesce_wq, &we);
228 			mutex_lock(&ctx->uring_lock);
229 			if (list_empty(&ctx->rsrc_ref_list))
230 				ret = 0;
231 			break;
232 		}
233 
234 		schedule();
235 		mutex_lock(&ctx->uring_lock);
236 		ret = 0;
237 	} while (!list_empty(&ctx->rsrc_ref_list));
238 
239 	finish_wait(&ctx->rsrc_quiesce_wq, &we);
240 	data->quiesce = false;
241 	ctx->rsrc_quiesce--;
242 
243 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
244 		atomic_set(&ctx->cq_wait_nr, 0);
245 		smp_mb();
246 	}
247 	return ret;
248 }
249 
250 static void io_free_page_table(void **table, size_t size)
251 {
252 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
253 
254 	for (i = 0; i < nr_tables; i++)
255 		kfree(table[i]);
256 	kfree(table);
257 }
258 
259 static void io_rsrc_data_free(struct io_rsrc_data *data)
260 {
261 	size_t size = data->nr * sizeof(data->tags[0][0]);
262 
263 	if (data->tags)
264 		io_free_page_table((void **)data->tags, size);
265 	kfree(data);
266 }
267 
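/*
 * Allocate a table split into page-sized chunks, so that large tag arrays
 * don't require a single high-order allocation. io_free_page_table() above
 * undoes this.
 */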
268 static __cold void **io_alloc_page_table(size_t size)
269 {
270 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
271 	size_t init_size = size;
272 	void **table;
273 
274 	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
275 	if (!table)
276 		return NULL;
277 
278 	for (i = 0; i < nr_tables; i++) {
279 		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
280 
281 		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
282 		if (!table[i]) {
283 			io_free_page_table(table, init_size);
284 			return NULL;
285 		}
286 		size -= this_size;
287 	}
288 	return table;
289 }
290 
291 __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
292 				     u64 __user *utags,
293 				     unsigned nr, struct io_rsrc_data **pdata)
294 {
295 	struct io_rsrc_data *data;
296 	int ret = 0;
297 	unsigned i;
298 
299 	data = kzalloc(sizeof(*data), GFP_KERNEL);
300 	if (!data)
301 		return -ENOMEM;
302 	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
303 	if (!data->tags) {
304 		kfree(data);
305 		return -ENOMEM;
306 	}
307 
308 	data->nr = nr;
309 	data->ctx = ctx;
310 	data->rsrc_type = type;
311 	if (utags) {
312 		ret = -EFAULT;
313 		for (i = 0; i < nr; i++) {
314 			u64 *tag_slot = io_get_tag_slot(data, i);
315 
316 			if (copy_from_user(tag_slot, &utags[i],
317 					   sizeof(*tag_slot)))
318 				goto fail;
319 		}
320 	}
321 	*pdata = data;
322 	return 0;
323 fail:
324 	io_rsrc_data_free(data);
325 	return ret;
326 }
327 
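/*
 * Update a range of registered file slots: an occupied slot has its old
 * file queued for deferred put via the current rsrc node, then the new file
 * (unless the fd is -1 or IORING_REGISTER_FILES_SKIP) is installed along
 * with its tag.
 */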
328 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
329 				 struct io_uring_rsrc_update2 *up,
330 				 unsigned nr_args)
331 {
332 	u64 __user *tags = u64_to_user_ptr(up->tags);
333 	__s32 __user *fds = u64_to_user_ptr(up->data);
334 	struct io_rsrc_data *data = ctx->file_data;
335 	struct io_fixed_file *file_slot;
336 	int fd, i, err = 0;
337 	unsigned int done;
338 
339 	if (!ctx->file_data)
340 		return -ENXIO;
341 	if (up->offset + nr_args > ctx->nr_user_files)
342 		return -EINVAL;
343 
344 	for (done = 0; done < nr_args; done++) {
345 		u64 tag = 0;
346 
347 		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
348 		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
349 			err = -EFAULT;
350 			break;
351 		}
352 		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
353 			err = -EINVAL;
354 			break;
355 		}
356 		if (fd == IORING_REGISTER_FILES_SKIP)
357 			continue;
358 
359 		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
360 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
361 
362 		if (file_slot->file_ptr) {
363 			err = io_queue_rsrc_removal(data, i,
364 						    io_slot_file(file_slot));
365 			if (err)
366 				break;
367 			file_slot->file_ptr = 0;
368 			io_file_bitmap_clear(&ctx->file_table, i);
369 		}
370 		if (fd != -1) {
371 			struct file *file = fget(fd);
372 
373 			if (!file) {
374 				err = -EBADF;
375 				break;
376 			}
377 			/*
378 			 * Don't allow io_uring instances to be registered.
379 			 */
380 			if (io_is_uring_fops(file)) {
381 				fput(file);
382 				err = -EBADF;
383 				break;
384 			}
385 			*io_get_tag_slot(data, i) = tag;
386 			io_fixed_file_set(file_slot, file);
387 			io_file_bitmap_set(&ctx->file_table, i);
388 		}
389 	}
390 	return done ? done : err;
391 }
392 
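/*
 * Update a range of registered buffer slots: each new iovec is validated
 * and pinned first, then any existing mapping in the slot is queued for
 * deferred unmap before the new buffer and its tag are installed.
 */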
393 static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
394 				   struct io_uring_rsrc_update2 *up,
395 				   unsigned int nr_args)
396 {
397 	struct iovec __user *uvec = u64_to_user_ptr(up->data);
398 	u64 __user *tags = u64_to_user_ptr(up->tags);
399 	struct iovec fast_iov, *iov;
400 	struct page *last_hpage = NULL;
401 	__u32 done;
402 	int i, err;
403 
404 	if (!ctx->buf_data)
405 		return -ENXIO;
406 	if (up->offset + nr_args > ctx->nr_user_bufs)
407 		return -EINVAL;
408 
409 	for (done = 0; done < nr_args; done++) {
410 		struct io_mapped_ubuf *imu;
411 		u64 tag = 0;
412 
413 		iov = iovec_from_user(&uvec[done], 1, 1, &fast_iov, ctx->compat);
414 		if (IS_ERR(iov)) {
415 			err = PTR_ERR(iov);
416 			break;
417 		}
418 		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
419 			err = -EFAULT;
420 			break;
421 		}
422 		err = io_buffer_validate(iov);
423 		if (err)
424 			break;
425 		if (!iov->iov_base && tag) {
426 			err = -EINVAL;
427 			break;
428 		}
429 		err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
430 		if (err)
431 			break;
432 
433 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
434 		if (ctx->user_bufs[i] != &dummy_ubuf) {
435 			err = io_queue_rsrc_removal(ctx->buf_data, i,
436 						    ctx->user_bufs[i]);
437 			if (unlikely(err)) {
438 				io_buffer_unmap(ctx, &imu);
439 				break;
440 			}
441 			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
442 		}
443 
444 		ctx->user_bufs[i] = imu;
445 		*io_get_tag_slot(ctx->buf_data, i) = tag;
446 	}
447 	return done ? done : err;
448 }
449 
450 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
451 				     struct io_uring_rsrc_update2 *up,
452 				     unsigned nr_args)
453 {
454 	__u32 tmp;
455 
456 	lockdep_assert_held(&ctx->uring_lock);
457 
458 	if (check_add_overflow(up->offset, nr_args, &tmp))
459 		return -EOVERFLOW;
460 
461 	switch (type) {
462 	case IORING_RSRC_FILE:
463 		return __io_sqe_files_update(ctx, up, nr_args);
464 	case IORING_RSRC_BUFFER:
465 		return __io_sqe_buffers_update(ctx, up, nr_args);
466 	}
467 	return -EINVAL;
468 }
469 
470 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
471 			     unsigned nr_args)
472 {
473 	struct io_uring_rsrc_update2 up;
474 
475 	if (!nr_args)
476 		return -EINVAL;
477 	memset(&up, 0, sizeof(up));
478 	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
479 		return -EFAULT;
480 	if (up.resv || up.resv2)
481 		return -EINVAL;
482 	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
483 }
484 
485 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
486 			    unsigned size, unsigned type)
487 {
488 	struct io_uring_rsrc_update2 up;
489 
490 	if (size != sizeof(up))
491 		return -EINVAL;
492 	if (copy_from_user(&up, arg, sizeof(up)))
493 		return -EFAULT;
494 	if (!up.nr || up.resv || up.resv2)
495 		return -EINVAL;
496 	return __io_register_rsrc_update(ctx, type, &up, up.nr);
497 }
498 
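/*
 * Common entry point for registering a tagged file or buffer table. The
 * structure size is checked explicitly so the ABI can be extended later;
 * only the IORING_RSRC_REGISTER_SPARSE flag is accepted, and a sparse
 * registration must not also pass a data pointer.
 */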
499 __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
500 			    unsigned int size, unsigned int type)
501 {
502 	struct io_uring_rsrc_register rr;
503 
504 	/* keep it extendible */
505 	if (size != sizeof(rr))
506 		return -EINVAL;
507 
508 	memset(&rr, 0, sizeof(rr));
509 	if (copy_from_user(&rr, arg, size))
510 		return -EFAULT;
511 	if (!rr.nr || rr.resv2)
512 		return -EINVAL;
513 	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
514 		return -EINVAL;
515 
516 	switch (type) {
517 	case IORING_RSRC_FILE:
518 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
519 			break;
520 		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
521 					     rr.nr, u64_to_user_ptr(rr.tags));
522 	case IORING_RSRC_BUFFER:
523 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
524 			break;
525 		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
526 					       rr.nr, u64_to_user_ptr(rr.tags));
527 	}
528 	return -EINVAL;
529 }
530 
531 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
532 {
533 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
534 
535 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
536 		return -EINVAL;
537 	if (sqe->rw_flags || sqe->splice_fd_in)
538 		return -EINVAL;
539 
540 	up->offset = READ_ONCE(sqe->off);
541 	up->nr_args = READ_ONCE(sqe->len);
542 	if (!up->nr_args)
543 		return -EINVAL;
544 	up->arg = READ_ONCE(sqe->addr);
545 	return 0;
546 }
547 
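/*
 * Used by io_files_update() when the offset is IORING_FILE_INDEX_ALLOC:
 * each fd is installed into a slot chosen by the ring, and the allocated
 * slot index is copied back into the user's fd array; if that copy fails,
 * the slot is closed again.
 */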
548 static int io_files_update_with_index_alloc(struct io_kiocb *req,
549 					    unsigned int issue_flags)
550 {
551 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
552 	__s32 __user *fds = u64_to_user_ptr(up->arg);
553 	unsigned int done;
554 	struct file *file;
555 	int ret, fd;
556 
557 	if (!req->ctx->file_data)
558 		return -ENXIO;
559 
560 	for (done = 0; done < up->nr_args; done++) {
561 		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
562 			ret = -EFAULT;
563 			break;
564 		}
565 
566 		file = fget(fd);
567 		if (!file) {
568 			ret = -EBADF;
569 			break;
570 		}
571 		ret = io_fixed_fd_install(req, issue_flags, file,
572 					  IORING_FILE_INDEX_ALLOC);
573 		if (ret < 0)
574 			break;
575 		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
576 			__io_close_fixed(req->ctx, issue_flags, ret);
577 			ret = -EFAULT;
578 			break;
579 		}
580 	}
581 
582 	if (done)
583 		return done;
584 	return ret;
585 }
586 
587 int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
588 {
589 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
590 	struct io_ring_ctx *ctx = req->ctx;
591 	struct io_uring_rsrc_update2 up2;
592 	int ret;
593 
594 	up2.offset = up->offset;
595 	up2.data = up->arg;
596 	up2.nr = 0;
597 	up2.tags = 0;
598 	up2.resv = 0;
599 	up2.resv2 = 0;
600 
601 	if (up->offset == IORING_FILE_INDEX_ALLOC) {
602 		ret = io_files_update_with_index_alloc(req, issue_flags);
603 	} else {
604 		io_ring_submit_lock(ctx, issue_flags);
605 		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
606 						&up2, up->nr_args);
607 		io_ring_submit_unlock(ctx, issue_flags);
608 	}
609 
610 	if (ret < 0)
611 		req_set_fail(req);
612 	io_req_set_res(req, ret, 0);
613 	return IOU_OK;
614 }
615 
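/*
 * Hand an old resource over to the current rsrc node for deferred teardown,
 * then retire that node and install a freshly allocated one. The tag stored
 * in the slot travels with the node so a CQE can be posted when the node is
 * finally dropped.
 */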
616 int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
617 {
618 	struct io_ring_ctx *ctx = data->ctx;
619 	struct io_rsrc_node *node = ctx->rsrc_node;
620 	u64 *tag_slot = io_get_tag_slot(data, idx);
621 
622 	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
623 	if (unlikely(!ctx->rsrc_node)) {
624 		ctx->rsrc_node = node;
625 		return -ENOMEM;
626 	}
627 
628 	node->item.rsrc = rsrc;
629 	node->type = data->rsrc_type;
630 	node->item.tag = *tag_slot;
631 	*tag_slot = 0;
632 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
633 	io_put_rsrc_node(ctx, node);
634 	return 0;
635 }
636 
637 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
638 {
639 	int i;
640 
641 	for (i = 0; i < ctx->nr_user_files; i++) {
642 		struct file *file = io_file_from_index(&ctx->file_table, i);
643 
644 		if (!file)
645 			continue;
646 		io_file_bitmap_clear(&ctx->file_table, i);
647 		fput(file);
648 	}
649 
650 	io_free_file_tables(&ctx->file_table);
651 	io_file_table_set_alloc_range(ctx, 0, 0);
652 	io_rsrc_data_free(ctx->file_data);
653 	ctx->file_data = NULL;
654 	ctx->nr_user_files = 0;
655 }
656 
657 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
658 {
659 	unsigned nr = ctx->nr_user_files;
660 	int ret;
661 
662 	if (!ctx->file_data)
663 		return -ENXIO;
664 
665 	/*
666 	 * Quiesce may drop ->uring_lock; while it's not held, prevent
667 	 * new requests from using the table.
668 	 */
669 	ctx->nr_user_files = 0;
670 	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
671 	ctx->nr_user_files = nr;
672 	if (!ret)
673 		__io_sqe_files_unregister(ctx);
674 	return ret;
675 }
676 
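/*
 * Register an array of files with the ring. Sparse sets are allowed: a
 * missing fds array or an fd of -1 leaves the slot empty, but such a slot
 * must not carry a tag. io_uring files themselves are rejected.
 */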
677 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
678 			  unsigned nr_args, u64 __user *tags)
679 {
680 	__s32 __user *fds = (__s32 __user *) arg;
681 	struct file *file;
682 	int fd, ret;
683 	unsigned i;
684 
685 	if (ctx->file_data)
686 		return -EBUSY;
687 	if (!nr_args)
688 		return -EINVAL;
689 	if (nr_args > IORING_MAX_FIXED_FILES)
690 		return -EMFILE;
691 	if (nr_args > rlimit(RLIMIT_NOFILE))
692 		return -EMFILE;
693 	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
694 				 &ctx->file_data);
695 	if (ret)
696 		return ret;
697 
698 	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
699 		io_rsrc_data_free(ctx->file_data);
700 		ctx->file_data = NULL;
701 		return -ENOMEM;
702 	}
703 
704 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
705 		struct io_fixed_file *file_slot;
706 
707 		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
708 			ret = -EFAULT;
709 			goto fail;
710 		}
711 		/* allow sparse sets */
712 		if (!fds || fd == -1) {
713 			ret = -EINVAL;
714 			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
715 				goto fail;
716 			continue;
717 		}
718 
719 		file = fget(fd);
720 		ret = -EBADF;
721 		if (unlikely(!file))
722 			goto fail;
723 
724 		/*
725 		 * Don't allow io_uring instances to be registered.
726 		 */
727 		if (io_is_uring_fops(file)) {
728 			fput(file);
729 			goto fail;
730 		}
731 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
732 		io_fixed_file_set(file_slot, file);
733 		io_file_bitmap_set(&ctx->file_table, i);
734 	}
735 
736 	/* default it to the whole table */
737 	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
738 	return 0;
739 fail:
740 	__io_sqe_files_unregister(ctx);
741 	return ret;
742 }
743 
744 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
745 {
746 	io_buffer_unmap(ctx, &prsrc->buf);
747 	prsrc->buf = NULL;
748 }
749 
750 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
751 {
752 	unsigned int i;
753 
754 	for (i = 0; i < ctx->nr_user_bufs; i++)
755 		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
756 	kfree(ctx->user_bufs);
757 	io_rsrc_data_free(ctx->buf_data);
758 	ctx->user_bufs = NULL;
759 	ctx->buf_data = NULL;
760 	ctx->nr_user_bufs = 0;
761 }
762 
763 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
764 {
765 	unsigned nr = ctx->nr_user_bufs;
766 	int ret;
767 
768 	if (!ctx->buf_data)
769 		return -ENXIO;
770 
771 	/*
772 	 * Quiesce may drop ->uring_lock; while it's not held, prevent
773 	 * new requests from using the table.
774 	 */
775 	ctx->nr_user_bufs = 0;
776 	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
777 	ctx->nr_user_bufs = nr;
778 	if (!ret)
779 		__io_sqe_buffers_unregister(ctx);
780 	return ret;
781 }
782 
783 /*
784  * Not super efficient, but this only runs at registration time. And we do cache
785  * the last compound head, so generally we'll only do a full search if we don't
786  * match that one.
787  *
788  * We check if the given compound head page has already been accounted, to
789  * avoid double accounting it. This allows us to account the full size of the
790  * page, not just the constituent pages of a huge page.
791  */
792 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
793 				  int nr_pages, struct page *hpage)
794 {
795 	int i, j;
796 
797 	/* check current page array */
798 	for (i = 0; i < nr_pages; i++) {
799 		if (!PageCompound(pages[i]))
800 			continue;
801 		if (compound_head(pages[i]) == hpage)
802 			return true;
803 	}
804 
805 	/* check previously registered pages */
806 	for (i = 0; i < ctx->nr_user_bufs; i++) {
807 		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
808 
809 		for (j = 0; j < imu->nr_bvecs; j++) {
810 			if (!PageCompound(imu->bvec[j].bv_page))
811 				continue;
812 			if (compound_head(imu->bvec[j].bv_page) == hpage)
813 				return true;
814 		}
815 	}
816 
817 	return false;
818 }
819 
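/*
 * Work out how many pages to charge for this buffer. Normal pages count
 * individually; for a compound (huge) page the full compound size is
 * charged once, with headpage_already_acct() ensuring the same head page
 * isn't charged twice across buffers.
 */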
820 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
821 				 int nr_pages, struct io_mapped_ubuf *imu,
822 				 struct page **last_hpage)
823 {
824 	int i, ret;
825 
826 	imu->acct_pages = 0;
827 	for (i = 0; i < nr_pages; i++) {
828 		if (!PageCompound(pages[i])) {
829 			imu->acct_pages++;
830 		} else {
831 			struct page *hpage;
832 
833 			hpage = compound_head(pages[i]);
834 			if (hpage == *last_hpage)
835 				continue;
836 			*last_hpage = hpage;
837 			if (headpage_already_acct(ctx, pages, i, hpage))
838 				continue;
839 			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
840 		}
841 	}
842 
843 	if (!imu->acct_pages)
844 		return 0;
845 
846 	ret = io_account_mem(ctx, imu->acct_pages);
847 	if (ret)
848 		imu->acct_pages = 0;
849 	return ret;
850 }
851 
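/*
 * Pin the user pages backing one iovec and build the io_mapped_ubuf
 * describing it. Physically consecutive pages belonging to the same folio
 * are coalesced into a single bvec entry; otherwise one bvec per page is
 * used. A NULL iov_base registers the dummy (sparse) entry.
 */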
852 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
853 				  struct io_mapped_ubuf **pimu,
854 				  struct page **last_hpage)
855 {
856 	struct io_mapped_ubuf *imu = NULL;
857 	struct page **pages = NULL;
858 	unsigned long off;
859 	size_t size;
860 	int ret, nr_pages, i;
861 	struct folio *folio = NULL;
862 
863 	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
864 	if (!iov->iov_base)
865 		return 0;
866 
867 	ret = -ENOMEM;
868 	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
869 				&nr_pages);
870 	if (IS_ERR(pages)) {
871 		ret = PTR_ERR(pages);
872 		pages = NULL;
873 		goto done;
874 	}
875 
876 	/* If it's a huge page, try to coalesce its pages into a single bvec entry */
877 	if (nr_pages > 1) {
878 		folio = page_folio(pages[0]);
879 		for (i = 1; i < nr_pages; i++) {
880 			/*
881 			 * Pages must be consecutive and on the same folio for
882 			 * this to work
883 			 */
884 			if (page_folio(pages[i]) != folio ||
885 			    pages[i] != pages[i - 1] + 1) {
886 				folio = NULL;
887 				break;
888 			}
889 		}
890 		if (folio) {
891 			/*
892 			 * The pages are bound to the folio, so this doesn't
893 			 * actually unpin them; it drops all but one reference,
894 			 * and that last one is usually put by io_buffer_unmap().
895 			 * Note: this needs a better helper.
896 			 */
897 			unpin_user_pages(&pages[1], nr_pages - 1);
898 			nr_pages = 1;
899 		}
900 	}
901 
902 	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
903 	if (!imu)
904 		goto done;
905 
906 	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
907 	if (ret) {
908 		unpin_user_pages(pages, nr_pages);
909 		goto done;
910 	}
911 
912 	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
913 	size = iov->iov_len;
914 	/* store original address for later verification */
915 	imu->ubuf = (unsigned long) iov->iov_base;
916 	imu->ubuf_end = imu->ubuf + iov->iov_len;
917 	imu->nr_bvecs = nr_pages;
918 	*pimu = imu;
919 	ret = 0;
920 
921 	if (folio) {
922 		bvec_set_page(&imu->bvec[0], pages[0], size, off);
923 		goto done;
924 	}
925 	for (i = 0; i < nr_pages; i++) {
926 		size_t vec_len;
927 
928 		vec_len = min_t(size_t, size, PAGE_SIZE - off);
929 		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
930 		off = 0;
931 		size -= vec_len;
932 	}
933 done:
934 	if (ret)
935 		kvfree(imu);
936 	kvfree(pages);
937 	return ret;
938 }
939 
940 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
941 {
942 	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
943 	return ctx->user_bufs ? 0 : -ENOMEM;
944 }
945 
946 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
947 			    unsigned int nr_args, u64 __user *tags)
948 {
949 	struct page *last_hpage = NULL;
950 	struct io_rsrc_data *data;
951 	struct iovec fast_iov, *iov = &fast_iov;
952 	const struct iovec __user *uvec = (struct iovec __user *) arg;
953 	int i, ret;
954 
955 	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
956 
957 	if (ctx->user_bufs)
958 		return -EBUSY;
959 	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
960 		return -EINVAL;
961 	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
962 	if (ret)
963 		return ret;
964 	ret = io_buffers_map_alloc(ctx, nr_args);
965 	if (ret) {
966 		io_rsrc_data_free(data);
967 		return ret;
968 	}
969 
970 	if (!arg)
971 		memset(iov, 0, sizeof(*iov));
972 
973 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
974 		if (arg) {
975 			iov = iovec_from_user(&uvec[i], 1, 1, &fast_iov, ctx->compat);
976 			if (IS_ERR(iov)) {
977 				ret = PTR_ERR(iov);
978 				break;
979 			}
980 			ret = io_buffer_validate(iov);
981 			if (ret)
982 				break;
983 		}
984 
985 		if (!iov->iov_base && *io_get_tag_slot(data, i)) {
986 			ret = -EINVAL;
987 			break;
988 		}
989 
990 		ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
991 					     &last_hpage);
992 		if (ret)
993 			break;
994 	}
995 
996 	WARN_ON_ONCE(ctx->buf_data);
997 
998 	ctx->buf_data = data;
999 	if (ret)
1000 		__io_sqe_buffers_unregister(ctx);
1001 	return ret;
1002 }
1003 
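/*
 * Set up an iov_iter over a registered buffer for the given address range.
 * The range is bounds-checked against the original registration, and the
 * iterator is advanced to the requested offset using the fact that all but
 * the first and last bvecs are exactly PAGE_SIZE.
 */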
1004 int io_import_fixed(int ddir, struct iov_iter *iter,
1005 			   struct io_mapped_ubuf *imu,
1006 			   u64 buf_addr, size_t len)
1007 {
1008 	u64 buf_end;
1009 	size_t offset;
1010 
1011 	if (WARN_ON_ONCE(!imu))
1012 		return -EFAULT;
1013 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
1014 		return -EFAULT;
1015 	/* not inside the mapped region */
1016 	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1017 		return -EFAULT;
1018 
1019 	/*
1020 	 * buf_addr might not be the start of the buffer; set the size
1021 	 * appropriately and advance the iterator to the right offset.
1022 	 */
1023 	offset = buf_addr - imu->ubuf;
1024 	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1025 
1026 	if (offset) {
1027 		/*
1028 		 * Don't use iov_iter_advance() here, as it's really slow when
1029 		 * accessing the latter parts of a big fixed buffer - it iterates
1030 		 * over each segment manually. We can cheat a bit here, because
1031 		 * we know that:
1032 		 *
1033 		 * 1) it's a BVEC iter, which we set up ourselves
1034 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1035 		 *    first and last bvec
1036 		 *
1037 		 * So just find our index, and adjust the iterator afterwards.
1038 		 * If the offset is within the first bvec (or covers the whole
1039 		 * first bvec), just use iov_iter_advance(). This makes it
1040 		 * easier since we can just skip the first segment, which may
1041 		 * not be PAGE_SIZE aligned.
1042 		 */
1043 		const struct bio_vec *bvec = imu->bvec;
1044 
1045 		if (offset < bvec->bv_len) {
1046 			/*
1047 			 * Note: huge page buffers consist of one large
1048 			 * bvec entry and should always take this path. The
1049 			 * other branch doesn't expect non-PAGE_SIZE'd chunks.
1050 			 */
1051 			iter->bvec = bvec;
1052 			iter->count -= offset;
1053 			iter->iov_offset = offset;
1054 		} else {
1055 			unsigned long seg_skip;
1056 
1057 			/* skip first vec */
1058 			offset -= bvec->bv_len;
1059 			seg_skip = 1 + (offset >> PAGE_SHIFT);
1060 
1061 			iter->bvec = bvec + seg_skip;
1062 			iter->nr_segs -= seg_skip;
1063 			iter->count -= bvec->bv_len + offset;
1064 			iter->iov_offset = offset & ~PAGE_MASK;
1065 		}
1066 	}
1067 
1068 	return 0;
1069 }
1070