// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"
#include "register.h"

struct io_rsrc_update {
	struct file	*file;
	u64		arg;
	u32		nr_args;
	u32		offset;
};

static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
			struct iovec *iov, struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

#define IO_CACHED_BVECS_SEGS	32
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

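/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * try_cmpxchg loop above makes the RLIMIT_MEMLOCK check and the charge one
 * atomic step, so concurrent callers can never overshoot the limit:
 *
 *	if (__io_account_mem(ctx->user, nr_pages))
 *		return -ENOMEM;		// over the limit, nothing was charged
 *	...pin the pages...
 *	__io_unaccount_mem(ctx->user, nr_pages);	// on teardown
 */
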
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

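/*
 * Illustrative inputs, following the checks above:
 *
 *	{ .iov_base = NULL, .iov_len = 0 }	-> 0       (sparse slot)
 *	{ .iov_base = NULL, .iov_len = 4096 }	-> -EFAULT
 *	{ .iov_base = p,    .iov_len = 0 }	-> -EFAULT
 *	{ .iov_base = p,    .iov_len = 2G }	-> -EFAULT (over the SZ_1G cap)
 */
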
static void io_release_ubuf(void *priv)
{
	struct io_mapped_ubuf *imu = priv;
	unsigned int i;

	for (i = 0; i < imu->nr_bvecs; i++)
		unpin_user_page(imu->bvec[i].bv_page);
}

static struct io_mapped_ubuf *io_alloc_imu(struct io_ring_ctx *ctx,
					   int nr_bvecs)
{
	if (nr_bvecs <= IO_CACHED_BVECS_SEGS)
		return io_cache_alloc(&ctx->imu_cache, GFP_KERNEL);
	return kvmalloc(struct_size_t(struct io_mapped_ubuf, bvec, nr_bvecs),
			GFP_KERNEL);
}

static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
{
	if (imu->nr_bvecs <= IO_CACHED_BVECS_SEGS)
		io_cache_free(&ctx->imu_cache, imu);
	else
		kvfree(imu);
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
{
	if (!refcount_dec_and_test(&imu->refs))
		return;

	if (imu->acct_pages)
		io_unaccount_mem(ctx, imu->acct_pages);
	imu->release(imu->priv);
	io_free_imu(ctx, imu);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
{
	struct io_rsrc_node *node;

	node = io_cache_alloc(&ctx->node_cache, GFP_KERNEL);
	if (node) {
		node->type = type;
		node->refs = 1;
		node->tag = 0;
		node->file_ptr = 0;
	}
	return node;
}

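/*
 * Note: io_alloc_cache_init() returns true on allocation failure, so a
 * nonzero result from this function means at least one of the two caches
 * could not be set up and the caller must treat it as an error.
 */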
bool io_rsrc_cache_init(struct io_ring_ctx *ctx)
{
	const int imu_cache_size = struct_size_t(struct io_mapped_ubuf, bvec,
						 IO_CACHED_BVECS_SEGS);
	const int node_size = sizeof(struct io_rsrc_node);
	bool ret;

	ret = io_alloc_cache_init(&ctx->node_cache, IO_ALLOC_CACHE_MAX,
				  node_size, 0);
	ret |= io_alloc_cache_init(&ctx->imu_cache, IO_ALLOC_CACHE_MAX,
				   imu_cache_size, 0);
	return ret;
}

void io_rsrc_cache_free(struct io_ring_ctx *ctx)
{
	io_alloc_cache_free(&ctx->node_cache, kfree);
	io_alloc_cache_free(&ctx->imu_cache, kfree);
}

__cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
			      struct io_rsrc_data *data)
{
	if (!data->nr)
		return;
	while (data->nr--) {
		if (data->nodes[data->nr])
			io_put_rsrc_node(ctx, data->nodes[data->nr]);
	}
	kvfree(data->nodes);
	data->nodes = NULL;
	data->nr = 0;
}

__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
{
	data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
				     GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (data->nodes) {
		data->nr = nr;
		return 0;
	}
	return -ENOMEM;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_table.data.nr)
		return -ENXIO;
	if (up->offset + nr_args > ctx->file_table.data.nr)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = up->offset + done;
		if (io_reset_rsrc_node(ctx, &ctx->file_table.data, i))
			io_file_bitmap_clear(&ctx->file_table, i);

		if (fd != -1) {
			struct file *file = fget(fd);
			struct io_rsrc_node *node;

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
			if (!node) {
				err = -ENOMEM;
				fput(file);
				break;
			}
			ctx->file_table.data.nodes[i] = node;
			if (tag)
				node->tag = tag;
			io_fixed_file_set(node, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

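/*
 * Userspace-side sketch (hypothetical, raw syscall) of the update path
 * handled above: replace slot 5 of a registered file table with a new fd,
 * leaving all other slots alone. The count goes in up.nr, and the fourth
 * syscall argument carries the struct size:
 *
 *	int fds[1] = { newfd };
 *	struct io_uring_rsrc_update2 up = {
 *		.offset	= 5,
 *		.data	= (__u64)(uintptr_t)fds,
 *		.nr	= 1,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES_UPDATE2, &up, sizeof(up));
 */
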
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec fast_iov, *iov;
	struct page *last_hpage = NULL;
	struct iovec __user *uvec;
	u64 user_data = up->data;
	__u32 done;
	int i, err;

	if (!ctx->buf_table.nr)
		return -ENXIO;
	if (up->offset + nr_args > ctx->buf_table.nr)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		uvec = u64_to_user_ptr(user_data);
		iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
		if (IS_ERR(iov)) {
			err = PTR_ERR(iov);
			break;
		}
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(iov);
		if (err)
			break;
		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			break;
		}
		if (tag) {
			if (!node) {
				err = -EINVAL;
				break;
			}
			node->tag = tag;
		}
		i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
		io_reset_rsrc_node(ctx, &ctx->buf_table, i);
		ctx->buf_table.nodes[i] = node;
		if (ctx->compat)
			user_data += sizeof(struct compat_iovec);
		else
			user_data += sizeof(struct iovec);
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

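/*
 * Sketch of sparse registration from userspace (hypothetical, raw syscall):
 * reserve 64 buffer slots up front, to be filled in later via updates. With
 * IORING_RSRC_REGISTER_SPARSE set, rr.data and rr.tags must stay zero:
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 64,
 *		.flags	= IORING_RSRC_REGISTER_SPARSE,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS2,
 *		&rr, sizeof(rr));
 */
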
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_table.data.nr)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (node->tag)
		io_post_aux_cqe(ctx, node->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(io_slot_file(node));
		break;
	case IORING_RSRC_BUFFER:
		io_buffer_unmap(ctx, node->buf);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	io_cache_free(&ctx->node_cache, node);
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	if (!ctx->file_table.data.nr)
		return -ENXIO;

	io_free_file_tables(ctx, &ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	return 0;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_table.data.nr)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	if (!io_alloc_file_tables(ctx, &ctx->file_table, nr_args))
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		ret = -EFAULT;
		if (tags && copy_from_user(&tag, &tags[i], sizeof(tag)))
			goto fail;
		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd)))
			goto fail;
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (tag)
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = -ENOMEM;
		node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
		if (!node) {
			fput(file);
			goto fail;
		}
		if (tag)
			node->tag = tag;
		ctx->file_table.data.nodes[i] = node;
		io_fixed_file_set(node, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
	return 0;
fail:
	io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	if (!ctx->buf_table.nr)
		return -ENXIO;
	io_rsrc_data_free(ctx, &ctx->buf_table);
	return 0;
}

/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->buf_table.nr; i++) {
		struct io_rsrc_node *node = ctx->buf_table.nodes[i];
		struct io_mapped_ubuf *imu;

		if (!node)
			continue;
		imu = node->buf;
		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

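/*
 * Worked example (assuming 4K base pages and a 2MB huge page): a buffer
 * made up of 512 subpages of a single compound page accounts
 * page_size(hpage) >> PAGE_SHIFT = 512 pages exactly once, when its head
 * page is first seen; repeat sightings of the same head are skipped via
 * *last_hpage or headpage_already_acct().
 */
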
static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
			       struct io_imu_folio_data *data)
{
	struct page **page_array = *pages, **new_array = NULL;
	int nr_pages_left = *nr_pages, i, j;
	int nr_folios = data->nr_folios;

	/* Store head pages only */
	new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
				   GFP_KERNEL);
	if (!new_array)
		return false;

	new_array[0] = compound_head(page_array[0]);
	/*
	 * The pages are bound to the folio; this doesn't actually unpin
	 * them, but drops all but one reference, which io_buffer_unmap()
	 * usually puts down later. Note, needs a better helper.
	 */
	if (data->nr_pages_head > 1)
		unpin_user_pages(&page_array[1], data->nr_pages_head - 1);

	j = data->nr_pages_head;
	nr_pages_left -= data->nr_pages_head;
	for (i = 1; i < nr_folios; i++) {
		unsigned int nr_unpin;

		new_array[i] = page_array[j];
		nr_unpin = min_t(unsigned int, nr_pages_left - 1,
				 data->nr_pages_mid - 1);
		if (nr_unpin)
			unpin_user_pages(&page_array[j+1], nr_unpin);
		j += data->nr_pages_mid;
		nr_pages_left -= data->nr_pages_mid;
	}
	kvfree(page_array);
	*pages = new_array;
	*nr_pages = nr_folios;
	return true;
}

bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
			      struct io_imu_folio_data *data)
{
	struct folio *folio = page_folio(page_array[0]);
	unsigned int count = 1, nr_folios = 1;
	int i;

	data->nr_pages_mid = folio_nr_pages(folio);
	data->folio_shift = folio_shift(folio);

	/*
	 * Check if pages are contiguous inside a folio, and all folios have
	 * the same page count except for the head and tail.
	 */
	for (i = 1; i < nr_pages; i++) {
		if (page_folio(page_array[i]) == folio &&
		    page_array[i] == page_array[i-1] + 1) {
			count++;
			continue;
		}

		if (nr_folios == 1) {
			if (folio_page_idx(folio, page_array[i-1]) !=
			    data->nr_pages_mid - 1)
				return false;

			data->nr_pages_head = count;
		} else if (count != data->nr_pages_mid) {
			return false;
		}

		folio = page_folio(page_array[i]);
		if (folio_size(folio) != (1UL << data->folio_shift) ||
		    folio_page_idx(folio, page_array[i]) != 0)
			return false;

		count = 1;
		nr_folios++;
	}
	if (nr_folios == 1)
		data->nr_pages_head = count;

	data->nr_folios = nr_folios;
	return true;
}

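/*
 * Example layout that passes the check above (assuming 2MB folios of 512
 * 4K pages each): a range covering folio A from page index 300 through its
 * end (head count 212), all 512 pages of folio B, and the first 100 pages
 * of folio C coalesces to nr_folios = 3 with nr_pages_mid = 512. The head
 * run must end at a folio boundary and every later folio must start at
 * page index 0; only the tail folio may be partial.
 */
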
static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
						   struct iovec *iov,
						   struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	struct io_rsrc_node *node;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct io_imu_folio_data data;
	bool coalesced = false;

	if (!iov->iov_base)
		return NULL;

	node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
	if (!node)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's huge page(s), try to coalesce them into fewer bvec entries */
	if (nr_pages > 1 && io_check_coalesce_buffer(pages, nr_pages, &data)) {
		if (data.nr_pages_mid != 1)
			coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
	}

	imu = io_alloc_imu(ctx, nr_pages);
	if (!imu)
		goto done;

	imu->nr_bvecs = nr_pages;
	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->len = iov->iov_len;
	imu->folio_shift = PAGE_SHIFT;
	imu->release = io_release_ubuf;
	imu->priv = imu;
	imu->is_kbuf = false;
	imu->dir = IO_IMU_DEST | IO_IMU_SOURCE;
	if (coalesced)
		imu->folio_shift = data.folio_shift;
	refcount_set(&imu->refs, 1);
	off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
	node->buf = imu;
	ret = 0;

	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret) {
		if (imu)
			io_free_imu(ctx, imu);
		io_cache_free(&ctx->node_cache, node);
		node = ERR_PTR(ret);
	}
	kvfree(pages);
	return node;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data data;
	struct iovec fast_iov, *iov = &fast_iov;
	const struct iovec __user *uvec;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->buf_table.nr)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(&data, nr_args);
	if (ret)
		return ret;

	if (!arg)
		memset(iov, 0, sizeof(*iov));

	for (i = 0; i < nr_args; i++) {
		struct io_rsrc_node *node;
		u64 tag = 0;

		if (arg) {
			uvec = (struct iovec __user *) arg;
			iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
			if (IS_ERR(iov)) {
				ret = PTR_ERR(iov);
				break;
			}
			ret = io_buffer_validate(iov);
			if (ret)
				break;
			if (ctx->compat)
				arg += sizeof(struct compat_iovec);
			else
				arg += sizeof(struct iovec);
		}

		if (tags) {
			if (copy_from_user(&tag, &tags[i], sizeof(tag))) {
				ret = -EFAULT;
				break;
			}
		}

		node = io_sqe_buffer_register(ctx, iov, &last_hpage);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			break;
		}
		if (tag) {
			if (!node) {
				ret = -EINVAL;
				break;
			}
			node->tag = tag;
		}
		data.nodes[i] = node;
	}

	ctx->buf_table = data;
	if (ret)
		io_sqe_buffers_unregister(ctx);
	return ret;
}

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = cmd_to_io_kiocb(cmd)->ctx;
	struct io_rsrc_data *data = &ctx->buf_table;
	struct req_iterator rq_iter;
	struct io_mapped_ubuf *imu;
	struct io_rsrc_node *node;
	struct bio_vec bv, *bvec;
	u16 nr_bvecs;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	if (index >= data->nr) {
		ret = -EINVAL;
		goto unlock;
	}
	index = array_index_nospec(index, data->nr);

	if (data->nodes[index]) {
		ret = -EBUSY;
		goto unlock;
	}

	node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
	if (!node) {
		ret = -ENOMEM;
		goto unlock;
	}

	nr_bvecs = blk_rq_nr_phys_segments(rq);
	imu = io_alloc_imu(ctx, nr_bvecs);
	if (!imu) {
		kfree(node);
		ret = -ENOMEM;
		goto unlock;
	}

	imu->ubuf = 0;
	imu->len = blk_rq_bytes(rq);
	imu->acct_pages = 0;
	imu->folio_shift = PAGE_SHIFT;
	imu->nr_bvecs = nr_bvecs;
	refcount_set(&imu->refs, 1);
	imu->release = release;
	imu->priv = rq;
	imu->is_kbuf = true;
	imu->dir = 1 << rq_data_dir(rq);

	bvec = imu->bvec;
	rq_for_each_bvec(bv, rq, rq_iter)
		*bvec++ = bv;

	node->buf = imu;
	data->nodes[index] = node;
unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}
EXPORT_SYMBOL_GPL(io_buffer_register_bvec);

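/*
 * Driver-side sketch (hypothetical caller, e.g. a ublk-style uring_cmd
 * handler; my_release and the surrounding flow are illustrative only):
 * expose a request's pages as fixed buffer 0 for the duration of the
 * command, then drop the registration when done:
 *
 *	static void my_release(void *priv)
 *	{
 *		struct request *rq = priv;
 *		...complete or requeue rq...
 *	}
 *
 *	ret = io_buffer_register_bvec(cmd, rq, my_release, 0, issue_flags);
 *	...issue fixed-buffer IO against slot 0...
 *	io_buffer_unregister_bvec(cmd, 0, issue_flags);
 */
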
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = cmd_to_io_kiocb(cmd)->ctx;
	struct io_rsrc_data *data = &ctx->buf_table;
	struct io_rsrc_node *node;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);
	if (index >= data->nr) {
		ret = -EINVAL;
		goto unlock;
	}
	index = array_index_nospec(index, data->nr);

	node = data->nodes[index];
	if (!node) {
		ret = -EINVAL;
		goto unlock;
	}
	if (!node->buf->is_kbuf) {
		ret = -EBUSY;
		goto unlock;
	}

	io_put_rsrc_node(ctx, node);
	data->nodes[index] = NULL;
unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}
EXPORT_SYMBOL_GPL(io_buffer_unregister_bvec);

static int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
		return -EFAULT;
	if (!(imu->dir & (1 << ddir)))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here for user
		 * registered nodes, because we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are the same in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), adjust the iterator directly. This makes it
		 * easier since we can just skip the first segment, which may
		 * not be folio_size aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		/*
		 * Kernel buffer bvecs, on the other hand, don't necessarily
		 * have the size property of user registered ones, so we have
		 * to use the slow iter advance.
		 */
		if (offset < bvec->bv_len) {
			iter->count -= offset;
			iter->iov_offset = offset;
		} else if (imu->is_kbuf) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> imu->folio_shift);

			iter->bvec += seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
		}
	}

	return 0;
}

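/*
 * Worked example for the fast-path skip above (assuming folio_shift = 12,
 * i.e. 4K segments, and a first bvec of 4096 bytes): importing at
 * offset = 10000 gives offset - bv_len = 5904, so seg_skip =
 * 1 + (5904 >> 12) = 2 segments skipped, and iov_offset =
 * 5904 & 4095 = 1808 bytes into the third bvec.
 */
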
inline struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;

	if (req->flags & REQ_F_BUF_NODE)
		return req->buf_node;

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
	if (node)
		io_req_assign_buf_node(req, node);
	io_ring_submit_unlock(ctx, issue_flags);
	return node;
}

int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
		      u64 buf_addr, size_t len, int ddir,
		      unsigned issue_flags)
{
	struct io_rsrc_node *node;

	node = io_find_buf_node(req, issue_flags);
	if (!node)
		return -EFAULT;
	return io_import_fixed(ddir, iter, node->buf, buf_addr, len);
}

/*
 * Lock two rings at once. The rings must be different! Locking is done in
 * ctx pointer order, so two tasks locking the same pair always take the
 * mutexes in the same order and cannot ABBA-deadlock.
 */
static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
{
	if (ctx1 > ctx2)
		swap(ctx1, ctx2);
	mutex_lock(&ctx1->uring_lock);
	mutex_lock_nested(&ctx2->uring_lock, SINGLE_DEPTH_NESTING);
}

/* Both rings are locked by the caller. */
static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
			    struct io_uring_clone_buffers *arg)
{
	struct io_rsrc_data data;
	int i, ret, off, nr;
	unsigned int nbufs;

	lockdep_assert_held(&ctx->uring_lock);
	lockdep_assert_held(&src_ctx->uring_lock);

	/*
	 * Accounting state is shared between the two rings; that only works if
	 * both rings are accounted towards the same counters.
	 */
	if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
		return -EINVAL;

	/* if offsets are given, must have nr specified too */
	if (!arg->nr && (arg->dst_off || arg->src_off))
		return -EINVAL;
	/* not allowed unless REPLACE is set */
	if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
		return -EBUSY;

	nbufs = src_ctx->buf_table.nr;
	if (!arg->nr)
		arg->nr = nbufs;
	else if (arg->nr > nbufs)
		return -EINVAL;
	else if (arg->nr > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
		return -EOVERFLOW;

	ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.nr));
	if (ret)
		return ret;

	/* Fill entries in data from dst that won't overlap with src */
	for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
		struct io_rsrc_node *src_node = ctx->buf_table.nodes[i];

		if (src_node) {
			data.nodes[i] = src_node;
			src_node->refs++;
		}
	}

	ret = -ENXIO;
	nbufs = src_ctx->buf_table.nr;
	if (!nbufs)
		goto out_free;
	ret = -EINVAL;
	if (!arg->nr)
		arg->nr = nbufs;
	else if (arg->nr > nbufs)
		goto out_free;
	ret = -EOVERFLOW;
	if (check_add_overflow(arg->nr, arg->src_off, &off))
		goto out_free;
	if (off > nbufs)
		goto out_free;

	off = arg->dst_off;
	i = arg->src_off;
	nr = arg->nr;
	while (nr--) {
		struct io_rsrc_node *dst_node, *src_node;

		src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
		if (!src_node) {
			dst_node = NULL;
		} else {
			dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
			if (!dst_node) {
				ret = -ENOMEM;
				goto out_free;
			}

			refcount_inc(&src_node->buf->refs);
			dst_node->buf = src_node->buf;
		}
		data.nodes[off++] = dst_node;
		i++;
	}

	/*
	 * If asked for replace, put the old table. data->nodes[] holds both
	 * old and new nodes at this point.
	 */
	if (arg->flags & IORING_REGISTER_DST_REPLACE)
		io_rsrc_data_free(ctx, &ctx->buf_table);

	/*
	 * ctx->buf_table must be empty now - either the contents are being
	 * replaced and we just freed the table, or the contents are being
	 * copied to a ring that does not have buffers yet (checked at function
	 * entry).
	 */
	WARN_ON_ONCE(ctx->buf_table.nr);
	ctx->buf_table = data;
	return 0;

out_free:
	io_rsrc_data_free(ctx, &data);
	return ret;
}

/*
 * Copy the registered buffers from the source ring whose file descriptor
 * is given in src_fd to the current ring. This is identical to registering
 * the buffers with ctx, except faster as the mappings already exist.
 *
 * Since the memory is already accounted once, don't account it again.
 */
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_clone_buffers buf;
	struct io_ring_ctx *src_ctx;
	bool registered_src;
	struct file *file;
	int ret;

	if (copy_from_user(&buf, arg, sizeof(buf)))
		return -EFAULT;
	if (buf.flags & ~(IORING_REGISTER_SRC_REGISTERED|IORING_REGISTER_DST_REPLACE))
		return -EINVAL;
	if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.nr)
		return -EBUSY;
	if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
		return -EINVAL;

	registered_src = (buf.flags & IORING_REGISTER_SRC_REGISTERED) != 0;
	file = io_uring_register_get_file(buf.src_fd, registered_src);
	if (IS_ERR(file))
		return PTR_ERR(file);

	src_ctx = file->private_data;
	if (src_ctx != ctx) {
		mutex_unlock(&ctx->uring_lock);
		lock_two_rings(ctx, src_ctx);
	}

	ret = io_clone_buffers(ctx, src_ctx, &buf);

	if (src_ctx != ctx)
		mutex_unlock(&src_ctx->uring_lock);

	fput(file);
	return ret;
}

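/*
 * Userspace-side sketch (hypothetical, raw syscall) of the clone path:
 * copy all registered buffers from a source ring into an empty destination
 * ring. Leaving nr and the offsets at zero clones the whole table, and the
 * register opcode requires the fourth syscall argument to be 1:
 *
 *	struct io_uring_clone_buffers buf = {
 *		.src_fd	= src_ring_fd,
 *	};
 *	syscall(__NR_io_uring_register, dst_ring_fd,
 *		IORING_REGISTER_CLONE_BUFFERS, &buf, 1);
 */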