// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct kmem_cache *io_buf_cachep;

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

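/*
 * Return a legacy provided buffer to its group's free list, so it can be
 * handed out again by a later buffer select. Clears the selected state on
 * the request.
 */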
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
	}
}

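/*
 * Grab the first legacy provided buffer from the group's free list, clamp
 * the requested length to the buffer size, and mark the group empty if this
 * was the last entry.
 */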
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}

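/*
 * Select the buffer at the current head of a mapped buffer ring. When the
 * request comes in unlocked (io-wq) or the file can't poll, the buffer is
 * committed immediately; otherwise the caller commits once the transfer
 * size is known.
 */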
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, as coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call commit
		 * when the transfer completes (or if we get -EAGAIN and must
		 * poll or retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return u64_to_user_ptr(buf->addr);
}

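/*
 * Select a single buffer for the group indicated by req->buf_index, using
 * either the mapped buffer ring or the legacy provided-buffer list,
 * depending on how the group was set up.
 */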
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

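/*
 * Peek buffers from a mapped ring and fill arg->iovs without committing
 * them, expanding the iovec array if KBUF_MODE_EXPAND allows it. Returns
 * the number of iovecs filled, or a negative error.
 */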
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);

		if (unlikely(!len))
			return -ENOBUFS;
		/*
		 * Limit incremental buffers to 1 segment. No point trying
		 * to peek ahead and map more than we need, when the buffers
		 * themselves should be large when setup with
		 * IOU_PBUF_RING_INC.
		 */
		if (bl->flags & IOBL_INC) {
			nr_avail = 1;
		} else {
			size_t needed;

			needed = (arg->max_len + len - 1) / len;
			needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
			if (nr_avail > needed)
				nr_avail = needed;
		}
	}

	/*
	 * Only alloc a bigger array if we know we have data to map, e.g. not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non partial buffers */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC))
				buf->len = len;
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

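/*
 * Select one or more buffers for a request. Ring buffers are committed right
 * away and flagged non-recyclable in case the request goes through poll;
 * legacy groups fall back to a single provided buffer.
 */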
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

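/*
 * Tear down buffers in a group. A mapped buffer ring is unmapped as a whole
 * (unpinning the pages if they were user memory); for a legacy group, up to
 * nbufs entries are moved back onto ctx->io_buffers_cache.
 */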
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->flags & IOBL_BUF_RING) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->buf_nr_pages) {
			int j;

			if (!(bl->flags & IOBL_MMAP)) {
				for (j = 0; j < bl->buf_nr_pages; j++)
					unpin_user_page(bl->buf_pages[j]);
			}
			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
					&bl->buf_nr_pages, bl->flags & IOBL_MMAP);
			bl->flags &= ~IOBL_MMAP;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->flags &= ~IOBL_BUF_RING;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

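/* Ring teardown: drop all buffer groups and free any cached buffer entries. */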
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}

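/*
 * IORING_OP_REMOVE_BUFFERS: the number of buffers to remove is passed in
 * sqe->fd and the buffer group ID in sqe->buf_group; all other fields must
 * be unused.
 */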
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!(bl->flags & IOBL_BUF_RING))
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

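/*
 * IORING_OP_PROVIDE_BUFFERS: sqe->addr is the base address of the buffer
 * range, sqe->len the size of each buffer, sqe->fd the number of buffers,
 * sqe->off the starting buffer ID, and sqe->buf_group the group ID. The
 * range is checked for arithmetic overflow and basic access before use.
 */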
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

#define IO_BUFFER_ALLOC_BATCH 64

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (e.g. not under uring_lock)
	 * will add to ->io_buffers_comp. If we don't have any free buffers,
	 * check the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */

	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}

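/*
 * Carve nbufs entries out of the context-wide buffer cache, refilling it as
 * needed, and link them into the group with consecutive buffer IDs starting
 * at pbuf->bid. Partial success is fine; only failing to add any buffer at
 * all is reported as -ENOMEM.
 */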
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->flags & IOBL_BUF_RING) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

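/*
 * Register a buffer ring that lives in application memory: pin the user
 * pages backing reg->ring_addr and vmap them so the kernel has a contiguous
 * view of the ring.
 */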
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = NULL;
	struct page **pages;
	int nr_pages, ret;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!br) {
		ret = -ENOMEM;
		goto error_unpin;
	}

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		ret = -EINVAL;
		goto error_unpin;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->flags |= IOBL_BUF_RING;
	bl->flags &= ~IOBL_MMAP;
	return 0;
error_unpin:
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	vunmap(br);
	return ret;
}

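/*
 * Register a kernel-allocated buffer ring (IOU_PBUF_RING_MMAP): the pages
 * are allocated here and the application maps them afterwards with mmap().
 */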
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	size_t ring_size;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
	if (IS_ERR(bl->buf_ring)) {
		bl->buf_ring = NULL;
		return -ENOMEM;
	}

	bl->flags |= (IOBL_BUF_RING | IOBL_MMAP);
	return 0;
}

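/*
 * IORING_REGISTER_PBUF_RING: userspace passes a struct io_uring_buf_reg
 * carrying the group ID, a power-of-two entry count below 65536, and either
 * the address of an application-allocated ring or IOU_PBUF_RING_MMAP to ask
 * the kernel to allocate it.
 */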
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;
		if (reg.flags & IOU_PBUF_RING_INC)
			bl->flags |= IOBL_INC;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the end.
	 * That may then safely free the buffer_list (and drop the pages), as
	 * vm_insert_pages() will have already grabbed the necessary vma
	 * references by that point.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->flags & IOBL_MMAP)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

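/*
 * mmap() handler for kernel-allocated buffer rings. The buffer group ID is
 * recovered from the mmap offset, the ring pages are mapped into the vma,
 * and the temporary reference taken by io_pbuf_get_bl() is dropped again.
 */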
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
	struct io_buffer_list *bl;
	int bgid, ret;

	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
	bl = io_pbuf_get_bl(ctx, bgid);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
	io_put_bl(ctx, bl);
	return ret;
}