// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

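/*
 * Commit "len" bytes for an incremental (IOBL_INC) buffer ring. Entries
 * are consumed from the ring head in order; an entry that is only
 * partially used has its addr/len adjusted in place rather than being
 * retired. Returns true if everything consumed was fully retired, false
 * if a partial entry remains for reuse.
 */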
static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
	while (len) {
		struct io_uring_buf *buf;
		u32 this_len;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		this_len = min_t(int, len, buf->len);
		buf->len -= this_len;
		if (buf->len) {
			buf->addr += this_len;
			return false;
		}
		bl->head++;
		len -= this_len;
	}
	return true;
}

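/*
 * Commit the buffer ring entries a request consumed: "len" is the number
 * of bytes transferred, "nr" the number of ring entries used. Guarded by
 * REQ_F_BUFFERS_COMMIT so a selection is committed at most once. Returns
 * false if an incremental ring still holds a partially consumed entry.
 */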
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;
	if (bl->flags & IOBL_INC)
		return io_kbuf_inc_commit(bl, len);
	bl->head += nr;
	return true;
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but lookups from mmap do.
	 */
	bl->bgid = bgid;
	guard(mutex)(&ctx->mmap_lock);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_drop_legacy(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(req->kbuf);
	req->kbuf = NULL;
}

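/*
 * Return an unused legacy provided buffer to its group list so it can be
 * selected again, e.g. when the request has to be retried without having
 * consumed any data.
 */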
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	bl->nbufs++;
	req->flags &= ~REQ_F_BUFFER_SELECTED;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

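/*
 * Select the first buffer from a legacy provided-buffer list: unlink it,
 * mark the request as holding a selected buffer, and clamp *len to the
 * buffer size. Returns NULL if the list is empty.
 */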
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		bl->nbufs--;
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}

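/*
 * Select the head entry of a mapped provided buffer ring. The entry is
 * normally committed later, when the transfer completes; if we can't
 * rely on the ring staying stable (unlocked issue, or a file that can't
 * be polled), it's committed immediately instead.
 */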
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;
	void __user *ret;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	ret = u64_to_user_ptr(buf->addr);

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes; coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call
		 * commit when the transfer completes (or if we get -EAGAIN and
		 * must poll or retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return ret;
}

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned buf_group, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, buf_group);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, which fits in one page even with 4K pages */
#define PEEK_MAX_IMPORT		256

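/*
 * Peek at the buffer ring without advancing the head: map up to "nr_iovs"
 * entries (bounded by PEEK_MAX_IMPORT and arg->max_len) into arg->iovs,
 * expanding the iovec array if KBUF_MODE_EXPAND allows it. Returns the
 * number of iovecs filled; committing is left to the caller.
 */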
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);
		size_t needed;

		if (unlikely(!len))
			return -ENOBUFS;
		needed = (arg->max_len + len - 1) / len;
		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * Only alloc a bigger array if we know we have data to map, e.g. not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non-partial buffers */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC)) {
				arg->partial_map = 1;
				if (iov != arg->iovs)
					break;
				buf->len = len;
			}
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

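/*
 * Select buffers for a request that can consume multiple buffers in one
 * go. Ring-provided buffers are committed immediately and flagged as
 * non-recyclable (see below); legacy lists hand out a single buffer.
 */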
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, arg->buf_group);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we commit them
		 * here, so they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, arg->buf_group);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

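/*
 * Drop the ring buffer state off a request, committing what was consumed
 * if that hasn't happened yet. Returns false if a partially consumed
 * incremental entry remains valid.
 */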
static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl)
		ret = io_kbuf_commit(req, bl, len, nr);

	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

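/*
 * Release the buffer(s) a request used and build the CQE flags that tell
 * the application which buffer ID was consumed. IORING_CQE_F_BUF_MORE is
 * set if a partially consumed incremental buffer is still usable.
 */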
unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
{
	unsigned int ret;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
		io_kbuf_drop_legacy(req);
		return ret;
	}

	if (!__io_put_kbuf_ring(req, len, nbufs))
		ret |= IORING_CQE_F_BUF_MORE;
	return ret;
}

static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
				    struct io_buffer_list *bl,
				    unsigned long nbufs)
{
	unsigned long i = 0;
	struct io_buffer *nxt;

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);
	WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);

	for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		bl->nbufs--;
		kfree(nxt);
		cond_resched();
	}
	return i;
}

static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (bl->flags & IOBL_BUF_RING)
		io_free_region(ctx, &bl->region);
	else
		io_remove_buffers_legacy(ctx, bl, -1U);

	kfree(bl);
}

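/*
 * Free all buffer lists on ring teardown. Each list is erased from the
 * xarray under the mmap lock first, so a concurrent mmap lookup can't
 * find a list that is about to be freed.
 */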
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;

	while (1) {
		unsigned long index = 0;

		scoped_guard(mutex, &ctx->mmap_lock) {
			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
			if (bl)
				xa_erase(&ctx->io_bl_xa, bl->bgid);
		}
		if (!bl)
			break;
		io_put_bl(ctx, bl);
	}
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	scoped_guard(mutex, &ctx->mmap_lock)
		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
	io_put_bl(ctx, bl);
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);
	if (!p->len)
		return -EINVAL;

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
			       &size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

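/*
 * Add "nbufs" legacy buffers to a list, assigning consecutive buffer IDs
 * starting at pbuf->bid. If at least one buffer was added, a partial
 * failure is still reported as success.
 */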
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int ret = -ENOMEM, i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		/*
		 * Nonsensical to have more buffers in a list than a 16-bit
		 * bid can address, as the application then has no way of
		 * knowing which buffer a duplicate bid refers to.
		 */
		if (bl->nbufs == USHRT_MAX) {
			ret = -EOVERFLOW;
			break;
		}
		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
		if (!buf)
			break;

		list_add_tail(&buf->list, &bl->buf_list);
		bl->nbufs++;
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : ret;
}

static int __io_manage_buffers_legacy(struct io_kiocb *req,
				      struct io_buffer_list *bl)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	int ret;

	if (!bl) {
		if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
			return -ENOENT;
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl)
			return -ENOMEM;

		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(req->ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			return ret;
		}
	}
	/* can't use provide/remove buffers command on mapped buffers */
	if (bl->flags & IOBL_BUF_RING)
		return -EINVAL;
	if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
		return io_add_buffers(req->ctx, p, bl);
	return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
}

int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, p->bgid);
	ret = __io_manage_buffers_legacy(req, bl);
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

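/*
 * Register a mapped provided buffer ring. The ring memory either comes
 * from the application (reg.ring_addr), or is allocated by the kernel
 * and later mmap'ed by the application when IOU_PBUF_RING_MMAP is set.
 */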
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;
	struct io_uring_region_desc rd;
	struct io_uring_buf_ring *br;
	unsigned long mmap_offset;
	unsigned long ring_size;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;
	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
		io_destroy_bl(ctx, bl);
	}

	bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
	if (!bl)
		return -ENOMEM;

	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
	ring_size = flex_array_size(br, bufs, reg.ring_entries);

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(ring_size);
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		rd.user_addr = reg.ring_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
	if (ret)
		goto fail;
	br = io_region_get_ptr(&bl->region);

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
		ret = -EINVAL;
		goto fail;
	}
#endif

	bl->nr_entries = reg.ring_entries;
	bl->mask = reg.ring_entries - 1;
	bl->flags |= IOBL_BUF_RING;
	bl->buf_ring = br;
	if (reg.flags & IOU_PBUF_RING_INC)
		bl->flags |= IOBL_INC;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
fail:
	io_free_region(ctx, &bl->region);
	kfree(bl);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->io_bl_xa, bl->bgid);

	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;
	if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
		return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid)
{
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->mmap_lock);

	bl = xa_load(&ctx->io_bl_xa, bgid);
	if (!bl || !(bl->flags & IOBL_BUF_RING))
		return NULL;
	return &bl->region;
}