xref: /linux/io_uring/kbuf.c (revision afcefc58fdfd687e3a9a9bef0be5846b96f710b7)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/namei.h>
9 #include <linux/poll.h>
10 #include <linux/vmalloc.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "opdef.h"
17 #include "kbuf.h"
18 #include "memmap.h"
19 
20 /* BIDs are addressed by a 16-bit field in a CQE */
21 #define MAX_BIDS_PER_BGID (1 << 16)
22 
23 /* Mapped buffer ring: return the io_uring_buf at the given head */
24 #define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
25 
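/*
 * Worked example of the masked lookup above, assuming an 8-entry ring
 * (mask == 7): head increments monotonically and only the low bits
 * select a slot, so head 6 -> bufs[6], head 7 -> bufs[7], and head 8
 * wraps back to bufs[0]. This relies on the entry count being a power
 * of two, which io_register_pbuf_ring() enforces via is_power_of_2().
 */
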
26 struct io_provide_buf {
27 	struct file			*file;
28 	__u64				addr;
29 	__u32				len;
30 	__u32				bgid;
31 	__u32				nbufs;
32 	__u16				bid;
33 };
34 
35 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
36 {
37 	while (len) {
38 		struct io_uring_buf *buf;
39 		u32 this_len;
40 
41 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
42 		this_len = min_t(int, len, buf->len);
43 		buf->len -= this_len;
44 		if (buf->len) {
45 			buf->addr += this_len;
46 			return false;
47 		}
48 		bl->head++;
49 		len -= this_len;
50 	}
51 	return true;
52 }
53 
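/*
 * Worked example for the incremental (IOBL_INC) case above, assuming
 * the head buffer has len 4096 and 1500 bytes completed: this_len is
 * 1500, the buffer is shrunk in place (addr += 1500, len = 2596), the
 * head is not advanced, and false is returned so the CQE gets
 * IORING_CQE_F_BUF_MORE. A transfer of the full 4096 bytes would zero
 * buf->len, advance bl->head, and return true instead.
 */
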
54 bool io_kbuf_commit(struct io_kiocb *req,
55 		    struct io_buffer_list *bl, int len, int nr)
56 {
57 	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
58 		return true;
59 
60 	req->flags &= ~REQ_F_BUFFERS_COMMIT;
61 
62 	if (unlikely(len < 0))
63 		return true;
64 	if (bl->flags & IOBL_INC)
65 		return io_kbuf_inc_commit(bl, len);
66 	bl->head += nr;
67 	return true;
68 }
69 
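/*
 * For a non-incremental ring, a commit is simply a head bump: nr fully
 * consumed buffers mean bl->head += nr. Userspace can observe the
 * kernel-side head through io_register_pbuf_status() below, which
 * reports bl->head for a given buffer group.
 */
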
70 static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
71 							unsigned int bgid)
72 {
73 	lockdep_assert_held(&ctx->uring_lock);
74 
75 	return xa_load(&ctx->io_bl_xa, bgid);
76 }
77 
78 static int io_buffer_add_list(struct io_ring_ctx *ctx,
79 			      struct io_buffer_list *bl, unsigned int bgid)
80 {
81 	/*
82 	 * Store buffer group ID and finally mark the list as visible.
83 	 * The normal lookup doesn't care about the visibility as we're
84 	 * always under the ->uring_lock, but lookups from mmap do.
85 	 */
86 	bl->bgid = bgid;
87 	guard(mutex)(&ctx->mmap_lock);
88 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
89 }
90 
91 void io_kbuf_drop_legacy(struct io_kiocb *req)
92 {
93 	if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
94 		return;
95 	req->flags &= ~REQ_F_BUFFER_SELECTED;
96 	kfree(req->kbuf);
97 	req->kbuf = NULL;
98 }
99 
100 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
101 {
102 	struct io_ring_ctx *ctx = req->ctx;
103 	struct io_buffer_list *bl;
104 	struct io_buffer *buf;
105 
106 	io_ring_submit_lock(ctx, issue_flags);
107 
108 	buf = req->kbuf;
109 	bl = io_buffer_get_list(ctx, buf->bgid);
110 	list_add(&buf->list, &bl->buf_list);
111 	bl->nbufs++;
112 	req->flags &= ~REQ_F_BUFFER_SELECTED;
113 
114 	io_ring_submit_unlock(ctx, issue_flags);
115 	return true;
116 }
117 
118 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
119 					      struct io_buffer_list *bl)
120 {
121 	if (!list_empty(&bl->buf_list)) {
122 		struct io_buffer *kbuf;
123 
124 		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
125 		list_del(&kbuf->list);
126 		bl->nbufs--;
127 		if (*len == 0 || *len > kbuf->len)
128 			*len = kbuf->len;
129 		if (list_empty(&bl->buf_list))
130 			req->flags |= REQ_F_BL_EMPTY;
131 		req->flags |= REQ_F_BUFFER_SELECTED;
132 		req->kbuf = kbuf;
133 		req->buf_index = kbuf->bid;
134 		return u64_to_user_ptr(kbuf->addr);
135 	}
136 	return NULL;
137 }
138 
139 static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
140 				      struct io_buffer_list *bl,
141 				      struct iovec *iov)
142 {
143 	void __user *buf;
144 
145 	buf = io_provided_buffer_select(req, len, bl);
146 	if (unlikely(!buf))
147 		return -ENOBUFS;
148 
149 	iov[0].iov_base = buf;
150 	iov[0].iov_len = *len;
151 	return 1;
152 }
153 
154 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
155 					  struct io_buffer_list *bl,
156 					  unsigned int issue_flags)
157 {
158 	struct io_uring_buf_ring *br = bl->buf_ring;
159 	__u16 tail, head = bl->head;
160 	struct io_uring_buf *buf;
161 	void __user *ret;
162 
163 	tail = smp_load_acquire(&br->tail);
164 	if (unlikely(tail == head))
165 		return NULL;
166 
167 	if (head + 1 == tail)
168 		req->flags |= REQ_F_BL_EMPTY;
169 
170 	buf = io_ring_head_to_buf(br, head, bl->mask);
171 	if (*len == 0 || *len > buf->len)
172 		*len = buf->len;
173 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
174 	req->buf_list = bl;
175 	req->buf_index = buf->bid;
176 	ret = u64_to_user_ptr(buf->addr);
177 
178 	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
179 		/*
180 		 * If we came in unlocked, we have no choice but to consume the
181 		 * buffer here, otherwise nothing ensures that the buffer won't
182 		 * get used by others. This does mean it'll be pinned until the
183 		 * IO completes; coming in unlocked means we're being called from
184 		 * io-wq context and there may be further retries in async hybrid
185 		 * mode. For the locked case, the caller must call commit when
186 		 * the transfer completes (or if we get -EAGAIN and must poll or
187 		 * retry).
188 		 */
189 		io_kbuf_commit(req, bl, *len, 1);
190 		req->buf_list = NULL;
191 	}
192 	return ret;
193 }
194 
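/*
 * The smp_load_acquire() on br->tail above pairs with the release
 * store userspace performs when publishing buffers. A minimal sketch
 * of the application side, assuming liburing's buf_ring helpers
 * (liburing names, not part of this file):
 *
 *	io_uring_buf_ring_add(br, addr, len, bid,
 *			      io_uring_buf_ring_mask(entries), 0);
 *	io_uring_buf_ring_advance(br, 1);	// release-stores br->tail
 */
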
195 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
196 			      unsigned buf_group, unsigned int issue_flags)
197 {
198 	struct io_ring_ctx *ctx = req->ctx;
199 	struct io_buffer_list *bl;
200 	void __user *ret = NULL;
201 
202 	io_ring_submit_lock(req->ctx, issue_flags);
203 
204 	bl = io_buffer_get_list(ctx, buf_group);
205 	if (likely(bl)) {
206 		if (bl->flags & IOBL_BUF_RING)
207 			ret = io_ring_buffer_select(req, len, bl, issue_flags);
208 		else
209 			ret = io_provided_buffer_select(req, len, bl);
210 	}
211 	io_ring_submit_unlock(req->ctx, issue_flags);
212 	return ret;
213 }
214 
215 /* cap it at a reasonable 256; 256 iovecs fit in one page even at 4K */
216 #define PEEK_MAX_IMPORT		256
217 
218 static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
219 				struct io_buffer_list *bl)
220 {
221 	struct io_uring_buf_ring *br = bl->buf_ring;
222 	struct iovec *iov = arg->iovs;
223 	int nr_iovs = arg->nr_iovs;
224 	__u16 nr_avail, tail, head;
225 	struct io_uring_buf *buf;
226 
227 	tail = smp_load_acquire(&br->tail);
228 	head = bl->head;
229 	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
230 	if (unlikely(!nr_avail))
231 		return -ENOBUFS;
232 
233 	buf = io_ring_head_to_buf(br, head, bl->mask);
234 	if (arg->max_len) {
235 		u32 len = READ_ONCE(buf->len);
236 		size_t needed;
237 
238 		if (unlikely(!len))
239 			return -ENOBUFS;
240 		needed = (arg->max_len + len - 1) / len;
241 		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
242 		if (nr_avail > needed)
243 			nr_avail = needed;
244 	}
245 
246 	/*
247 	 * Only alloc a bigger array if we know we have data to map, e.g. not
248 	 * a speculative peek operation.
249 	 */
250 	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
251 		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
252 		if (unlikely(!iov))
253 			return -ENOMEM;
254 		if (arg->mode & KBUF_MODE_FREE)
255 			kfree(arg->iovs);
256 		arg->iovs = iov;
257 		nr_iovs = nr_avail;
258 	} else if (nr_avail < nr_iovs) {
259 		nr_iovs = nr_avail;
260 	}
261 
262 	/* set it to max, if not set, so we can use it unconditionally */
263 	if (!arg->max_len)
264 		arg->max_len = INT_MAX;
265 
266 	req->buf_index = buf->bid;
267 	do {
268 		u32 len = buf->len;
269 
270 		/* truncate end piece, if needed, for non-partial buffers */
271 		if (len > arg->max_len) {
272 			len = arg->max_len;
273 			if (!(bl->flags & IOBL_INC)) {
274 				if (iov != arg->iovs)
275 					break;
276 				buf->len = len;
277 			}
278 		}
279 
280 		iov->iov_base = u64_to_user_ptr(buf->addr);
281 		iov->iov_len = len;
282 		iov++;
283 
284 		arg->out_len += len;
285 		arg->max_len -= len;
286 		if (!arg->max_len)
287 			break;
288 
289 		buf = io_ring_head_to_buf(br, ++head, bl->mask);
290 	} while (--nr_iovs);
291 
292 	if (head == tail)
293 		req->flags |= REQ_F_BL_EMPTY;
294 
295 	req->flags |= REQ_F_BUFFER_RING;
296 	req->buf_list = bl;
297 	return iov - arg->iovs;
298 }
299 
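/*
 * Truncation example for the loop above on a non-incremental ring,
 * assuming arg->max_len = 10000 and 4096-byte buffers: the first two
 * buffers map fully (8192 bytes), while the third, which would have to
 * be partial, hits the iov != arg->iovs check and stays in the ring.
 * Only if the very first buffer already exceeds max_len is it used
 * truncated, with its ring entry shrunk in place (buf->len rewritten).
 */
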
300 int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
301 		      unsigned int issue_flags)
302 {
303 	struct io_ring_ctx *ctx = req->ctx;
304 	struct io_buffer_list *bl;
305 	int ret = -ENOENT;
306 
307 	io_ring_submit_lock(ctx, issue_flags);
308 	bl = io_buffer_get_list(ctx, arg->buf_group);
309 	if (unlikely(!bl))
310 		goto out_unlock;
311 
312 	if (bl->flags & IOBL_BUF_RING) {
313 		ret = io_ring_buffers_peek(req, arg, bl);
314 		/*
315 		 * Don't recycle these buffers if we need to go through poll.
316 		 * Nobody else can use them anyway, and holding on to provided
317 		 * buffers for a send/write operation would happen on the app
318 		 * side anyway with normal buffers. Besides, we already
319 		 * committed them; they cannot be put back in the queue.
320 		 */
321 		if (ret > 0) {
322 			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
323 			io_kbuf_commit(req, bl, arg->out_len, ret);
324 		}
325 	} else {
326 		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
327 	}
328 out_unlock:
329 	io_ring_submit_unlock(ctx, issue_flags);
330 	return ret;
331 }
332 
333 int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
334 {
335 	struct io_ring_ctx *ctx = req->ctx;
336 	struct io_buffer_list *bl;
337 	int ret;
338 
339 	lockdep_assert_held(&ctx->uring_lock);
340 
341 	bl = io_buffer_get_list(ctx, arg->buf_group);
342 	if (unlikely(!bl))
343 		return -ENOENT;
344 
345 	if (bl->flags & IOBL_BUF_RING) {
346 		ret = io_ring_buffers_peek(req, arg, bl);
347 		if (ret > 0)
348 			req->flags |= REQ_F_BUFFERS_COMMIT;
349 		return ret;
350 	}
351 
352 	/* don't support multiple buffer selections for legacy */
353 	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
354 }
355 
356 static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
357 {
358 	struct io_buffer_list *bl = req->buf_list;
359 	bool ret = true;
360 
361 	if (bl)
362 		ret = io_kbuf_commit(req, bl, len, nr);
363 
364 	req->flags &= ~REQ_F_BUFFER_RING;
365 	return ret;
366 }
367 
368 unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
369 {
370 	unsigned int ret;
371 
372 	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
373 
374 	if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
375 		io_kbuf_drop_legacy(req);
376 		return ret;
377 	}
378 
379 	if (!__io_put_kbuf_ring(req, len, nbufs))
380 		ret |= IORING_CQE_F_BUF_MORE;
381 	return ret;
382 }
383 
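/*
 * CQE encoding produced above: with bid 3, the completion carries
 * IORING_CQE_F_BUFFER | (3 << IORING_CQE_BUFFER_SHIFT), i.e. the
 * buffer ID sits in the upper 16 bits of cqe->flags. If the commit
 * left data in the head buffer, IORING_CQE_F_BUF_MORE is also set so
 * the application knows further completions will reuse this buffer.
 */
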
384 static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
385 				    struct io_buffer_list *bl,
386 				    unsigned long nbufs)
387 {
388 	unsigned long i = 0;
389 	struct io_buffer *nxt;
390 
391 	/* ->uring_lock protects the legacy buffer lists */
392 	lockdep_assert_held(&ctx->uring_lock);
393 	WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);
394 
395 	for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
396 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
397 		list_del(&nxt->list);
398 		bl->nbufs--;
399 		kfree(nxt);
400 		cond_resched();
401 	}
402 	return i;
403 }
404 
405 static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
406 {
407 	if (bl->flags & IOBL_BUF_RING)
408 		io_free_region(ctx, &bl->region);
409 	else
410 		io_remove_buffers_legacy(ctx, bl, -1U);
411 
412 	kfree(bl);
413 }
414 
415 void io_destroy_buffers(struct io_ring_ctx *ctx)
416 {
417 	struct io_buffer_list *bl;
418 
419 	while (1) {
420 		unsigned long index = 0;
421 
422 		scoped_guard(mutex, &ctx->mmap_lock) {
423 			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
424 			if (bl)
425 				xa_erase(&ctx->io_bl_xa, bl->bgid);
426 		}
427 		if (!bl)
428 			break;
429 		io_put_bl(ctx, bl);
430 	}
431 }
432 
433 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
434 {
435 	scoped_guard(mutex, &ctx->mmap_lock)
436 		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
437 	io_put_bl(ctx, bl);
438 }
439 
440 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
441 {
442 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
443 	u64 tmp;
444 
445 	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
446 	    sqe->splice_fd_in)
447 		return -EINVAL;
448 
449 	tmp = READ_ONCE(sqe->fd);
450 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
451 		return -EINVAL;
452 
453 	memset(p, 0, sizeof(*p));
454 	p->nbufs = tmp;
455 	p->bgid = READ_ONCE(sqe->buf_group);
456 	return 0;
457 }
458 
459 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
460 {
461 	unsigned long size, tmp_check;
462 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
463 	u64 tmp;
464 
465 	if (sqe->rw_flags || sqe->splice_fd_in)
466 		return -EINVAL;
467 
468 	tmp = READ_ONCE(sqe->fd);
469 	if (!tmp || tmp > MAX_BIDS_PER_BGID)
470 		return -E2BIG;
471 	p->nbufs = tmp;
472 	p->addr = READ_ONCE(sqe->addr);
473 	p->len = READ_ONCE(sqe->len);
474 	if (!p->len)
475 		return -EINVAL;
476 
477 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
478 				&size))
479 		return -EOVERFLOW;
480 	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
481 		return -EOVERFLOW;
482 	if (!access_ok(u64_to_user_ptr(p->addr), size))
483 		return -EFAULT;
484 
485 	p->bgid = READ_ONCE(sqe->buf_group);
486 	tmp = READ_ONCE(sqe->off);
487 	if (tmp > USHRT_MAX)
488 		return -E2BIG;
489 	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
490 		return -EINVAL;
491 	p->bid = tmp;
492 	return 0;
493 }
494 
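/*
 * SQE layout decoded by the prep above, shown as a minimal userspace
 * sketch (raw fields; liburing's io_uring_prep_provide_buffers()
 * produces the same mapping):
 *
 *	sqe->opcode	= IORING_OP_PROVIDE_BUFFERS;
 *	sqe->fd		= nbufs;	// number of buffers
 *	sqe->addr	= (__u64) base;	// start of the contiguous range
 *	sqe->len	= buf_len;	// length of each buffer
 *	sqe->buf_group	= bgid;		// group to add them to
 *	sqe->off	= first_bid;	// bid of the first buffer
 */
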
495 static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
496 			  struct io_buffer_list *bl)
497 {
498 	struct io_buffer *buf;
499 	u64 addr = pbuf->addr;
500 	int ret = -ENOMEM, i, bid = pbuf->bid;
501 
502 	for (i = 0; i < pbuf->nbufs; i++) {
503 		/*
504 		 * Nonsensical to have more buffers than the 16-bit bid field
505 		 * can address, as the application then has no way of knowing
506 		 * which duplicate bid refers to what buffer.
507 		 */
508 		if (bl->nbufs == USHRT_MAX) {
509 			ret = -EOVERFLOW;
510 			break;
511 		}
512 		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
513 		if (!buf)
514 			break;
515 
516 		list_add_tail(&buf->list, &bl->buf_list);
517 		bl->nbufs++;
518 		buf->addr = addr;
519 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
520 		buf->bid = bid;
521 		buf->bgid = pbuf->bgid;
522 		addr += pbuf->len;
523 		bid++;
524 		cond_resched();
525 	}
526 
527 	return i ? 0 : ret;
528 }
529 
530 static int __io_manage_buffers_legacy(struct io_kiocb *req,
531 					struct io_buffer_list *bl)
532 {
533 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
534 	int ret;
535 
536 	if (!bl) {
537 		if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
538 			return -ENOENT;
539 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
540 		if (!bl)
541 			return -ENOMEM;
542 
543 		INIT_LIST_HEAD(&bl->buf_list);
544 		ret = io_buffer_add_list(req->ctx, bl, p->bgid);
545 		if (ret) {
546 			kfree(bl);
547 			return ret;
548 		}
549 	}
550 	/* can't use provide/remove buffers command on mapped buffers */
551 	if (bl->flags & IOBL_BUF_RING)
552 		return -EINVAL;
553 	if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
554 		return io_add_buffers(req->ctx, p, bl);
555 	return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
556 }
557 
558 int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
559 {
560 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
561 	struct io_ring_ctx *ctx = req->ctx;
562 	struct io_buffer_list *bl;
563 	int ret;
564 
565 	io_ring_submit_lock(ctx, issue_flags);
566 	bl = io_buffer_get_list(ctx, p->bgid);
567 	ret = __io_manage_buffers_legacy(req, bl);
568 	io_ring_submit_unlock(ctx, issue_flags);
569 
570 	if (ret < 0)
571 		req_set_fail(req);
572 	io_req_set_res(req, ret, 0);
573 	return IOU_COMPLETE;
574 }
575 
576 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
577 {
578 	struct io_uring_buf_reg reg;
579 	struct io_buffer_list *bl;
580 	struct io_uring_region_desc rd;
581 	struct io_uring_buf_ring *br;
582 	unsigned long mmap_offset;
583 	unsigned long ring_size;
584 	int ret;
585 
586 	lockdep_assert_held(&ctx->uring_lock);
587 
588 	if (copy_from_user(&reg, arg, sizeof(reg)))
589 		return -EFAULT;
590 	if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
591 		return -EINVAL;
592 	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
593 		return -EINVAL;
594 	if (!is_power_of_2(reg.ring_entries))
595 		return -EINVAL;
596 	/* 16-bit head/tail cannot disambiguate a full ring from an empty one */
597 	if (reg.ring_entries >= 65536)
598 		return -EINVAL;
599 
600 	bl = io_buffer_get_list(ctx, reg.bgid);
601 	if (bl) {
602 		/* can't replace a mapped ring, or a classic list that still has buffers */
603 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
604 			return -EEXIST;
605 		io_destroy_bl(ctx, bl);
606 	}
607 
608 	bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
609 	if (!bl)
610 		return -ENOMEM;
611 
612 	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
613 	ring_size = flex_array_size(br, bufs, reg.ring_entries);
614 
615 	memset(&rd, 0, sizeof(rd));
616 	rd.size = PAGE_ALIGN(ring_size);
617 	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
618 		rd.user_addr = reg.ring_addr;
619 		rd.flags |= IORING_MEM_REGION_TYPE_USER;
620 	}
621 	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
622 	if (ret)
623 		goto fail;
624 	br = io_region_get_ptr(&bl->region);
625 
626 #ifdef SHM_COLOUR
627 	/*
628 	 * On platforms that have specific aliasing requirements, SHM_COLOUR
629 	 * is set and we must guarantee that the kernel and user side align
630 	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
631 	 * the application mmaps the provided ring buffer. Fail the request
632 	 * if we, by chance, don't end up with aligned addresses. The app
633 	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
634 	 * this transparently.
635 	 */
636 	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
637 	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
638 		ret = -EINVAL;
639 		goto fail;
640 	}
641 #endif
642 
643 	bl->nr_entries = reg.ring_entries;
644 	bl->mask = reg.ring_entries - 1;
645 	bl->flags |= IOBL_BUF_RING;
646 	bl->buf_ring = br;
647 	if (reg.flags & IOU_PBUF_RING_INC)
648 		bl->flags |= IOBL_INC;
649 	io_buffer_add_list(ctx, bl, reg.bgid);
650 	return 0;
651 fail:
652 	io_free_region(ctx, &bl->region);
653 	kfree(bl);
654 	return ret;
655 }
656 
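/*
 * Minimal userspace sketch of registering a ring this function
 * accepts, assuming the application allocates the (page-aligned)
 * memory itself rather than using IOU_PBUF_RING_MMAP; liburing's
 * io_uring_setup_buf_ring() wraps the same sequence:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (__u64)(unsigned long) br,
 *		.ring_entries	= 8,	// power of 2, below 65536
 *		.bgid		= 0,
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 */
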
657 int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
658 {
659 	struct io_uring_buf_reg reg;
660 	struct io_buffer_list *bl;
661 
662 	lockdep_assert_held(&ctx->uring_lock);
663 
664 	if (copy_from_user(&reg, arg, sizeof(reg)))
665 		return -EFAULT;
666 	if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
667 		return -EINVAL;
668 
669 	bl = io_buffer_get_list(ctx, reg.bgid);
670 	if (!bl)
671 		return -ENOENT;
672 	if (!(bl->flags & IOBL_BUF_RING))
673 		return -EINVAL;
674 
675 	scoped_guard(mutex, &ctx->mmap_lock)
676 		xa_erase(&ctx->io_bl_xa, bl->bgid);
677 
678 	io_put_bl(ctx, bl);
679 	return 0;
680 }
681 
682 int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
683 {
684 	struct io_uring_buf_status buf_status;
685 	struct io_buffer_list *bl;
686 
687 	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
688 		return -EFAULT;
689 	if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
690 		return -EINVAL;
691 
692 	bl = io_buffer_get_list(ctx, buf_status.buf_group);
693 	if (!bl)
694 		return -ENOENT;
695 	if (!(bl->flags & IOBL_BUF_RING))
696 		return -EINVAL;
697 
698 	buf_status.head = bl->head;
699 	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
700 		return -EFAULT;
701 
702 	return 0;
703 }
704 
705 struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
706 					    unsigned int bgid)
707 {
708 	struct io_buffer_list *bl;
709 
710 	lockdep_assert_held(&ctx->mmap_lock);
711 
712 	bl = xa_load(&ctx->io_bl_xa, bgid);
713 	if (!bl || !(bl->flags & IOBL_BUF_RING))
714 		return NULL;
715 	return &bl->region;
716 }
717