// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

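/*
 * Commit "len" consumed bytes for an incrementally consumed (IOBL_INC)
 * ring. Buffers are used front to back: a fully consumed buffer advances
 * the ring head, while a partially consumed one has its addr/len adjusted
 * in place. Returns true if the last buffer touched was fully consumed,
 * false if it still has bytes left.
 */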
static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
	while (len) {
		struct io_uring_buf *buf;
		u32 this_len;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		this_len = min_t(int, len, buf->len);
		buf->len -= this_len;
		if (buf->len) {
			buf->addr += this_len;
			return false;
		}
		bl->head++;
		len -= this_len;
	}
	return true;
}

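/*
 * Commit ring buffers consumed by a request: "len" bytes spread over "nr"
 * buffers. Does nothing unless REQ_F_BUFFERS_COMMIT is set, and clears it.
 * Returns false if the current buffer was only partially consumed, which
 * the caller reports via IORING_CQE_F_BUF_MORE.
 */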
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;
	if (bl->flags & IOBL_INC)
		return io_kbuf_inc_commit(bl, len);
	bl->head += nr;
	return true;
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

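/*
 * Make a buffer list visible under the given group ID. The xarray store
 * is done under ->mmap_lock so that lookups from the mmap path only ever
 * see a fully initialized list.
 */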
static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but lookups from mmap do.
	 */
	bl->bgid = bgid;
	guard(mutex)(&ctx->mmap_lock);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_drop_legacy(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return;
	req->buf_index = req->kbuf->bgid;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(req->kbuf);
	req->kbuf = NULL;
}

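/*
 * Return an unused legacy (classic provided) buffer to the head of its
 * group's free list and clear the buffer selection from the request.
 */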
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

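/*
 * Pick the first available legacy provided buffer in the group: unlink it
 * from the list, attach it to the request, and clamp *len to the buffer
 * size. Returns the buffer's user address, or NULL if the group is empty.
 */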
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}

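/*
 * Select the buffer at the current head of a ring mapped buffer group.
 * The tail is read with an acquire barrier, pairing with the release
 * store the application does when publishing new buffers. The head is
 * only committed here when completion time cannot be relied upon to do
 * it, see the comment below.
 */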
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;
	void __user *ret;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	ret = u64_to_user_ptr(buf->addr);

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return ret;
}

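/*
 * Select a buffer for the request from its buffer group, dispatching to
 * the ring mapped or legacy variant depending on how the group was
 * created.
 */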
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256: 256 iovecs fit in one page even with 4K pages */
#define PEEK_MAX_IMPORT		256

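/*
 * Map the buffers available in a ring into the iovec array in "arg"
 * without committing the ring head. If KBUF_MODE_EXPAND is set, the
 * iovec array may be grown to cover up to arg->max_len bytes, capped at
 * PEEK_MAX_IMPORT buffers. Returns the number of iovecs filled, or a
 * negative error code.
 */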
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);
		size_t needed;

		if (unlikely(!len))
			return -ENOBUFS;
		needed = (arg->max_len + len - 1) / len;
		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * only alloc a bigger array if we know we have data to map, e.g. not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non-incremental rings */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC))
				buf->len = len;
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

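/*
 * Select one or more buffers for the request. Ring mapped selections are
 * committed immediately and marked as non-recyclable; legacy groups
 * always yield a single buffer.
 */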
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

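/*
 * Like io_buffers_select(), but only peeks: ring buffers are left
 * uncommitted so the selection can be redone later. Caller must hold
 * ->uring_lock.
 */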
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

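/*
 * Release the buffer(s) a request consumed and build the CQE flags
 * describing the selection: the buffer ID in the upper bits, plus
 * IORING_CQE_F_BUF_MORE if an incrementally consumed buffer was not
 * fully used and remains the current head buffer.
 */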
unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
{
	unsigned int ret;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
		io_kbuf_drop_legacy(req);
		return ret;
	}

	if (!__io_put_kbuf_ring(req, len, nbufs))
		ret |= IORING_CQE_F_BUF_MORE;
	return ret;
}

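/*
 * Remove up to "nbufs" buffers from a group. A ring mapped group has its
 * whole region freed in one go; legacy buffers are freed one by one.
 * Returns the number of buffers removed.
 */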
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->flags & IOBL_BUF_RING) {
		i = bl->buf_ring->tail - bl->head;
		io_free_region(ctx, &bl->region);
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->flags &= ~IOBL_BUF_RING;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		kfree(nxt);

		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	__io_remove_buffers(ctx, bl, -1U);
	kfree(bl);
}

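/*
 * Free all buffer groups at ring teardown. Each list is unlinked from the
 * xarray under ->mmap_lock before it's freed, so mmap side lookups cannot
 * find a dying list.
 */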
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;

	while (1) {
		unsigned long index = 0;

		scoped_guard(mutex, &ctx->mmap_lock) {
			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
			if (bl)
				xa_erase(&ctx->io_bl_xa, bl->bgid);
		}
		if (!bl)
			break;
		io_put_bl(ctx, bl);
	}
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	scoped_guard(mutex, &ctx->mmap_lock)
		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
	io_put_bl(ctx, bl);
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!(bl->flags & IOBL_BUF_RING))
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

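/*
 * Validate an IORING_OP_PROVIDE_BUFFERS request: the buffer count, total
 * size and address range must not overflow, and the resulting buffer IDs
 * must fit within the 16-bit BID space of the group.
 */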
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
		if (!buf)
			break;

		list_add_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->flags & IOBL_BUF_RING) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

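/*
 * Register a ring mapped buffer group (IORING_REGISTER_PBUF_RING). The
 * ring memory either comes from the application (reg.ring_addr), or is
 * allocated by the kernel and later mmap'ed by the application if
 * IOU_PBUF_RING_MMAP is set.
 */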
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	struct io_uring_region_desc rd;
	struct io_uring_buf_ring *br;
	unsigned long mmap_offset;
	unsigned long ring_size;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;
	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
		io_destroy_bl(ctx, bl);
	}

	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;

	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
	ring_size = flex_array_size(br, bufs, reg.ring_entries);

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(ring_size);
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		rd.user_addr = reg.ring_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
	if (ret)
		goto fail;
	br = io_region_get_ptr(&bl->region);

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
		ret = -EINVAL;
		goto fail;
	}
#endif

	bl->nr_entries = reg.ring_entries;
	bl->mask = reg.ring_entries - 1;
	bl->flags |= IOBL_BUF_RING;
	bl->buf_ring = br;
	if (reg.flags & IOU_PBUF_RING_INC)
		bl->flags |= IOBL_INC;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
fail:
	io_free_region(ctx, &bl->region);
	kfree(free_bl);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->io_bl_xa, bl->bgid);

	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

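/*
 * Look up the region backing a ring mapped buffer group, for the mmap
 * path. Only valid under ->mmap_lock, and only for groups created via
 * IORING_REGISTER_PBUF_RING.
 */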
struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid)
{
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->mmap_lock);

	bl = xa_load(&ctx->io_bl_xa, bgid);
	if (!bl || !(bl->flags & IOBL_BUF_RING))
		return NULL;
	return &bl->region;
}