xref: /linux/io_uring/notif.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/file.h>
4 #include <linux/slab.h>
5 #include <linux/net.h>
6 #include <linux/io_uring.h>
7 
8 #include "io_uring.h"
9 #include "notif.h"
10 #include "rsrc.h"
11 
12 static const struct ubuf_info_ops io_ubuf_ops;
13 
io_notif_tw_complete(struct io_kiocb * notif,struct io_tw_state * ts)14 static void io_notif_tw_complete(struct io_kiocb *notif, struct io_tw_state *ts)
15 {
16 	struct io_notif_data *nd = io_notif_to_data(notif);
17 
18 	do {
19 		notif = cmd_to_io_kiocb(nd);
20 
21 		lockdep_assert(refcount_read(&nd->uarg.refcnt) == 0);
22 
23 		if (unlikely(nd->zc_report) && (nd->zc_copied || !nd->zc_used))
24 			notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
25 
26 		if (nd->account_pages && notif->ctx->user) {
27 			__io_unaccount_mem(notif->ctx->user, nd->account_pages);
28 			nd->account_pages = 0;
29 		}
30 
31 		nd = nd->next;
32 		io_req_task_complete(notif, ts);
33 	} while (nd);
34 }
35 
/*
 * ubuf_info_ops::complete callback, invoked by the networking stack when
 * an skb referencing this notification's buffers is done with them.
 * Drops one reference; the final drop queues task_work to post the CQE(s).
 */
void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			 bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
	unsigned tw_flags;

	if (nd->zc_report) {
		/* record whether the data went out zerocopy or was copied */
		if (success && !nd->zc_used && skb)
			WRITE_ONCE(nd->zc_used, true);
		else if (!success && !nd->zc_copied)
			WRITE_ONCE(nd->zc_copied, true);
	}

	/* zc_used/zc_copied are updated before the final ref can go away */
	if (!refcount_dec_and_test(&uarg->refcnt))
		return;

	/*
	 * Linked notifs complete through the chain head: dropping this
	 * notif's last ref translates into dropping one ref on the head,
	 * and only the head's final ref schedules the completion below.
	 */
	if (nd->head != nd) {
		io_tx_ubuf_complete(skb, &nd->head->uarg, success);
		return;
	}

	/*
	 * LAZY_WAKE is only used for a standalone notif; a chain
	 * (nd->next set) completes multiple requests in one task_work
	 * run, presumably more than a lazy wake accounts for.
	 */
	tw_flags = nd->next ? 0 : IOU_F_TWQ_LAZY_WAKE;
	notif->io_task_work.func = io_notif_tw_complete;
	__io_req_task_work_add(notif, tw_flags);
}
62 
io_link_skb(struct sk_buff * skb,struct ubuf_info * uarg)63 static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
64 {
65 	struct io_notif_data *nd, *prev_nd;
66 	struct io_kiocb *prev_notif, *notif;
67 	struct ubuf_info *prev_uarg = skb_zcopy(skb);
68 
69 	nd = container_of(uarg, struct io_notif_data, uarg);
70 	notif = cmd_to_io_kiocb(nd);
71 
72 	if (!prev_uarg) {
73 		net_zcopy_get(&nd->uarg);
74 		skb_zcopy_init(skb, &nd->uarg);
75 		return 0;
76 	}
77 	/* handle it separately as we can't link a notif to itself */
78 	if (unlikely(prev_uarg == &nd->uarg))
79 		return 0;
80 	/* we can't join two links together, just request a fresh skb */
81 	if (unlikely(nd->head != nd || nd->next))
82 		return -EEXIST;
83 	/* don't mix zc providers */
84 	if (unlikely(prev_uarg->ops != &io_ubuf_ops))
85 		return -EEXIST;
86 
87 	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
88 	prev_notif = cmd_to_io_kiocb(nd);
89 
90 	/* make sure all noifications can be finished in the same task_work */
91 	if (unlikely(notif->ctx != prev_notif->ctx ||
92 		     notif->task != prev_notif->task))
93 		return -EEXIST;
94 
95 	nd->head = prev_nd->head;
96 	nd->next = prev_nd->next;
97 	prev_nd->next = nd;
98 	net_zcopy_get(&nd->head->uarg);
99 	return 0;
100 }
101 
/* zerocopy notification callbacks handed to the networking stack */
static const struct ubuf_info_ops io_ubuf_ops = {
	.complete = io_tx_ubuf_complete,
	.link_skb = io_link_skb,
};
106 
io_alloc_notif(struct io_ring_ctx * ctx)107 struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
108 	__must_hold(&ctx->uring_lock)
109 {
110 	struct io_kiocb *notif;
111 	struct io_notif_data *nd;
112 
113 	if (unlikely(!io_alloc_req(ctx, &notif)))
114 		return NULL;
115 	notif->opcode = IORING_OP_NOP;
116 	notif->flags = 0;
117 	notif->file = NULL;
118 	notif->task = current;
119 	io_get_task_refs(1);
120 	notif->rsrc_node = NULL;
121 
122 	nd = io_notif_to_data(notif);
123 	nd->zc_report = false;
124 	nd->account_pages = 0;
125 	nd->next = NULL;
126 	nd->head = nd;
127 
128 	nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
129 	nd->uarg.ops = &io_ubuf_ops;
130 	refcount_set(&nd->uarg.refcnt, 1);
131 	return notif;
132 }
133