#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "notif.h"
#include "rsrc.h"

static const struct ubuf_info_ops io_ubuf_ops;

/*
 * Task-work completion handler: runs with the ring's uring_lock held and
 * posts a CQE for every notification linked into this chain via ->next.
 */
static void io_notif_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *notif = tw_req.req;
	struct io_notif_data *nd = io_notif_to_data(notif);
	struct io_ring_ctx *ctx = notif->ctx;

	lockdep_assert_held(&ctx->uring_lock);

	do {
		notif = cmd_to_io_kiocb(nd);

		if (WARN_ON_ONCE(ctx != notif->ctx))
			return;
		lockdep_assert(refcount_read(&nd->uarg.refcnt) == 0);

		if (unlikely(nd->zc_report) && (nd->zc_copied || !nd->zc_used))
			notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;

		if (nd->account_pages && notif->ctx->user) {
			__io_unaccount_mem(notif->ctx->user, nd->account_pages);
			nd->account_pages = 0;
		}

		nd = nd->next;
		io_req_task_complete((struct io_tw_req){notif}, tw);
	} while (nd);
}

/*
 * Drops one reference to the notification's ubuf_info. On the final put,
 * a non-head notification forwards its reference to the chain head, and the
 * head punts completion of the whole chain to task_work.
 */
void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			 bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
	unsigned tw_flags;

	if (nd->zc_report) {
		/* record whether data was sent zerocopy or copied by the stack */
		if (success && !nd->zc_used && skb)
			WRITE_ONCE(nd->zc_used, true);
		else if (!success && !nd->zc_copied)
			WRITE_ONCE(nd->zc_copied, true);
	}

	if (!refcount_dec_and_test(&uarg->refcnt))
		return;

	if (nd->head != nd) {
		io_tx_ubuf_complete(skb, &nd->head->uarg, success);
		return;
	}

	tw_flags = nd->next ? 0 : IOU_F_TWQ_LAZY_WAKE;
	notif->io_task_work.func = io_notif_tw_complete;
	__io_req_task_work_add(notif, tw_flags);
}

/*
 * ->link_skb callback: attach this notification to @skb, or, if the skb
 * already carries an io_uring notification, link the two so they complete
 * together in one task_work run.
 */
static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
{
	struct io_notif_data *nd, *prev_nd;
	struct io_kiocb *prev_notif, *notif;
	struct ubuf_info *prev_uarg = skb_zcopy(skb);

	nd = container_of(uarg, struct io_notif_data, uarg);
	notif = cmd_to_io_kiocb(nd);

	if (!prev_uarg) {
		net_zcopy_get(&nd->uarg);
		skb_zcopy_init(skb, &nd->uarg);
		return 0;
	}
	/* handle it separately as we can't link a notif to itself */
	if (unlikely(prev_uarg == &nd->uarg))
		return 0;
	/* we can't join two links together, just request a fresh skb */
	if (unlikely(nd->head != nd || nd->next))
		return -EEXIST;
	/* don't mix zc providers */
	if (unlikely(prev_uarg->ops != &io_ubuf_ops))
		return -EEXIST;

	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
	prev_notif = cmd_to_io_kiocb(prev_nd);

	/* make sure all notifications can be finished in the same task_work */
	if (unlikely(notif->ctx != prev_notif->ctx ||
		     notif->tctx != prev_notif->tctx))
		return -EEXIST;

	nd->head = prev_nd->head;
	nd->next = prev_nd->next;
	prev_nd->next = nd;
	net_zcopy_get(&nd->head->uarg);
	return 0;
}

static const struct ubuf_info_ops io_ubuf_ops = {
	.complete = io_tx_ubuf_complete,
	.link_skb = io_link_skb,
};

/*
 * Allocate a zerocopy notification: an internal NOP request whose CQE is
 * posted once all references to the associated buffer have been dropped.
 */
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req(ctx, &notif)))
		return NULL;
	notif->ctx = ctx;
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->tctx = current->io_uring;
	io_get_task_refs(1);
	notif->file_node = NULL;
	notif->buf_node = NULL;

	nd = io_notif_to_data(notif);
	nd->zc_report = false;
	nd->account_pages = 0;
	nd->next = NULL;
	nd->head = nd;

	nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
	nd->uarg.ops = &io_ubuf_ops;
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}