// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <net/af_unix.h>

#include "alloc_cache.h"

#define IO_NODE_ALLOC_CACHE_MAX 32

/* Each second-level tag table holds one page worth of u64 tags. */
#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

struct io_rsrc_put {
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	u16				rsrc_type;
	bool				quiesce;
};

struct io_rsrc_node {
	union {
		struct io_cache_entry		cache;
		struct io_ring_ctx		*ctx;
	};
	int				refs;
	bool				empty;
	u16				type;
	struct list_head		node;
	struct io_rsrc_put		item;
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

/*
 * Drop a reference to @node; node->refs is protected by ->uring_lock.
 * When the last reference goes away, the node's resources are put via
 * io_rsrc_node_ref_zero().
 */
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (node && !--node->refs)
		io_rsrc_node_ref_zero(node);
}

static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	io_put_rsrc_node(ctx, req->rsrc_node);
}

static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
				       struct io_rsrc_node *node)
{
	node->refs++;
}

static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	lockdep_assert_held(&ctx->uring_lock);
	req->rsrc_node = ctx->rsrc_node;
	io_charge_rsrc_node(ctx, ctx->rsrc_node);
}

static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		__io_req_set_rsrc_node(req, ctx);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}

/*
 * Tags live in a two-level table: @idx is split into an index into the
 * top-level page array and an offset within that page.
 */
static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}

/* Allocate the initial rsrc node at ring setup time. */
static inline int io_rsrc_init(struct io_ring_ctx *ctx)
{
	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_node ? 0 : -ENOMEM;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif
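
/*
 * Illustrative sketch, not part of this header and not compiled: how an
 * opcode handler might use the helpers above when issuing I/O against a
 * registered (fixed) buffer. The function io_example_read_fixed() and its
 * exact call site are assumptions for illustration only;
 * io_req_set_rsrc_node() and io_import_fixed() are the real helpers
 * declared above, and ITER_DEST is the read data direction from
 * <linux/uio.h>.
 */
#if 0	/* example only */
static int io_example_read_fixed(struct io_kiocb *req,
				 struct io_mapped_ubuf *imu,
				 u64 buf_addr, size_t len,
				 unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct iov_iter iter;
	int ret;

	/*
	 * Pin the currently active rsrc node: this takes a reference so
	 * the registered buffer stays alive for the in-flight request,
	 * even if userspace unregisters it concurrently.
	 */
	io_req_set_rsrc_node(req, ctx, issue_flags);

	/* Map the registered range [buf_addr, buf_addr + len) for a read. */
	ret = io_import_fixed(ITER_DEST, &iter, imu, buf_addr, len);
	if (ret)
		return ret;

	/* ... issue the actual I/O with &iter; the reference is dropped
	 * on completion, e.g. via io_req_put_rsrc_locked(). */
	return 0;
}
#endif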