// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <linux/io_uring_types.h>
#include <linux/lockdep.h>

/* Discriminator for io_rsrc_node::type */
enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};

/*
 * One registered resource: either a fixed file or a mapped user buffer,
 * selected by @type. @refs is a plain int, not a refcount_t — callers are
 * expected to serialize on ctx->uring_lock (see the lockdep assert in
 * io_put_rsrc_node() below).
 */
struct io_rsrc_node {
	unsigned char type;
	int refs;

	u64 tag;
	union {
		/* IORING_RSRC_FILE: tagged file pointer */
		unsigned long file_ptr;
		/* IORING_RSRC_BUFFER: pinned user buffer */
		struct io_mapped_ubuf *buf;
	};
};

/* Allowed data directions for a mapped buffer, as bits of imu->dir */
enum {
	IO_IMU_DEST	= 1 << ITER_DEST,
	IO_IMU_SOURCE	= 1 << ITER_SOURCE,
};

/* A pinned/registered user (or kernel) buffer, backed by @nr_bvecs pages */
struct io_mapped_ubuf {
	u64		ubuf;		/* original user address */
	unsigned int	len;
	unsigned int	nr_bvecs;
	unsigned int    folio_shift;
	refcount_t	refs;
	unsigned long	acct_pages;	/* pages charged to the user's memlock */
	void		(*release)(void *);	/* optional destructor, called with @priv */
	void		*priv;
	bool		is_kbuf;	/* kernel-provided buffer, not user pages */
	u8		dir;		/* IO_IMU_DEST / IO_IMU_SOURCE bits */
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

/* Result of probing whether a buffer's pages can be coalesced into folios */
struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* For non-head/tail folios, has to be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;
	unsigned int	nr_folios;
};

bool io_rsrc_cache_init(struct io_ring_ctx *ctx);
void io_rsrc_cache_free(struct io_ring_ctx *ctx);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);

struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
				      unsigned issue_flags);
int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
			u64 buf_addr, size_t len, int ddir,
			unsigned issue_flags);

int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
			      struct io_imu_folio_data *data);

/*
 * Bounds-checked lookup of a resource node. Returns NULL for an
 * out-of-range index; array_index_nospec() clamps the index under
 * speculative execution (Spectre-v1 hardening).
 */
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
						       int index)
{
	if (index < data->nr)
		return data->nodes[array_index_nospec(index, data->nr)];
	return NULL;
}

/*
 * Drop one reference to @node, freeing it when the count hits zero.
 * The non-atomic decrement is safe because ctx->uring_lock must be held.
 */
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (!--node->refs)
		io_free_rsrc_node(ctx, node);
}

/*
 * Clear slot @index in @data, dropping its node's reference.
 * Returns true if a node was installed, false if the slot was empty.
 * Note: unlike io_rsrc_node_lookup(), @index is not range-checked here;
 * presumably callers validate it first — confirm at call sites.
 */
static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
				      struct io_rsrc_data *data, int index)
{
	struct io_rsrc_node *node = data->nodes[index];

	if (!node)
		return false;
	io_put_rsrc_node(ctx, node);
	data->nodes[index] = NULL;
	return true;
}

/*
 * Release any fixed-file and fixed-buffer nodes a request holds.
 * The file node is detected by pointer, the buffer node by REQ_F_BUF_NODE
 * (buf_node shares storage with other request state while the flag is clear).
 */
static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
{
	if (req->file_node) {
		io_put_rsrc_node(req->ctx, req->file_node);
		req->file_node = NULL;
	}
	if (req->flags & REQ_F_BUF_NODE) {
		io_put_rsrc_node(req->ctx, req->buf_node);
		req->buf_node = NULL;
	}
}

/*
 * Take a reference on @node and store it in *@dst_node. As with
 * io_put_rsrc_node(), the bare increment relies on ctx->uring_lock
 * serialization.
 */
static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
					   struct io_rsrc_node *node)
{
	node->refs++;
	*dst_node = node;
}

/* Attach a buffer node to @req and mark it so teardown knows to drop it. */
static inline void io_req_assign_buf_node(struct io_kiocb *req,
					  struct io_rsrc_node *node)
{
	io_req_assign_rsrc_node(&req->buf_node, node);
	req->flags |= REQ_F_BUF_NODE;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

/* Charge @nr_pages against @user's RLIMIT_MEMLOCK accounting. */
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

/* Uncharge pages previously accounted with __io_account_mem(). */
static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif