// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <linux/lockdep.h>

#define IO_NODE_ALLOC_CACHE_MAX	32

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

struct io_rsrc_node {
	unsigned char			type;
	int				refs;

	u64 tag;
	union {
		unsigned long file_ptr;
		struct io_mapped_ubuf *buf;
	};
};

struct io_mapped_ubuf {
	u64		ubuf;
	unsigned int	len;
	unsigned int	nr_bvecs;
	unsigned int	folio_shift;
	refcount_t	refs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* For non-head/tail folios, has to be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;
	unsigned int	nr_folios;
};

struct io_rsrc_node *io_rsrc_node_alloc(int type);
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
			      struct io_imu_folio_data *data);

static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
						       int index)
{
	if (index < data->nr)
		return data->nodes[array_index_nospec(index, data->nr)];
	return NULL;
}

static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (node && !--node->refs)
		io_free_rsrc_node(ctx, node);
}

static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
				      struct io_rsrc_data *data, int index)
{
	struct io_rsrc_node *node = data->nodes[index];

	if (!node)
		return false;
	io_put_rsrc_node(ctx, node);
	data->nodes[index] = NULL;
	return true;
}

static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
{
	if (req->file_node) {
		io_put_rsrc_node(req->ctx, req->file_node);
		req->file_node = NULL;
	}
	if (req->flags & REQ_F_BUF_NODE) {
		io_put_rsrc_node(req->ctx, req->buf_node);
		req->buf_node = NULL;
	}
}

static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
					   struct io_rsrc_node *node)
{
	node->refs++;
	*dst_node = node;
}

static inline void io_req_assign_buf_node(struct io_kiocb *req,
					  struct io_rsrc_node *node)
{
	io_req_assign_rsrc_node(&req->buf_node, node);
	req->flags |= REQ_F_BUF_NODE;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif
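
/*
 * Usage sketch (illustrative only): a consumer of a registered buffer would
 * typically look the node up and pin it for the lifetime of the request while
 * holding ->uring_lock, then drop it at cleanup time. The buf_table and
 * buf_index names below are assumptions about the surrounding io_uring code,
 * not something this header defines.
 *
 *	struct io_rsrc_node *node;
 *
 *	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
 *	if (!node)
 *		return -EFAULT;
 *	io_req_assign_buf_node(req, node);	// bumps node->refs, sets REQ_F_BUF_NODE
 *	...
 *	io_req_put_rsrc_nodes(req);		// drops the reference on cleanup
 */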