// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#define IO_NODE_ALLOC_CACHE_MAX 32

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

struct io_rsrc_put {
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

struct io_rsrc_data {
	struct io_ring_ctx		*ctx;

	u64				**tags;
	unsigned int			nr;
	u16				rsrc_type;
	bool				quiesce;
};

struct io_rsrc_node {
	struct io_ring_ctx		*ctx;
	int				refs;
	bool				empty;
	u16				type;
	struct list_head		node;
	struct io_rsrc_put		item;
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned int	folio_shift;
	unsigned long	acct_pages;
	unsigned long	folio_mask;
	refcount_t	refs;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* For non-head/tail folios, has to be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;
};

void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

int io_register_copy_buffers(struct io_ring_ctx *ctx, void __user *arg);
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

/* Drop a node reference; the final put is handled by io_rsrc_node_ref_zero() */
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);

	if (node && !--node->refs)
		io_rsrc_node_ref_zero(node);
}

static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx,
				       struct io_rsrc_node *node)
{
	node->refs++;
}

static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
{
	lockdep_assert_held(&ctx->uring_lock);
	req->rsrc_node = ctx->rsrc_node;
	io_charge_rsrc_node(ctx, ctx->rsrc_node);
}

/* Attach the current rsrc node to @req, taking the ring lock as dictated by @issue_flags */
static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		io_ring_submit_lock(ctx, issue_flags);
		__io_req_set_rsrc_node(req, ctx);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}

static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
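	/*
	 * Tags sit in a two-level table: ->tags points at page-sized chunks,
	 * each holding PAGE_SIZE / sizeof(u64) slots (IO_RSRC_TAG_TABLE_MAX).
	 * The upper bits of idx select the chunk, the lower bits the slot.
	 */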
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}

static inline int io_rsrc_init(struct io_ring_ctx *ctx)
{
	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	return ctx->rsrc_node ? 0 : -ENOMEM;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif