// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <linux/lockdep.h>

/* Values stored in io_rsrc_node::type to tag which union member is live */
enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};
11
12 struct io_rsrc_node {
13 unsigned char type;
14 int refs;
15
16 u64 tag;
17 union {
18 unsigned long file_ptr;
19 struct io_mapped_ubuf *buf;
20 };
21 };
22
23 struct io_mapped_ubuf {
24 u64 ubuf;
25 unsigned int len;
26 unsigned int nr_bvecs;
27 unsigned int folio_shift;
28 refcount_t refs;
29 unsigned long acct_pages;
30 struct bio_vec bvec[] __counted_by(nr_bvecs);
31 };
32
/*
 * Scratch description of how a run of pinned pages maps onto folios,
 * used when deciding whether bvec entries can be coalesced.
 */
struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* For non-head/tail folios, has to be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;
	unsigned int	nr_folios;
};
41
42 struct io_rsrc_node *io_rsrc_node_alloc(int type);
43 void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
44 void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
45 int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
46
47 int io_import_fixed(int ddir, struct iov_iter *iter,
48 struct io_mapped_ubuf *imu,
49 u64 buf_addr, size_t len);
50
51 int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
52 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
53 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
54 unsigned int nr_args, u64 __user *tags);
55 int io_sqe_files_unregister(struct io_ring_ctx *ctx);
56 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
57 unsigned nr_args, u64 __user *tags);
58
59 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
60 unsigned nr_args);
61 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
62 unsigned size, unsigned type);
63 int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
64 unsigned int size, unsigned int type);
65
66 bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
67 struct io_imu_folio_data *data);
68
io_rsrc_node_lookup(struct io_rsrc_data * data,int index)69 static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
70 int index)
71 {
72 if (index < data->nr)
73 return data->nodes[array_index_nospec(index, data->nr)];
74 return NULL;
75 }
76
io_put_rsrc_node(struct io_ring_ctx * ctx,struct io_rsrc_node * node)77 static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
78 {
79 lockdep_assert_held(&ctx->uring_lock);
80 if (node && !--node->refs)
81 io_free_rsrc_node(ctx, node);
82 }
83
io_reset_rsrc_node(struct io_ring_ctx * ctx,struct io_rsrc_data * data,int index)84 static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
85 struct io_rsrc_data *data, int index)
86 {
87 struct io_rsrc_node *node = data->nodes[index];
88
89 if (!node)
90 return false;
91 io_put_rsrc_node(ctx, node);
92 data->nodes[index] = NULL;
93 return true;
94 }
95
io_req_put_rsrc_nodes(struct io_kiocb * req)96 static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
97 {
98 if (req->file_node) {
99 io_put_rsrc_node(req->ctx, req->file_node);
100 req->file_node = NULL;
101 }
102 if (req->flags & REQ_F_BUF_NODE) {
103 io_put_rsrc_node(req->ctx, req->buf_node);
104 req->buf_node = NULL;
105 }
106 }
107
io_req_assign_rsrc_node(struct io_rsrc_node ** dst_node,struct io_rsrc_node * node)108 static inline void io_req_assign_rsrc_node(struct io_rsrc_node **dst_node,
109 struct io_rsrc_node *node)
110 {
111 node->refs++;
112 *dst_node = node;
113 }
114
io_req_assign_buf_node(struct io_kiocb * req,struct io_rsrc_node * node)115 static inline void io_req_assign_buf_node(struct io_kiocb *req,
116 struct io_rsrc_node *node)
117 {
118 io_req_assign_rsrc_node(&req->buf_node, node);
119 req->flags |= REQ_F_BUF_NODE;
120 }
121
/* IORING_OP_FILES_UPDATE request handling */
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

/* Memory accounting against the user's locked-memory total */
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
126
__io_unaccount_mem(struct user_struct * user,unsigned long nr_pages)127 static inline void __io_unaccount_mem(struct user_struct *user,
128 unsigned long nr_pages)
129 {
130 atomic_long_sub(nr_pages, &user->locked_vm);
131 }
132
#endif