xref: /linux/io_uring/rsrc.h (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <linux/io_uring_types.h>
#include <linux/lockdep.h>

#define IO_VEC_CACHE_SOFT_CAP		256

enum {
	IORING_RSRC_FILE		= 0,
	IORING_RSRC_BUFFER		= 1,
};

/*
 * One registered resource: either a fixed file or a registered buffer,
 * as indicated by @type (IORING_RSRC_FILE / IORING_RSRC_BUFFER).
 */
struct io_rsrc_node {
	unsigned char			type;
	int				refs;

	u64 tag;
	union {
		unsigned long file_ptr;
		struct io_mapped_ubuf *buf;
	};
};

enum {
	IO_IMU_DEST	= 1 << ITER_DEST,
	IO_IMU_SOURCE	= 1 << ITER_SOURCE,
};

enum {
	IO_REGBUF_F_KBUF		= 1,
};

/*
 * A registered buffer, described as an array of bio_vecs. @dir is a mask
 * of IO_IMU_* bits for the transfer directions the buffer may be used in.
 */
struct io_mapped_ubuf {
	u64		ubuf;
	unsigned int	len;
	unsigned int	nr_bvecs;
	unsigned int    folio_shift;
	refcount_t	refs;
	unsigned long	acct_pages;
	void		(*release)(void *);
	void		*priv;
	u8		flags;
	u8		dir;
	struct bio_vec	bvec[] __counted_by(nr_bvecs);
};

struct io_imu_folio_data {
	/* Head folio can be partially included in the fixed buf */
	unsigned int	nr_pages_head;
	/* Non-head/tail folios must be fully included */
	unsigned int	nr_pages_mid;
	unsigned int	folio_shift;
	unsigned int	nr_folios;
	unsigned long	first_folio_page_idx;
};
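
/*
 * Worked example (illustrative, assuming 4K base pages): a buffer backed
 * by 2MB THPs yields folio_shift = 21 and nr_pages_mid = 512. If the
 * buffer starts 16 pages into the first folio and spans further folios,
 * first_folio_page_idx is 16 and nr_pages_head is 496.
 */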

bool io_rsrc_cache_init(struct io_ring_ctx *ctx);
void io_rsrc_cache_free(struct io_ring_ctx *ctx);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);

struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
				      unsigned issue_flags);
int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
			u64 buf_addr, size_t len, int ddir,
			unsigned issue_flags);
int io_import_reg_vec(int ddir, struct iov_iter *iter,
			struct io_kiocb *req, struct iou_vec *vec,
			unsigned nr_iovs, unsigned issue_flags);
int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
			const struct iovec __user *uvec, size_t uvec_segs);

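/*
 * Usage sketch (illustrative only): importing a registered buffer into an
 * iov_iter before issuing I/O. rw->addr and rw->len stand in for the
 * request's buffer address and length and are not defined in this header.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_import_reg_buf(req, &iter, rw->addr, rw->len, ITER_DEST,
 *				issue_flags);
 *	if (unlikely(ret))
 *		return ret;
 */
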
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			unsigned int size, unsigned int type);
int io_validate_user_buf_range(u64 uaddr, u64 ulen);

bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
			      struct io_imu_folio_data *data);

/*
 * Bounds-checked lookup of a registered resource; array_index_nospec()
 * clamps @index so it cannot be used speculatively out of bounds.
 */
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
						       unsigned int index)
{
	if (index < data->nr)
		return data->nodes[array_index_nospec(index, data->nr)];
	return NULL;
}
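
/*
 * Usage sketch (illustrative only; the caller and locking shown are
 * assumptions, not definitions from this header): resolve a registered
 * index under ctx->uring_lock and take a reference on the node.
 *
 *	struct io_rsrc_node *node;
 *
 *	lockdep_assert_held(&ctx->uring_lock);
 *	node = io_rsrc_node_lookup(data, index);
 *	if (!node)
 *		return -EFAULT;
 *	node->refs++;
 */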

/*
 * Drop a reference; the node is freed once the last reference goes away.
 * Must be called with ctx->uring_lock held.
 */
static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (!--node->refs)
		io_free_rsrc_node(ctx, node);
}

/*
 * Drop and clear the node at @index. Returns true if a node was installed
 * there, false if the slot was already empty.
 */
static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
				      struct io_rsrc_data *data, int index)
{
	struct io_rsrc_node *node = data->nodes[index];

	if (!node)
		return false;
	io_put_rsrc_node(ctx, node);
	data->nodes[index] = NULL;
	return true;
}
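
/*
 * Teardown sketch (hypothetical caller, for illustration only; real update
 * and unregister paths also deal with tags and replacement nodes):
 *
 *	for (i = 0; i < data->nr; i++)
 *		io_reset_rsrc_node(ctx, data, i);
 */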

int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
int io_account_mem(struct user_struct *user, struct mm_struct *mm_account,
		   unsigned long nr_pages);
void io_unaccount_mem(struct user_struct *user, struct mm_struct *mm_account,
		      unsigned long nr_pages);

/* Uncharge pinned-memory accounting from the user's locked_vm counter. */
static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

void io_vec_free(struct iou_vec *iv);
int io_vec_realloc(struct iou_vec *iv, unsigned nr_entries);

/* Free any iovec array @iv currently owns and take ownership of @iovec. */
static inline void io_vec_reset_iovec(struct iou_vec *iv,
				      struct iovec *iovec, unsigned nr)
{
	io_vec_free(iv);
	iv->iovec = iovec;
	iv->nr = nr;
}
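
/*
 * Usage sketch (illustrative only; the allocation strategy is an
 * assumption): hand a freshly allocated iovec array to an iou_vec,
 * releasing whatever it owned before.
 *
 *	struct iovec *iov = kmalloc_array(nr, sizeof(*iov), GFP_KERNEL);
 *
 *	if (!iov)
 *		return -ENOMEM;
 *	io_vec_reset_iovec(vec, iov, nr);
 */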

/*
 * With KASAN enabled, free the backing array instead of keeping it for a
 * cached iou_vec, so that invalid reuse can be detected.
 */
static inline void io_alloc_cache_vec_kasan(struct iou_vec *iv)
{
	if (IS_ENABLED(CONFIG_KASAN))
		io_vec_free(iv);
}

#endif