// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
					     struct file *file,
					     loff_t start, size_t len,
					     enum netfs_io_origin origin)
{
	static atomic_t debug_ids;
	struct inode *inode = file ? file_inode(file) : mapping->host;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct netfs_io_request *rreq;
	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
	struct kmem_cache *cache = mempool->pool_data;
	int ret;

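	/* Allocate from the filesystem's request pool if it nominated one,
	 * otherwise the common netfs pool.  Sleep briefly and retry rather
	 * than failing the request if the pool is temporarily exhausted.
	 */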
	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);
	}

	memset(rreq, 0, kmem_cache_size(cache));
	rreq->start	= start;
	rreq->len	= len;
	rreq->origin	= origin;
	rreq->netfs_ops	= ctx->ops;
	rreq->mapping	= mapping;
	rreq->inode	= inode;
	rreq->i_size	= i_size_read(inode);
	rreq->debug_id	= atomic_inc_return(&debug_ids);
	rreq->wsize	= INT_MAX;
	rreq->io_streams[0].sreq_max_len = ULONG_MAX;
	rreq->io_streams[0].sreq_max_segs = 0;
	spin_lock_init(&rreq->lock);
	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);

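	/* Pick the completion worker by I/O direction: read-type requests
	 * are finished off by the read termination worker, everything else
	 * by the write collection worker.
	 */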
	if (origin == NETFS_READAHEAD ||
	    origin == NETFS_READPAGE ||
	    origin == NETFS_READ_GAPS ||
	    origin == NETFS_READ_FOR_WRITE ||
	    origin == NETFS_DIO_READ)
		INIT_WORK(&rreq->work, netfs_read_termination_worker);
	else
		INIT_WORK(&rreq->work, netfs_write_collection_worker);

	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	if (file && file->f_flags & O_NONBLOCK)
		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
	if (rreq->netfs_ops->init_request) {
		ret = rreq->netfs_ops->init_request(rreq, file);
		if (ret < 0) {
			mempool_free(rreq, mempool);
			return ERR_PTR(ret);
		}
	}

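	/* Count this request against the inode; the matching decrement in
	 * netfs_free_request() wakes anyone waiting for the inode's I/O to
	 * drain.
	 */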
	atomic_inc(&ctx->io_count);
	trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
	netfs_proc_add_rreq(rreq);
	netfs_stat(&netfs_n_rh_rreq);
	return rreq;
}

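/*
 * Get an additional ref on a request and note the new refcount in the
 * tracing log.
 */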
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
	int r;

	__refcount_inc(&rreq->ref, &r);
	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

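/*
 * Release all of the subrequests attached to a request, both those on the
 * main list and those queued on the I/O streams.
 */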
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
					  struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		netfs_put_subrequest(subreq, was_async,
				     netfs_sreq_trace_put_clear);
	}

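	/* Do the same for the subrequests queued on each I/O stream. */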
	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
		stream = &rreq->io_streams[s];
		while (!list_empty(&stream->subrequests)) {
			subreq = list_first_entry(&stream->subrequests,
						  struct netfs_io_subrequest, rreq_link);
			list_del(&subreq->rreq_link);
			netfs_put_subrequest(subreq, was_async,
					     netfs_sreq_trace_put_clear);
		}
	}
}

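/*
 * Return the request to its mempool only after an RCU grace period, as it
 * may still be visible to RCU-protected list walkers (e.g. the /proc list
 * it was added to by netfs_proc_add_rreq()).
 */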
static void netfs_free_request_rcu(struct rcu_head *rcu)
{
	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
	netfs_stat_d(&netfs_n_rh_rreq);
}

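/*
 * Tear down a request once the last ref has been dropped: unhook it from the
 * /proc list, discard its subrequests, give the filesystem and the cache a
 * chance to clean up, release any pinned DIO pages and then defer the final
 * free to RCU.
 */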
static void netfs_free_request(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);
	struct netfs_inode *ictx = netfs_inode(rreq->inode);
	unsigned int i;

	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
	netfs_proc_del_rreq(rreq);
	netfs_clear_subrequests(rreq, false);
	if (rreq->netfs_ops->free_request)
		rreq->netfs_ops->free_request(rreq);
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	if (rreq->direct_bv) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			if (rreq->direct_bv[i].bv_page) {
				if (rreq->direct_bv_unpin)
					unpin_user_page(rreq->direct_bv[i].bv_page);
			}
		}
		kvfree(rreq->direct_bv);
	}
	netfs_clear_buffer(rreq);

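	/* If this was the inode's last outstanding request, wake anyone
	 * waiting for its I/O to drain.
	 */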
	if (atomic_dec_and_test(&ictx->io_count))
		wake_up_var(&ictx->io_count);
	call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

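/*
 * Drop a ref on a request and free it if that was the last one.  If the
 * caller is running asynchronously and so may not be able to sleep, the
 * freeing is punted to a workqueue.
 */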
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
		       enum netfs_rreq_ref_trace what)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (rreq) {
		debug_id = rreq->debug_id;
		dead = __refcount_dec_and_test(&rreq->ref, &r);
		trace_netfs_rreq_ref(debug_id, r - 1, what);
		if (dead) {
			if (was_async) {
				/* Repurpose the request's work item to run
				 * the destructor off a workqueue.
				 */
				rreq->work.func = netfs_free_request;
				if (!queue_work(system_unbound_wq, &rreq->work))
					WARN_ON(1);
			} else {
				netfs_free_request(&rreq->work);
			}
		}
	}
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
	struct kmem_cache *cache = mempool->pool_data;

	for (;;) {
		subreq = mempool_alloc(mempool, GFP_KERNEL);
		if (subreq)
			break;
		msleep(10);
	}

	memset(subreq, 0, kmem_cache_size(cache));
	INIT_WORK(&subreq->work, NULL);
	INIT_LIST_HEAD(&subreq->rreq_link);
	refcount_set(&subreq->ref, 2);
	subreq->rreq = rreq;
	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
	/* Each subrequest pins its parent request. */
	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
	netfs_stat(&netfs_n_rh_sreq);
	return subreq;
}

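/*
 * Get an additional ref on a subrequest and note the new refcount in the
 * tracing log.
 */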
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
			  enum netfs_sreq_ref_trace what)
{
	int r;

	__refcount_inc(&subreq->ref, &r);
	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
			     what);
}

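/*
 * Free a subrequest: let the filesystem clean up, return the struct to its
 * mempool and drop the ref that the subrequest held on its parent request.
 */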
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
				  bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
	if (rreq->netfs_ops->free_subrequest)
		rreq->netfs_ops->free_subrequest(subreq);
	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
	netfs_stat_d(&netfs_n_rh_sreq);
	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}

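/*
 * Drop a ref on a subrequest and free it if that was the last one.
 */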
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
			  enum netfs_sreq_ref_trace what)
{
	/* Sample the trace IDs up front as the subrequest may be freed by
	 * the ref drop below.
	 */
	unsigned int debug_index = subreq->debug_index;
	unsigned int debug_id = subreq->rreq->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&subreq->ref, &r);
	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
	if (dead)
		netfs_free_subrequest(subreq, was_async);
}
240