xref: /linux/block/blk-ioc.c (revision f79e4d5f92a129a1159c973735007d4ddc8541f3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

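/*
 * RCU callback used to free an icq once a grace period has passed.  The
 * icq_cache to free into is recorded in @icq by ioc_destroy_icq().
 */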
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->uses_mq && et->ops.mq.exit_icq)
		et->ops.mq.exit_icq(icq);
	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
		et->ops.sq.elevator_exit_icq_fn(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to @icq and clearing it are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;

	/*
	 * Exiting icq may call into put_io_context() through the elevator,
	 * which will trigger a lockdep warning.  The ioc's are guaranteed to
	 * be different, so use a different locking subclass here.  Use the
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}

	spin_unlock_irqrestore(&ioc->lock, flags);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

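	/* No icq's were attached, so the ioc can be freed directly. */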
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
	struct elevator_type *et;
	unsigned long flags;
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}

	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		et = icq->q->elevator->type;
		if (et->uses_mq) {
			ioc_exit_icq(icq);
		} else {
			if (spin_trylock(icq->q->queue_lock)) {
				ioc_exit_icq(icq);
				spin_unlock(icq->q->queue_lock);
			} else {
				spin_unlock_irqrestore(&ioc->lock, flags);
				cpu_relax();
				goto retry;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}

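/*
 * Destroy all icq's on @icq_list.  Each icq's ioc lock is taken in turn;
 * any queue locking required by ioc_destroy_icq() is the caller's
 * responsibility (see ioc_clear_queue()).
 */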
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);

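	/*
	 * blk-mq icq's are exited with only the ioc lock held, so the queue
	 * lock can be dropped before destroying them.  The legacy path must
	 * keep the queue lock held across __ioc_clear_queue().
	 */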
	if (q->mq_ops) {
		spin_unlock_irq(q->queue_lock);
		__ioc_clear_queue(&icq_list);
	} else {
		__ioc_clear_queue(&icq_list);
		spin_unlock_irq(q->queue_lock);
	}
}

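/**
 * create_task_io_context - allocate and install an io_context for a task
 * @task: task to attach a new io_context to
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Allocate a new io_context and try to install it as @task->io_context.
 * Returns 0 if @task ends up with an io_context (either the new one or
 * one installed concurrently), -ENOMEM if allocation fails, and -EBUSY
 * if installation is declined because @task, which isn't %current, is
 * exiting.
 */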
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did, or if @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on the exiting %current as the
	 * exit path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ret = task->io_context ? 0 : -EBUSY;

	task_unlock(task);

	return ret;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

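	/*
	 * Fast path grabs an existing io_context under task_lock.  After a
	 * successful create_task_io_context() the lookup is retried, since
	 * someone else may have installed one first; NULL is returned only
	 * if creation itself fails.
	 */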
	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking @ioc and @q exists.  If the icq doesn't
 * exist, it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->uses_mq && et->ops.mq.init_icq)
			et->ops.mq.init_icq(icq);
		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
			et->ops.sq.elevator_init_icq_fn(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

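/* Set up the slab cache used for all io_context allocations. */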
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);