xref: /linux/block/blk-ioc.c (revision 079c9534a96da9a85a2a2f9715851050fbfbf749)
1 /*
2  * Functions related to io context handling
3  */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
10 #include <linux/slab.h>
11 
12 #include "blk.h"
13 
14 /*
15  * For io context allocations
16  */
17 static struct kmem_cache *iocontext_cachep;
18 
19 /**
20  * get_io_context - increment reference count to io_context
21  * @ioc: io_context to get
22  *
23  * Increment reference count to @ioc.
24  */
25 void get_io_context(struct io_context *ioc)
26 {
27 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
28 	atomic_long_inc(&ioc->refcount);
29 }
30 EXPORT_SYMBOL(get_io_context);
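
/*
 * Example (an illustrative sketch, not from the kernel tree): a caller that
 * wants to keep an ioc alive beyond the scope in which it obtained the
 * pointer takes its own reference and later drops it with put_io_context().
 * The helper name share_ioc() below is made up for illustration.
 *
 *	static struct io_context *share_ioc(struct io_context *ioc)
 *	{
 *		get_io_context(ioc);
 *		return ioc;
 *	}
 *
 * The matching release is a later put_io_context(ioc, NULL) once the caller
 * is done with the context.
 */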
31 
32 /*
33  * Releasing ioc may nest into another put_io_context() leading to nested
34  * fast path release.  As the ioc's can't be the same, this is okay but
35  * makes lockdep whine.  Keep track of nesting and use it as subclass.
36  */
37 #ifdef CONFIG_LOCKDEP
38 #define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
39 #define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
40 #define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
41 #else
42 #define ioc_release_depth(q)		0
43 #define ioc_release_depth_inc(q)	do { } while (0)
44 #define ioc_release_depth_dec(q)	do { } while (0)
45 #endif
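
/*
 * Example of how the depth is consumed (see put_io_context() below): the
 * current release depth of the queue_lock the caller holds is passed to
 * spin_lock_irqsave_nested() as the lockdep subclass, so a nested release
 * of a different ioc doesn't trip lockdep's recursive-locking check.
 *
 *	spin_lock_irqsave_nested(&ioc->lock, flags,
 *				 ioc_release_depth(locked_q));
 */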
46 
47 static void icq_free_icq_rcu(struct rcu_head *head)
48 {
49 	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
50 
51 	kmem_cache_free(icq->__rcu_icq_cache, icq);
52 }
53 
54 /*
55  * Exit and free an icq.  Called with both ioc and q locked.
56  */
57 static void ioc_exit_icq(struct io_cq *icq)
58 {
59 	struct io_context *ioc = icq->ioc;
60 	struct request_queue *q = icq->q;
61 	struct elevator_type *et = q->elevator->type;
62 
63 	lockdep_assert_held(&ioc->lock);
64 	lockdep_assert_held(q->queue_lock);
65 
66 	radix_tree_delete(&ioc->icq_tree, icq->q->id);
67 	hlist_del_init(&icq->ioc_node);
68 	list_del_init(&icq->q_node);
69 
70 	/*
71 	 * Both setting the lookup hint to @icq and clearing it are done
72 	 * under queue_lock.  If it's not pointing to @icq now, it never
73 	 * will.  Hint assignment itself can race safely.
74 	 */
75 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
76 		rcu_assign_pointer(ioc->icq_hint, NULL);
77 
78 	if (et->ops.elevator_exit_icq_fn) {
79 		ioc_release_depth_inc(q);
80 		et->ops.elevator_exit_icq_fn(icq);
81 		ioc_release_depth_dec(q);
82 	}
83 
84 	/*
85 	 * @icq->q might have gone away by the time the RCU callback runs
86 	 * making it impossible to determine icq_cache.  Record it in @icq.
87 	 */
88 	icq->__rcu_icq_cache = et->icq_cache;
89 	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
90 }
91 
92 /*
93  * Slow path for ioc release in put_io_context().  Performs double-lock
94  * dancing to unlink all icq's and then frees ioc.
95  */
96 static void ioc_release_fn(struct work_struct *work)
97 {
98 	struct io_context *ioc = container_of(work, struct io_context,
99 					      release_work);
100 	struct request_queue *last_q = NULL;
101 
102 	spin_lock_irq(&ioc->lock);
103 
104 	while (!hlist_empty(&ioc->icq_list)) {
105 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
106 						struct io_cq, ioc_node);
107 		struct request_queue *this_q = icq->q;
108 
109 		if (this_q != last_q) {
110 			/*
111 			 * Need to switch to @this_q.  Once we release
112 	 * @ioc->lock, it can go away along with @icq.
113 			 * Hold on to it.
114 			 */
115 			__blk_get_queue(this_q);
116 
117 			/*
118 			 * blk_put_queue() might sleep thanks to kobject
119 			 * idiocy.  Always release both locks, put and
120 			 * restart.
121 			 */
122 			if (last_q) {
123 				spin_unlock(last_q->queue_lock);
124 				spin_unlock_irq(&ioc->lock);
125 				blk_put_queue(last_q);
126 			} else {
127 				spin_unlock_irq(&ioc->lock);
128 			}
129 
130 			last_q = this_q;
131 			spin_lock_irq(this_q->queue_lock);
132 			spin_lock(&ioc->lock);
133 			continue;
134 		}
135 		ioc_exit_icq(icq);
136 	}
137 
138 	if (last_q) {
139 		spin_unlock(last_q->queue_lock);
140 		spin_unlock_irq(&ioc->lock);
141 		blk_put_queue(last_q);
142 	} else {
143 		spin_unlock_irq(&ioc->lock);
144 	}
145 
146 	kmem_cache_free(iocontext_cachep, ioc);
147 }
148 
149 /**
150  * put_io_context - put a reference to an io_context
151  * @ioc: io_context to put
152  * @locked_q: request_queue the caller is holding queue_lock of (hint)
153  *
154  * Decrement reference count of @ioc and release it if the count reaches
155  * zero.  If the caller is holding queue_lock of a queue, it can indicate
156  * that with @locked_q.  This is an optimization hint and the caller is
157  * allowed to pass in %NULL even when it's holding a queue_lock.
158  */
159 void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
160 {
161 	struct request_queue *last_q = locked_q;
162 	unsigned long flags;
163 
164 	if (ioc == NULL)
165 		return;
166 
167 	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
168 	if (locked_q)
169 		lockdep_assert_held(locked_q->queue_lock);
170 
171 	if (!atomic_long_dec_and_test(&ioc->refcount))
172 		return;
173 
174 	/*
175 	 * Destroy @ioc.  This is a bit messy because icq's are chained
176 	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
177 	 * The inner ioc->lock should be held to walk our icq_list and then
178 	 * for each icq the outer matching queue_lock should be grabbed.
179 	 * ie. We need to do reverse-order double lock dancing.
180 	 *
181 	 * Another twist is that we are often called with one of the
182 	 * matching queue_locks held as indicated by @locked_q, which
183 	 * prevents performing double-lock dance for other queues.
184 	 * prevents performing the double-lock dance for other queues.
185 	 * So, we do it in two stages.  The fast path uses the queue_lock
186 	 * the caller is holding and, if other queues need to be accessed,
187 	 * uses trylock to avoid introducing locking dependency.  This can
188 	 * handle most cases, especially if @ioc was performing IO on only a
189 	 * single device.
190 	 *
191 	 * If trylock doesn't cut it, we defer to @ioc->release_work which
192 	 * can do all the double-locking dancing.
193 	 */
194 	spin_lock_irqsave_nested(&ioc->lock, flags,
195 				 ioc_release_depth(locked_q));
196 
197 	while (!hlist_empty(&ioc->icq_list)) {
198 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
199 						struct io_cq, ioc_node);
200 		struct request_queue *this_q = icq->q;
201 
202 		if (this_q != last_q) {
203 			if (last_q && last_q != locked_q)
204 				spin_unlock(last_q->queue_lock);
205 			last_q = NULL;
206 
207 			if (!spin_trylock(this_q->queue_lock))
208 				break;
209 			last_q = this_q;
210 			continue;
211 		}
212 		ioc_exit_icq(icq);
213 	}
214 
215 	if (last_q && last_q != locked_q)
216 		spin_unlock(last_q->queue_lock);
217 
218 	spin_unlock_irqrestore(&ioc->lock, flags);
219 
220 	/* if no icq is left, we're done; otherwise, kick release_work */
221 	if (hlist_empty(&ioc->icq_list))
222 		kmem_cache_free(iocontext_cachep, ioc);
223 	else
224 		schedule_work(&ioc->release_work);
225 }
226 EXPORT_SYMBOL(put_io_context);
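
/*
 * Example usage (an illustrative sketch; rq_ioc and tsk_ioc are placeholder
 * names): a caller that already holds a queue_lock passes that queue as the
 * hint so the fast path can reuse the lock, while a caller holding no
 * queue_lock simply passes NULL.
 *
 *	spin_lock_irq(q->queue_lock);
 *	...
 *	put_io_context(rq_ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *
 * or, from a context without any queue_lock held:
 *
 *	put_io_context(tsk_ioc, NULL);
 */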
227 
228 /* Called by the exiting task */
229 void exit_io_context(struct task_struct *task)
230 {
231 	struct io_context *ioc;
232 
233 	task_lock(task);
234 	ioc = task->io_context;
235 	task->io_context = NULL;
236 	task_unlock(task);
237 
238 	atomic_dec(&ioc->nr_tasks);
239 	put_io_context(ioc, NULL);
240 }
241 
242 /**
243  * ioc_clear_queue - break any ioc association with the specified queue
244  * @q: request_queue being cleared
245  *
246  * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
247  */
248 void ioc_clear_queue(struct request_queue *q)
249 {
250 	lockdep_assert_held(q->queue_lock);
251 
252 	while (!list_empty(&q->icq_list)) {
253 		struct io_cq *icq = list_entry(q->icq_list.next,
254 					       struct io_cq, q_node);
255 		struct io_context *ioc = icq->ioc;
256 
257 		spin_lock(&ioc->lock);
258 		ioc_exit_icq(icq);
259 		spin_unlock(&ioc->lock);
260 	}
261 }
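
/*
 * Example caller (a sketch of how queue teardown is expected to use this;
 * the actual call site lives outside this file and may differ in detail):
 *
 *	spin_lock_irq(q->queue_lock);
 *	ioc_clear_queue(q);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	elevator_exit(q->elevator);
 */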
262 
263 void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
264 				int node)
265 {
266 	struct io_context *ioc;
267 
268 	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
269 				    node);
270 	if (unlikely(!ioc))
271 		return;
272 
273 	/* initialize */
274 	atomic_long_set(&ioc->refcount, 1);
275 	atomic_set(&ioc->nr_tasks, 1);
276 	spin_lock_init(&ioc->lock);
277 	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
278 	INIT_HLIST_HEAD(&ioc->icq_list);
279 	INIT_WORK(&ioc->release_work, ioc_release_fn);
280 
281 	/*
282 	 * Try to install.  ioc shouldn't be installed if someone else
283 	 * already did or @task, which isn't %current, is exiting.  Note
284 	 * that we need to allow ioc creation on exiting %current as exit
285 	 * path may issue IOs from e.g. exit_files().  The exit path is
286 	 * responsible for not issuing IO after exit_io_context().
287 	 */
288 	task_lock(task);
289 	if (!task->io_context &&
290 	    (task == current || !(task->flags & PF_EXITING)))
291 		task->io_context = ioc;
292 	else
293 		kmem_cache_free(iocontext_cachep, ioc);
294 	task_unlock(task);
295 }
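
/*
 * Callers reach this slow path through the create_io_context() wrapper in
 * blk.h.  Roughly (a sketch from memory; blk.h is authoritative), the
 * wrapper only drops into the slow path when the task has no io_context
 * yet and then rereads the pointer:
 *
 *	static inline struct io_context *create_io_context(
 *			struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		WARN_ON_ONCE(irqs_disabled());
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */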
296 
297 /**
298  * get_task_io_context - get io_context of a task
299  * @task: task of interest
300  * @gfp_flags: allocation flags, used if allocation is necessary
301  * @node: allocation node, used if allocation is necessary
302  *
303  * Return io_context of @task.  If it doesn't exist, it is created with
304  * @gfp_flags and @node.  The returned io_context has its reference count
305  * incremented.
306  *
307  * This function always goes through task_lock() and it's better to use
308  * %current->io_context + get_io_context() for %current.
309  */
310 struct io_context *get_task_io_context(struct task_struct *task,
311 				       gfp_t gfp_flags, int node)
312 {
313 	struct io_context *ioc;
314 
315 	might_sleep_if(gfp_flags & __GFP_WAIT);
316 
317 	do {
318 		task_lock(task);
319 		ioc = task->io_context;
320 		if (likely(ioc)) {
321 			get_io_context(ioc);
322 			task_unlock(task);
323 			return ioc;
324 		}
325 		task_unlock(task);
326 	} while (create_io_context(task, gfp_flags, node));
327 
328 	return NULL;
329 }
330 EXPORT_SYMBOL(get_task_io_context);
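
/*
 * Example usage (an illustrative, simplified sketch; changing another
 * task's ioprio is one use of this interface):
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(p, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc_ioprio_changed(ioc, ioprio);
 *		put_io_context(ioc, NULL);
 *	}
 */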
331 
332 /**
333  * ioc_lookup_icq - lookup io_cq from ioc
334  * @ioc: the associated io_context
335  * @q: the associated request_queue
336  *
337  * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
338  * with @q->queue_lock held.
339  */
340 struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
341 {
342 	struct io_cq *icq;
343 
344 	lockdep_assert_held(q->queue_lock);
345 
346 	/*
347 	 * icq's are indexed from @ioc using radix tree and hint pointer,
348 	 * both of which are protected with RCU.  All removals are done
349 	 * holding both q and ioc locks, and we're holding q lock - if we
350 	 * find a icq which points to us, it's guaranteed to be valid.
351 	 * find an icq which points to us, it's guaranteed to be valid.
352 	rcu_read_lock();
353 	icq = rcu_dereference(ioc->icq_hint);
354 	if (icq && icq->q == q)
355 		goto out;
356 
357 	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
358 	if (icq && icq->q == q)
359 		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
360 	else
361 		icq = NULL;
362 out:
363 	rcu_read_unlock();
364 	return icq;
365 }
366 EXPORT_SYMBOL(ioc_lookup_icq);
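
/*
 * Example usage (an illustrative sketch): an I/O scheduler looking up its
 * per-(task, queue) state on the request issue path.  Here @ioc is assumed
 * to be current->io_context and the queue_lock is taken just around the
 * lookup.
 *
 *	struct io_cq *icq = NULL;
 *
 *	if (ioc) {
 *		spin_lock_irq(q->queue_lock);
 *		icq = ioc_lookup_icq(ioc, q);
 *		spin_unlock_irq(q->queue_lock);
 *	}
 */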
367 
368 /**
369  * ioc_create_icq - create and link io_cq
370  * @q: request_queue of interest
371  * @gfp_mask: allocation mask
372  *
373  * Make sure an io_cq linking %current->io_context and @q exists.  If either
374  * the io_context or the icq doesn't exist, it is created using @gfp_mask.
375  *
376  * The caller is responsible for ensuring that %current's io_context won't
377  * go away and that @q stays alive until this function returns.
378  */
379 struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
380 {
381 	struct elevator_type *et = q->elevator->type;
382 	struct io_context *ioc;
383 	struct io_cq *icq;
384 
385 	/* allocate stuff */
386 	ioc = create_io_context(current, gfp_mask, q->node);
387 	if (!ioc)
388 		return NULL;
389 
390 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
391 				    q->node);
392 	if (!icq)
393 		return NULL;
394 
395 	if (radix_tree_preload(gfp_mask) < 0) {
396 		kmem_cache_free(et->icq_cache, icq);
397 		return NULL;
398 	}
399 
400 	icq->ioc = ioc;
401 	icq->q = q;
402 	INIT_LIST_HEAD(&icq->q_node);
403 	INIT_HLIST_NODE(&icq->ioc_node);
404 
405 	/* lock both q and ioc and try to link @icq */
406 	spin_lock_irq(q->queue_lock);
407 	spin_lock(&ioc->lock);
408 
409 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
410 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
411 		list_add(&icq->q_node, &q->icq_list);
412 		if (et->ops.elevator_init_icq_fn)
413 			et->ops.elevator_init_icq_fn(icq);
414 	} else {
415 		kmem_cache_free(et->icq_cache, icq);
416 		icq = ioc_lookup_icq(ioc, q);
417 		if (!icq)
418 			printk(KERN_ERR "cfq: icq link failed!\n");
419 	}
420 
421 	spin_unlock(&ioc->lock);
422 	spin_unlock_irq(q->queue_lock);
423 	radix_tree_preload_end();
424 	return icq;
425 }
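
/*
 * Example usage (an illustrative sketch of the lookup-then-create pattern an
 * elevator is expected to follow): look up under queue_lock first and only
 * drop the lock to allocate when no icq exists yet.  cfq's request setup
 * path works roughly this way, though the snippet below is simplified.
 *
 *	icq = ioc_lookup_icq(ioc, q);
 *	if (!icq) {
 *		spin_unlock_irq(q->queue_lock);
 *		icq = ioc_create_icq(q, gfp_mask);
 *		spin_lock_irq(q->queue_lock);
 *	}
 */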
426 
427 void ioc_set_changed(struct io_context *ioc, int which)
428 {
429 	struct io_cq *icq;
430 	struct hlist_node *n;
431 
432 	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
433 		set_bit(which, &icq->changed);
434 }
435 
436 /**
437  * ioc_ioprio_changed - notify ioprio change
438  * @ioc: io_context of interest
439  * @ioprio: new ioprio
440  *
441  * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
442  * icq's.  iosched is responsible for checking the bit and applying it on
443  * request issue path.
444  */
445 void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
446 {
447 	unsigned long flags;
448 
449 	spin_lock_irqsave(&ioc->lock, flags);
450 	ioc->ioprio = ioprio;
451 	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
452 	spin_unlock_irqrestore(&ioc->lock, flags);
453 }
454 
455 /**
456  * ioc_cgroup_changed - notify cgroup change
457  * @ioc: io_context of interest
458  *
459  * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
460  * iosched is responsible for checking the bit and applying it on request
461  * issue path.
462  */
463 void ioc_cgroup_changed(struct io_context *ioc)
464 {
465 	unsigned long flags;
466 
467 	spin_lock_irqsave(&ioc->lock, flags);
468 	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
469 	spin_unlock_irqrestore(&ioc->lock, flags);
470 }
471 EXPORT_SYMBOL(ioc_cgroup_changed);
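
/*
 * Example consumer (an illustrative sketch): an I/O scheduler is expected to
 * test-and-clear these bits on its request issue path and reapply the new
 * ioprio/cgroup state to its per-icq data.  cfq does something along these
 * lines; the handler names below are placeholders.
 *
 *	if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &icq->changed))
 *		handle_ioprio_change(icq);
 *	if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &icq->changed))
 *		handle_cgroup_change(icq);
 */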
472 
473 static int __init blk_ioc_init(void)
474 {
475 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
476 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
477 	return 0;
478 }
479 subsys_initcall(blk_ioc_init);
480