#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_IOPRIO_CHANGED	= 1 << 0,
	ICQ_CGROUP_CHANGED	= 1 << 1,
	ICQ_EXITED		= 1 << 2,

	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
};
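
/*
 * Illustrative sketch (not part of this header): an elevator typically
 * consumes these flags via icq_get_changed(), declared below, which
 * fetches and clears the ICQ_*_CHANGED bits, and then reacts to
 * whatever was set when preparing a request.  snail_refresh_ioprio()
 * and snail_refresh_cgroup() are hypothetical helpers, roughly:
 *
 *	unsigned int changed = icq_get_changed(icq);
 *
 *	if (changed & ICQ_IOPRIO_CHANGED)
 *		snail_refresh_ioprio(icq);
 *	if (changed & ICQ_CGROUP_CHANGED)
 *		snail_refresh_cgroup(icq);
 */
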
/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc-q pair.
 *
 * An elevator can request use of icqs by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be no smaller than those of
 * struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is to define a struct
 * which contains io_cq as the first member followed by the private
 * members, and to use its size and align.  For example:
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops =		{ ... },
 *		.icq_size =	sizeof(struct snail_io_cq),
 *		.icq_align =	__alignof__(struct snail_io_cq),
 *		...
 *	};
 *
 * If icq_size is set, the block core will manage icqs.  Each request
 * will have its ->elv.icq field set before
 * elevator_ops->elevator_set_req_fn() is called and will hold a
 * reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator may look up an icq using ioc_lookup_icq() while holding
 * the queue lock, but the returned icq is valid only until the queue
 * lock is released.  Elevators cannot and should not try to create or
 * destroy icqs on their own.
 *
 * As icqs are linked from both the ioc and the q, the locking rules are
 * a bit complex:
 *
 * - The ioc lock nests inside the q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by the ioc lock.
 *   q->icq_list and icq->q_node by the q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by the ioc lock, while
 *   the icq itself is protected by the q lock.  However, both the
 *   indexes and the icq itself are also RCU managed, so lookups can be
 *   performed while holding only the q lock.
 *
 * - icqs are not reference counted.  They are destroyed when either the
 *   ioc or the q goes away.  Each request with its icq set holds an
 *   extra reference to the ioc to ensure it stays until the request is
 *   completed.
 *
 * - Linking and unlinking of icqs are performed while holding both the
 *   ioc and q locks.  Due to the lock ordering, q exit is simple but
 *   ioc exit requires a reverse-order double-lock dance; see the sketch
 *   below this comment.
 */
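
/*
 * Illustrative sketch (not part of this header): because the ioc lock
 * nests inside the q lock, an exiting ioc cannot simply take each icq's
 * q lock after its own.  The release path in blk-ioc.c resolves this
 * with a trylock-and-retry loop roughly like the following, where
 * ioc_exit_icq() stands for the core's per-icq teardown:
 *
 *	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 *	while (!hlist_empty(&ioc->icq_list)) {
 *		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 *						struct io_cq, ioc_node);
 *		struct request_queue *q = icq->q;
 *
 *		if (spin_trylock(q->queue_lock)) {
 *			ioc_exit_icq(icq);
 *			spin_unlock(q->queue_lock);
 *		} else {
 *			spin_unlock_irqrestore(&ioc->lock, flags);
 *			cpu_relax();
 *			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 *		}
 *	}
 *	spin_unlock_irqrestore(&ioc->lock, flags);
 */
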
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through the icq_list of q and
	 * ioc respectively.  Both fields are unused once ioc_exit_icq()
	 * has been called and share space with __rcu_icq_cache and
	 * __rcu_head, which are used for the RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
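
/*
 * Illustrative sketch (not part of this header): since the elevator's
 * private icq type embeds struct io_cq as its first member, the
 * enclosing structure can be recovered from an io_cq pointer with
 * container_of().  struct snail_io_cq and icq_to_sic() are hypothetical
 * names carried over from the example above:
 *
 *	static struct snail_io_cq *icq_to_sic(struct io_cq *icq)
 *	{
 *		return container_of(icq, struct snail_io_cq, icq);
 *	}
 */
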
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};
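
/*
 * Illustrative sketch (not part of this header): icq_hint acts as a
 * one-element cache in front of icq_tree.  A lookup in the style of
 * ioc_lookup_icq() in blk-ioc.c tries the hint under RCU first and
 * falls back to the radix tree, keyed by the queue's id, on a miss:
 *
 *	rcu_read_lock();
 *	icq = rcu_dereference(ioc->icq_hint);
 *	if (!icq || icq->q != q) {
 *		icq = radix_tree_lookup(&ioc->icq_tree, q->id);
 *		if (icq && icq->q == q)
 *			rcu_assign_pointer(ioc->icq_hint, icq);
 *		else
 *			icq = NULL;
 *	}
 *	rcu_read_unlock();
 *
 * The caller must hold the q lock; the returned icq is valid only until
 * that lock is released, as noted above.
 */
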
static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the refcount is zero, don't allow sharing (the ioc is going
	 * away; we would be racing with its release).
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}
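
/*
 * Illustrative sketch (not part of this header): the expected caller of
 * ioc_task_link() is fork-time ioc sharing, along the lines of what
 * copy_io() in kernel/fork.c does for CLONE_IO:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc && (clone_flags & CLONE_IO)) {
 *		if (!ioc_task_link(ioc))
 *			return -ENOMEM;		(lost the race; ioc is dying)
 *		tsk->io_context = ioc;
 *	}
 */
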
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
unsigned int icq_get_changed(struct io_cq *icq);
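
/*
 * Illustrative sketch (not part of this header): a typical sequence
 * takes a referenced ioc, applies a change, and drops the reference,
 * roughly what fs/ioprio.c does when changing a task's I/O priority:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
 *	if (ioc) {
 *		ioc_ioprio_changed(ioc, ioprio);
 *		put_io_context(ioc);
 *	}
 */
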
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

#endif