xref: /linux/include/linux/iocontext.h (revision c58698073218f2c8f2fc5982fa3938c2d3803b9f)
1fd0928dfSJens Axboe #ifndef IOCONTEXT_H
2fd0928dfSJens Axboe #define IOCONTEXT_H
3fd0928dfSJens Axboe 
44ac845a2SJens Axboe #include <linux/radix-tree.h>
534e6bbf2SFabio Checconi #include <linux/rcupdate.h>
6b2efa052STejun Heo #include <linux/workqueue.h>
74ac845a2SJens Axboe 
/*
 * Bit indices for io_cq->changed.  Set when the owning task's ioprio or
 * blkio cgroup changes (see ioc_ioprio_changed()/ioc_cgroup_changed()
 * below); NOTE(review): presumably tested and cleared by the elevator on
 * the next request — confirm against the icq users.
 */
enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
};
12dc86900eSTejun Heo 
/*
 * Association between an io_context and a request_queue.  One io_cq
 * exists per (ioc, q) pair; it is linked into both owners' lists and
 * looked up via ioc->icq_tree.
 */
struct io_cq {
	struct request_queue	*q;	/* the queue this icq belongs to */
	struct io_context	*ioc;	/* the io_context this icq belongs to */

	struct list_head	q_node;		/* member of q's icq list */
	struct hlist_node	ioc_node;	/* member of ioc->icq_list */

	unsigned long		changed;	/* ICQ_*_CHANGED flag bits */
	struct rcu_head		rcu_head;	/* deferred (RCU) freeing */

	/* elevator callbacks; NOTE(review): exit appears to run on icq
	 * teardown and release on final free — confirm with the callers. */
	void (*exit)(struct io_cq *);
	void (*release)(struct io_cq *);
};
26fd0928dfSJens Axboe 
27fd0928dfSJens Axboe /*
28d38ecf93SJens Axboe  * I/O subsystem state of the associated processes.  It is refcounted
29d38ecf93SJens Axboe  * and kmalloc'ed. These could be shared between processes.
30fd0928dfSJens Axboe  */
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed. These could be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;	/* object lifetime; see ioc_task_link() */
	atomic_t nr_tasks;	/* number of tasks sharing this ioc */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;	/* current I/O priority */

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;	/* icqs indexed by queue */
	struct io_cq __rcu	*icq_hint;	/* last-used icq, RCU-protected */
	struct hlist_head	icq_list;	/* all icqs of this ioc */

	/* defers the final release out of atomic context */
	struct work_struct release_work;
};
52fd0928dfSJens Axboe 
53d38ecf93SJens Axboe static inline struct io_context *ioc_task_link(struct io_context *ioc)
54d38ecf93SJens Axboe {
55d38ecf93SJens Axboe 	/*
56d38ecf93SJens Axboe 	 * if ref count is zero, don't allow sharing (ioc is going away, it's
57d38ecf93SJens Axboe 	 * a race).
58d38ecf93SJens Axboe 	 */
59d9c7d394SNikanth Karthikesan 	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
60cbb4f264SLi Zefan 		atomic_inc(&ioc->nr_tasks);
61d38ecf93SJens Axboe 		return ioc;
62d237e5c7SJens Axboe 	}
63d38ecf93SJens Axboe 
64d38ecf93SJens Axboe 	return NULL;
65d38ecf93SJens Axboe }
66d38ecf93SJens Axboe 
struct task_struct;
#ifdef CONFIG_BLOCK
/* drop a reference; @locked_q is the queue already held by the caller,
 * or NULL — NOTE(review): confirm exact locking contract at definition */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
/* detach and release @task's io_context on task exit */
void exit_io_context(struct task_struct *task);
/* get @task's io_context, creating it with @gfp_flags on @node if absent */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
/* propagate an ioprio / cgroup change to all icqs of @ioc */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
/* !CONFIG_BLOCK: no-op stubs so callers need no ifdefs */
struct io_context;
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
81da9cbc87SJens Axboe 
82fd0928dfSJens Axboe #endif
83