#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/* bit positions in io_cq->changed, set when the owning task's attributes change */
enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
};

struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through the icq_list of q and
	 * ioc respectively.  Both fields are unused once ioc_exit_icq()
	 * is called, and are shared with __rcu_icq_cache and __rcu_head,
	 * which are used for RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned long		changed;	/* ICQ_*_CHANGED bits */
};

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;

	/*
	 * For request batching
	 */
	int nr_batch_requests;	/* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the refcount is already zero, the ioc is being freed and
	 * must not be shared: the caller lost the race with the final put.
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

#endif
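/*
 * Usage sketch (illustrative, not part of this header): sharing an
 * io_context with a newly forked task, modeled on copy_io() in
 * kernel/fork.c.  ioc_task_link() takes a reference only while the
 * refcount is still non-zero, so a NULL return means the ioc is already
 * being torn down and the caller should fail the same way an allocation
 * failure would.  The helper name below is hypothetical.
 *
 *	static int share_io_context(struct task_struct *tsk)
 *	{
 *		struct io_context *ioc = current->io_context;
 *
 *		if (!ioc)
 *			return 0;
 *		tsk->io_context = ioc_task_link(ioc);
 *		if (unlikely(!tsk->io_context))
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * The matching teardown is exit_io_context(), which detaches the task's
 * io_context, decrements nr_tasks, and drops the reference via
 * put_io_context().
 */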