#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

struct cfq_queue;
struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

struct cfq_io_context {
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	struct cfq_ttime ttime;

	struct list_head queue_list;
	struct hlist_node cic_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */

	struct rcu_head rcu_head;
};

/*
 * I/O subsystem state of the associated processes. It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
	unsigned short cgroup_changed;
#endif

	/*
	 * For request batching
	 */
	int nr_batch_requests;		/* Number of requests left in the batch */
	unsigned long last_waited;	/* Time last woken after wait for request */

	struct radix_tree_root radix_root;
	struct hlist_head cic_list;
	void __rcu *ioc_data;
};
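/*
 * Sharing an io_context between tasks is done by taking an extra reference
 * with ioc_task_link() below and storing the result in the new task.  A
 * minimal sketch of such a caller (the clone_io_context() helper name and
 * error handling are illustrative only, not part of this header; in practice
 * this pattern is used by the fork path when CLONE_IO is set):
 *
 *	static int clone_io_context(struct task_struct *child)
 *	{
 *		struct io_context *ioc = current->io_context;
 *
 *		if (!ioc)
 *			return 0;
 *		child->io_context = ioc_task_link(ioc);
 *		return child->io_context ? 0 : -ENOMEM;
 *	}
 *
 * If ioc_task_link() returns NULL, the context was already being torn down
 * and the child must allocate its own io_context instead.
 */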
static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the refcount is already zero, don't allow sharing: the ioc is
	 * being freed and we lost the race with the final put.
	 */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}

	return NULL;
}

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif

#endif
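/*
 * Usage sketch for the CONFIG_BLOCK interface above (illustrative only, not
 * part of this header): a block layer user takes a reference on the current
 * task's io_context with get_io_context(), which allocates a context if the
 * task does not have one yet, and drops the reference with put_io_context()
 * when done:
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, numa_node_id());
 *
 *	if (ioc) {
 *		... inspect or update per-task I/O state under ioc->lock ...
 *		put_io_context(ioc);
 *	}
 */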