#ifndef IOCONTEXT_H
#define IOCONTEXT_H

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic);	/* destructor */
	void (*exit)(struct as_io_context *aic);	/* called on task exit */

	unsigned long state;
	atomic_t nr_queued;	/* queued reads & sync writes */
	atomic_t nr_dispatched;	/* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;		/* node in io_context->cic_root */
	void *key;			/* the cfq_data this context belongs to */

	struct cfq_queue *cfqq[2];	/* async and sync queues */

	struct io_context *ioc;		/* owning io_context */

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;	/* link on the cfq_data's cic list */

	void (*dtor)(struct io_context *);	/* destructor */
	void (*exit)(struct io_context *);	/* called on task exit */
};

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and may be shared between processes.
 */
struct io_context {
	atomic_t refcount;
	atomic_t nr_tasks;

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;
	unsigned short ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited;	/* Time last woken after wait for request */
	int nr_batch_requests;		/* Number of requests left in the batch */

	struct as_io_context *aic;	/* anticipatory scheduler state */
	struct rb_root cic_root;	/* CFQ contexts, one per request queue */
	void *ioc_data;			/* last-hit cfq_io_context cache */
};

static inline struct io_context *ioc_task_link(struct io_context *ioc)
{
	/*
	 * If the ref count is zero, don't allow sharing (the ioc is going
	 * away; it's a race).
	 */
	if (ioc && atomic_inc_not_zero(&ioc->refcount))
		return ioc;

	return NULL;
}

#endif
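
/*
 * Usage sketch (illustrative only, not part of this header and not
 * lifted from its callers): a fork-style path that wants a child task
 * to share its parent's io_context could use ioc_task_link() roughly
 * as below.  "parent" and "child" stand for hypothetical task_struct
 * pointers.  Since this version of ioc_task_link() only takes the
 * refcount, the caller is assumed to account for the extra task by
 * bumping nr_tasks itself:
 *
 *	struct io_context *ioc = ioc_task_link(parent->io_context);
 *
 *	if (!ioc)
 *		return -ENOMEM;		(the context was going away)
 *
 *	atomic_inc(&ioc->nr_tasks);	(one more task is attached)
 *	child->io_context = ioc;
 *	return 0;
 *
 * Because the reference is taken with atomic_inc_not_zero(), a context
 * whose last reference is already being dropped is never resurrected;
 * on a NULL return the caller falls back to allocating a fresh
 * io_context of its own.
 */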