struct r5l_log — RAID5 journal log descriptor (drivers/md/raid5-cache.c)

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running, and
					 * have not yet been completely finished */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t io_pool;
	struct bio_set bs;
	mempool_t meta_pool;

	struct md_thread __rcu *reclaim_thread;
	unsigned long reclaim_target;	/* number of space that need to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of being written to journal */
	struct list_head stripe_in_journal_list;
	/* lock for stripe_in_journal_list */
	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};
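
The device_size, log_start (head) and last_checkpoint (tail) fields describe a circular log laid out on the journal device: new data is appended at log_start and wraps at device_size, while reclaim advances last_checkpoint. Below is a minimal, standalone sketch of the wrap-around arithmetic those fields imply; the helper names ring_add() and ring_free_space() are illustrative only, not the kernel's actual functions.

/* Illustrative user-space model of the circular-log arithmetic implied by
 * device_size, log_start and last_checkpoint. Not kernel code; ring_add()
 * and ring_free_space() are hypothetical helpers for this sketch.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

/* Advance a log position by 'inc' sectors, wrapping at the end of the device. */
static sector_t ring_add(sector_t device_size, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= device_size)
		start -= device_size;
	return start;
}

/* Sectors still free between the log head (log_start) and tail (last_checkpoint). */
static sector_t ring_free_space(sector_t device_size, sector_t log_start,
				sector_t last_checkpoint)
{
	if (log_start >= last_checkpoint)
		return device_size - (log_start - last_checkpoint);
	return last_checkpoint - log_start;
}

int main(void)
{
	sector_t device_size = 1 << 20;		/* 1M sectors of journal space */
	sector_t last_checkpoint = 1000;	/* log tail */
	sector_t log_start = 900000;		/* log head */

	printf("free: %llu sectors\n",
	       ring_free_space(device_size, log_start, last_checkpoint));
	printf("head after 200000-sector append: %llu\n",
	       ring_add(device_size, log_start, 200000));
	return 0;
}

When free space drops toward max_free_space, the reclaim thread (reclaim_thread) is expected to move last_checkpoint forward by retiring io_units that have reached the RAID disks, which is what reclaim_target and iounit_wait coordinate.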