#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

/*
 * Cache-utilization cutoffs, as percentages of the cache in use
 * (gc_stats.in_use): above CUTOFF_WRITEBACK, only REQ_SYNC writes are
 * considered for writeback; above CUTOFF_WRITEBACK_SYNC, nothing is.
 */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

/* Total dirty sectors on the device, summed across all stripes. */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

/*
 * True if any stripe touched by the nr_sectors starting at offset has
 * dirty sectors.
 */
static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	uint64_t stripe = offset >> d->stripe_size_bits;

	while (1) {
		if (atomic_read(d->stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= 1 << d->stripe_size_bits)
			return false;

		nr_sectors -= 1 << d->stripe_size_bits;
		stripe++;
	}
}

/*
 * Decide whether a write bio should be cached dirty (writeback) rather
 * than written through or bypassed:
 *
 * - Never when not in writeback mode, while detaching, or when the cache
 *   is more than CUTOFF_WRITEBACK_SYNC percent full.
 * - Always when partial stripe writes are expensive for the backing
 *   device (e.g. RAID) and the bio touches an already-dirty stripe.
 * - Never when the request would otherwise have bypassed the cache.
 * - Otherwise, only for REQ_SYNC writes or while the cache is no more
 *   than CUTOFF_WRITEBACK percent full.
 */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    atomic_read(&dc->disk.detaching) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (bio->bi_rw & REQ_SYNC) ||
		in_use <= CUTOFF_WRITEBACK;
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_writeback_queue(struct cached_dev *);
void bch_writeback_add(struct cached_dev *);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);

#endif
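
/*
 * Usage sketch (illustrative only, not part of this header): a write
 * path would typically consult should_writeback() after working out the
 * cache mode and its own bypass decision, roughly:
 *
 *	if (should_writeback(dc, bio, cache_mode, would_skip)) {
 *		would_skip = false;
 *		writeback  = true;
 *	}
 *
 * Here cache_mode, would_skip, and writeback stand in for state the
 * caller already tracks. On true, the write is cached dirty and flushed
 * to the backing device later by the writeback thread; on false, it is
 * written through or bypasses the cache entirely.
 */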