/* linux/drivers/md/bcache/writeback.h (revision 55f3538c4923e9dfca132e99ebec370e8094afda) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

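/*
 * Cache occupancy cutoffs, as a percentage of the cache in use: in
 * writeback mode, ordinary writes are no longer written back once in_use
 * exceeds CUTOFF_WRITEBACK, and even sync/metadata writes are not once
 * it exceeds CUTOFF_WRITEBACK_SYNC (see should_writeback() below).
 */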
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

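/*
 * Per-pass limits for the writeback thread: at most MAX_WRITEBACKS_IN_PASS
 * keys and MAX_WRITESIZE_IN_PASS sectors are written back before the rate
 * is rechecked (see the writeback loop in writeback.c).
 */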
#define MAX_WRITEBACKS_IN_PASS  5
#define MAX_WRITESIZE_IN_PASS   5000	/* in units of 512-byte sectors */

/*
 * A shift of 14 (i.e. units of 1/16384) is chosen so that each backing
 * device's share is a reasonable fraction of the total, and the
 * arithmetic does not blow up until individual backing devices reach a
 * petabyte.
 */
#define WRITEBACK_SHARE_SHIFT   14

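/* Total dirty sectors on a backing device, summed over its stripes. */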
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

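/*
 * Total dirty sectors across all flash-only volumes in the cache set;
 * bch_register_lock protects the walk of c->devices[].
 */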
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

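/* Map a sector offset on the backing device to its stripe index. */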
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	/* do_div() divides offset in place; it now holds the stripe index */
	do_div(offset, d->stripe_size);
	return offset;
}

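/*
 * Return true if any stripe overlapped by [offset, offset + nr_sectors)
 * already has dirty sectors.
 */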
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

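/*
 * Decide whether a write should be cached dirty and written back later.
 * Never when not in writeback mode, while the device is detaching, or
 * when the cache is more than CUTOFF_WRITEBACK_SYNC percent full.
 * Always when the write overlaps an already-dirty stripe and partial
 * stripes are expensive.  A write that would otherwise bypass the cache
 * (would_skip) is not written back beyond that case; the rest are
 * written back only if they are sync/metadata/priority writes or the
 * cache is at most CUTOFF_WRITEBACK percent full.
 *
 * A sketch of how the write path might consult this (hypothetical
 * caller; the real decision is made in request.c):
 *
 *	if (should_writeback(dc, bio, cache_mode, bypass)) {
 *		bypass = false;
 *		do_writeback = true;
 *	}
 */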
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

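/* Wake the writeback thread, if one has been started. */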
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

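/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition of
 * has_dirty, take a refcount on the device, persist BDEV_STATE_DIRTY in
 * the backing superblock, and kick the writeback thread.
 */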
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		refcount_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

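/* Implemented in writeback.c. */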
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif