/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

/*
 * Per-request writeback-throttling flags. They are packed into the
 * reserved bits of blk_issue_stat (see BLK_STAT_RES_SHIFT/MASK in
 * blk-stat.h), so they must fit in WBT_NR_BITS bits.
 */
enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,	/* number of rq_wait queues in struct rq_wb */
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};

/* Clear any wbt_flags stored in the stat's reserved bits. */
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->stat &= ~BLK_STAT_RES_MASK;
}

/* Extract the wbt_flags previously stored via wbt_track(). */
static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
}

/* OR @wb_acct into the stat's reserved bits for later retrieval. */
static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
}

/* True if this request was accounted for throttling (WBT_TRACKED set). */
static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
}

/* True if this request was marked as a read (WBT_READ set). */
static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
}

/* A wait queue paired with the count of requests currently in flight. */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

/* Per-queue writeback throttling state. */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};

/* Sum of in-flight requests across all of @rwb's wait queues. */
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

#else

/*
 * No-op stubs so callers build unchanged when writeback throttling is
 * compiled out; wbt_init() fails so no rq_wb is ever set up.
 */
static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif