xref: /linux/block/blk-wbt.h (revision 2dbc0838bcf24ca59cabc3130cf3b1d6809cdcd4)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef WB_THROTTLE_H
3 #define WB_THROTTLE_H
4 
5 #include <linux/kernel.h>
6 #include <linux/atomic.h>
7 #include <linux/wait.h>
8 #include <linux/timer.h>
9 #include <linux/ktime.h>
10 
11 #include "blk-stat.h"
12 #include "blk-rq-qos.h"
13 
/*
 * Per-request wbt state flags. These must fit in WBT_NR_BITS bits, as
 * they are packed alongside other request state by the caller.
 */
enum wbt_flags {
	WBT_TRACKED		= 1,	/* write, tracked for throttling */
	WBT_READ		= 2,	/* read */
	WBT_KSWAPD		= 4,	/* write, from kswapd */
	WBT_DISCARD		= 8,	/* discard */

	WBT_NR_BITS		= 4,	/* number of bits */
};
22 
/*
 * Indexes into the rq_wb->rq_wait[] array, one wait queue per
 * throttled request class.
 */
enum {
	WBT_RWQ_BG		= 0,	/* background/normal writes */
	WBT_RWQ_KSWAPD,			/* writes issued by kswapd */
	WBT_RWQ_DISCARD,		/* discards */
	WBT_NUM_RWQ,			/* array size, not a real class */
};
29 
30 /*
31  * Enable states. Either off, or on by default (done at init time),
32  * or on through manual setup in sysfs.
33  */
enum {
	WBT_STATE_ON_DEFAULT	= 1,	/* enabled at init time */
	WBT_STATE_ON_MANUAL	= 2,	/* enabled through sysfs */
};
38 
/*
 * Per-queue writeback throttling state.
 */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	/* stats callback driving the monitoring window (see blk-stat.h) */
	struct blk_stat_callback *cb;

	/*
	 * NOTE(review): presumably issue time and identity of the last
	 * tracked sync request — confirm against blk-wbt.c.
	 */
	u64 sync_issue;
	void *sync_cookie;

	/* write cache state, as set via wbt_set_write_cache() */
	unsigned int wc;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target (see wbt_get/set_min_lat) */
	struct rq_qos rqos;			/* embedded base; see RQWB() */
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* one wait queue per WBT_RWQ_* class */
	struct rq_depth rq_depth;		/* queue depth scaling state (blk-rq-qos.h) */
};
71 
/* Convert the embedded rq_qos back to its containing rq_wb. */
static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
	return container_of(rqos, struct rq_wb, rqos);
}
76 
77 static inline unsigned int wbt_inflight(struct rq_wb *rwb)
78 {
79 	unsigned int i, ret = 0;
80 
81 	for (i = 0; i < WBT_NUM_RWQ; i++)
82 		ret += atomic_read(&rwb->rq_wait[i].inflight);
83 
84 	return ret;
85 }
86 
87 
88 #ifdef CONFIG_BLK_WBT
89 
90 int wbt_init(struct request_queue *);
91 void wbt_update_limits(struct request_queue *);
92 void wbt_disable_default(struct request_queue *);
93 void wbt_enable_default(struct request_queue *);
94 
95 u64 wbt_get_min_lat(struct request_queue *q);
96 void wbt_set_min_lat(struct request_queue *q, u64 val);
97 
98 void wbt_set_queue_depth(struct request_queue *, unsigned int);
99 void wbt_set_write_cache(struct request_queue *, bool);
100 
101 u64 wbt_default_latency_nsec(struct request_queue *);
102 
103 #else
104 
/*
 * NOTE(review): this !CONFIG_BLK_WBT stub has no corresponding
 * declaration in the CONFIG_BLK_WBT branch above, so it appears to be
 * a stale leftover — confirm no external callers, then remove.
 */
static inline void wbt_track(struct request *rq, enum wbt_flags flags)
{
}
/* wbt disabled in this config: report that init cannot succeed. */
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
/* No-op when wbt is compiled out. */
static inline void wbt_update_limits(struct request_queue *q)
{
}
/* No-op when wbt is compiled out. */
static inline void wbt_disable_default(struct request_queue *q)
{
}
/* No-op when wbt is compiled out. */
static inline void wbt_enable_default(struct request_queue *q)
{
}
/* No-op when wbt is compiled out. */
static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
}
/* No-op when wbt is compiled out. */
static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
/* wbt compiled out: no latency target configured. */
static inline u64 wbt_get_min_lat(struct request_queue *q)
{
	return 0;
}
/* No-op when wbt is compiled out. */
static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
{
}
/* wbt compiled out: no default latency target. */
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}
138 
139 #endif /* CONFIG_BLK_WBT */
140 
141 #endif
142