xref: /linux/lib/dim/net_dim.c (revision 0e862838f290147ea9c16db852d8d494b552d38d)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/dim.h>

/*
 * Net DIM profiles:
 *        There is a different set of profiles for each CQ period mode.
 *        There is a different set of profiles for RX and TX CQs.
 *        Each profile table must contain NET_DIM_PARAMS_NUM_PROFILES entries.
 */
#define NET_DIM_PARAMS_NUM_PROFILES 5
#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1

#define NET_DIM_RX_EQE_PROFILES { \
	{.usec = 1,   .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
	{.usec = 8,   .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
	{.usec = 64,  .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
	{.usec = 128, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \
	{.usec = 256, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}  \
}

#define NET_DIM_RX_CQE_PROFILES { \
	{.usec = 2,  .pkts = 256,},             \
	{.usec = 8,  .pkts = 128,},             \
	{.usec = 16, .pkts = 64,},              \
	{.usec = 32, .pkts = 64,},              \
	{.usec = 64, .pkts = 64,}               \
}

#define NET_DIM_TX_EQE_PROFILES { \
	{.usec = 1,   .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
	{.usec = 8,   .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
	{.usec = 32,  .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
	{.usec = 64,  .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,},  \
	{.usec = 128, .pkts = NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE,}   \
}

#define NET_DIM_TX_CQE_PROFILES { \
	{.usec = 5,  .pkts = 128,},  \
	{.usec = 8,  .pkts = 64,},  \
	{.usec = 16, .pkts = 32,},  \
	{.usec = 32, .pkts = 32,},  \
	{.usec = 64, .pkts = 32,}   \
}

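/*
 * The profile tables below are indexed first by CQ period mode
 * (DIM_CQ_PERIOD_MODE_START_FROM_EQE, then DIM_CQ_PERIOD_MODE_START_FROM_CQE)
 * and then by profile index, so the row order must match the enum values
 * defined in <linux/dim.h>.
 */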
static const struct dim_cq_moder
rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
	NET_DIM_RX_EQE_PROFILES,
	NET_DIM_RX_CQE_PROFILES,
};

static const struct dim_cq_moder
tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
	NET_DIM_TX_EQE_PROFILES,
	NET_DIM_TX_CQE_PROFILES,
};

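/**
 * net_dim_get_rx_moderation - provide the RX moderation entry for a profile
 * @cq_period_mode: CQ period mode (EQE- or CQE-based)
 * @ix: index into the RX profile table
 *
 * Returns the usec/pkts pair stored at @ix in the RX table that matches
 * @cq_period_mode, with cq_period_mode filled into the returned struct.
 */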
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
	struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];

	cq_moder.cq_period_mode = cq_period_mode;
	return cq_moder;
}
EXPORT_SYMBOL(net_dim_get_rx_moderation);

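/**
 * net_dim_get_def_rx_moderation - provide the default RX moderation
 * @cq_period_mode: CQ period mode (EQE- or CQE-based)
 *
 * Returns the RX moderation entry at the default profile index for the
 * given CQ period mode (NET_DIM_DEF_PROFILE_CQE or NET_DIM_DEF_PROFILE_EQE).
 */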
struct dim_cq_moder
net_dim_get_def_rx_moderation(u8 cq_period_mode)
{
	u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
			NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;

	return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
}
EXPORT_SYMBOL(net_dim_get_def_rx_moderation);

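/**
 * net_dim_get_tx_moderation - provide the TX moderation entry for a profile
 * @cq_period_mode: CQ period mode (EQE- or CQE-based)
 * @ix: index into the TX profile table
 *
 * Returns the usec/pkts pair stored at @ix in the TX table that matches
 * @cq_period_mode, with cq_period_mode filled into the returned struct.
 */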
struct dim_cq_moder
net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
{
	struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];

	cq_moder.cq_period_mode = cq_period_mode;
	return cq_moder;
}
EXPORT_SYMBOL(net_dim_get_tx_moderation);

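/**
 * net_dim_get_def_tx_moderation - provide the default TX moderation
 * @cq_period_mode: CQ period mode (EQE- or CQE-based)
 *
 * Returns the TX moderation entry at the default profile index for the
 * given CQ period mode (NET_DIM_DEF_PROFILE_CQE or NET_DIM_DEF_PROFILE_EQE).
 */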
struct dim_cq_moder
net_dim_get_def_tx_moderation(u8 cq_period_mode)
{
	u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
			NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;

	return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
}
EXPORT_SYMBOL(net_dim_get_def_tx_moderation);

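/*
 * Move the profile index one step in the current tune direction
 * (DIM_GOING_LEFT/DIM_GOING_RIGHT) and count the step. Returns DIM_TOO_TIRED
 * once 2 * NET_DIM_PARAMS_NUM_PROFILES steps were taken without settling,
 * DIM_ON_EDGE when the index is already at the first/last profile, and
 * DIM_STEPPED otherwise.
 */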
static int net_dim_step(struct dim *dim)
{
	if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
		return DIM_TOO_TIRED;

	switch (dim->tune_state) {
	case DIM_PARKING_ON_TOP:
	case DIM_PARKING_TIRED:
		break;
	case DIM_GOING_RIGHT:
		if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
			return DIM_ON_EDGE;
		dim->profile_ix++;
		dim->steps_right++;
		break;
	case DIM_GOING_LEFT:
		if (dim->profile_ix == 0)
			return DIM_ON_EDGE;
		dim->profile_ix--;
		dim->steps_left++;
		break;
	}

	dim->tired++;
	return DIM_STEPPED;
}

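/*
 * Leave a parking state: resume the search towards lower profiles unless
 * already at profile 0, in which case search upwards, and take the first
 * step immediately.
 */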
static void net_dim_exit_parking(struct dim *dim)
{
	dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
	net_dim_step(dim);
}

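/*
 * Compare the current statistics window against the previous one. Bytes per
 * msec are checked first, then packets per msec, then events per msec (where
 * fewer events count as better); a change only counts when
 * IS_SIGNIFICANT_DIFF() says so. Returns DIM_STATS_BETTER, DIM_STATS_WORSE
 * or DIM_STATS_SAME.
 */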
static int net_dim_stats_compare(struct dim_stats *curr,
				 struct dim_stats *prev)
{
	if (!prev->bpms)
		return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
		return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
						   DIM_STATS_WORSE;

	if (!prev->ppms)
		return curr->ppms ? DIM_STATS_BETTER :
				    DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
		return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
						   DIM_STATS_WORSE;

	if (!prev->epms)
		return DIM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
		return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
						   DIM_STATS_WORSE;

	return DIM_STATS_SAME;
}

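/*
 * Core decision logic. While parked on top, leave parking as soon as the
 * traffic pattern changes; while parked tired, wait for the tiredness
 * counter to decay. While searching, turn around when the last step did not
 * improve the statistics, park when a local optimum is reached or when too
 * many steps were taken, and otherwise take another step.
 * Returns true when the profile index changed and a new profile needs to be
 * applied.
 */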
static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
{
	int prev_state = dim->tune_state;
	int prev_ix = dim->profile_ix;
	int stats_res;
	int step_res;

	switch (dim->tune_state) {
	case DIM_PARKING_ON_TOP:
		stats_res = net_dim_stats_compare(curr_stats,
						  &dim->prev_stats);
		if (stats_res != DIM_STATS_SAME)
			net_dim_exit_parking(dim);
		break;

	case DIM_PARKING_TIRED:
		dim->tired--;
		if (!dim->tired)
			net_dim_exit_parking(dim);
		break;

	case DIM_GOING_RIGHT:
	case DIM_GOING_LEFT:
		stats_res = net_dim_stats_compare(curr_stats,
						  &dim->prev_stats);
		if (stats_res != DIM_STATS_BETTER)
			dim_turn(dim);

		if (dim_on_top(dim)) {
			dim_park_on_top(dim);
			break;
		}

		step_res = net_dim_step(dim);
		switch (step_res) {
		case DIM_ON_EDGE:
			dim_park_on_top(dim);
			break;
		case DIM_TOO_TIRED:
			dim_park_tired(dim);
			break;
		}

		break;
	}

	if (prev_state != DIM_PARKING_ON_TOP ||
	    dim->tune_state != DIM_PARKING_ON_TOP)
		dim->prev_stats = *curr_stats;

	return dim->profile_ix != prev_ix;
}

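/**
 * net_dim - main entry point of the net DIM algorithm
 * @dim: DIM instance data
 * @end_sample: current data measurement
 *
 * Called by the driver for each completion event. Once DIM_NEVENTS events
 * have been observed since the measurement window started, the statistics
 * are computed and a decision is made; when the profile index changes,
 * dim->work is scheduled so the driver can apply the new moderation values.
 */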
void net_dim(struct dim *dim, struct dim_sample end_sample)
{
	struct dim_stats curr_stats;
	u16 nevents;

	switch (dim->state) {
	case DIM_MEASURE_IN_PROGRESS:
		nevents = BIT_GAP(BITS_PER_TYPE(u16),
				  end_sample.event_ctr,
				  dim->start_sample.event_ctr);
		if (nevents < DIM_NEVENTS)
			break;
		dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
		if (net_dim_decision(&curr_stats, dim)) {
			dim->state = DIM_APPLY_NEW_PROFILE;
			schedule_work(&dim->work);
			break;
		}
		fallthrough;
	case DIM_START_MEASURE:
		dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
				  end_sample.byte_ctr, &dim->start_sample);
		dim->state = DIM_MEASURE_IN_PROGRESS;
		break;
	case DIM_APPLY_NEW_PROFILE:
		break;
	}
}
EXPORT_SYMBOL(net_dim);
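
/*
 * Minimal usage sketch (hypothetical driver code, not part of this file).
 * The names my_dim_work(), my_channel and the events/packets/bytes counters
 * are illustrative assumptions; the dim, dim_sample and dim_cq_moder types
 * and the dim_update_sample() helper come from <linux/dim.h>.
 *
 *	static void my_dim_work(struct work_struct *work)
 *	{
 *		struct dim *dim = container_of(work, struct dim, work);
 *		struct dim_cq_moder m =
 *			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
 *
 *		// program m.usec / m.pkts into the hardware CQ here
 *		dim->state = DIM_START_MEASURE;
 *	}
 *
 *	// In the NAPI poll routine, after processing completions:
 *	//	struct dim_sample sample;
 *	//
 *	//	dim_update_sample(events, packets, bytes, &sample);
 *	//	net_dim(&my_channel->dim, sample);
 */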