xref: /linux/drivers/net/wireless/realtek/rtw88/phy.c (revision ba95c7452439756d4f6dceb5a188b7c31dbbe5b6)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019  Realtek Corporation
3  */
4 
5 #include <linux/bcd.h>
6 
7 #include "main.h"
8 #include "reg.h"
9 #include "fw.h"
10 #include "phy.h"
11 #include "debug.h"
12 
13 struct phy_cfg_pair {
14 	u32 addr;
15 	u32 data;
16 };
17 
18 union phy_table_tile {
19 	struct rtw_phy_cond cond;
20 	struct phy_cfg_pair cfg;
21 };
22 
23 struct phy_pg_cfg_pair {
24 	u32 band;
25 	u32 rf_path;
26 	u32 tx_num;
27 	u32 addr;
28 	u32 bitmask;
29 	u32 data;
30 };
31 
32 struct txpwr_lmt_cfg_pair {
33 	u8 regd;
34 	u8 band;
35 	u8 bw;
36 	u8 rs;
37 	u8 ch;
38 	s8 txpwr_lmt;
39 };
40 
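/* db_invert_table[i][j] ~= 10^((8 * i + j + 1) / 10), i.e. the linear power
 * ratio for 1 dB up to 96 dB; the first three rows are stored pre-scaled by
 * 2^FRAC_BITS (8, see FRAC_BITS below) so that small values keep some
 * fractional precision
 */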
41 static const u32 db_invert_table[12][8] = {
42 	{10,		13,		16,		20,
43 	 25,		32,		40,		50},
44 	{64,		80,		101,		128,
45 	 160,		201,		256,		318},
46 	{401,		505,		635,		800,
47 	 1007,		1268,		1596,		2010},
48 	{316,		398,		501,		631,
49 	 794,		1000,		1259,		1585},
50 	{1995,		2512,		3162,		3981,
51 	 5012,		6310,		7943,		10000},
52 	{12589,		15849,		19953,		25119,
53 	 31623,		39811,		50119,		63098},
54 	{79433,		100000,		125893,		158489,
55 	 199526,	251189,		316228,		398107},
56 	{501187,	630957,		794328,		1000000,
57 	 1258925,	1584893,	1995262,	2511886},
58 	{3162278,	3981072,	5011872,	6309573,
59 	 7943282,	10000000,	12589254,	15848932},
60 	{19952623,	25118864,	31622777,	39810717,
61 	 50118723,	63095734,	79432823,	100000000},
62 	{125892541,	158489319,	199526232,	251188643,
63 	 316227766,	398107171,	501187234,	630957345},
64 	{794328235,	1000000000,	1258925412,	1584893192,
65 	 1995262315,	2511886432U,	3162277660U,	3981071706U}
66 };
67 
68 enum rtw_phy_band_type {
69 	PHY_BAND_2G	= 0,
70 	PHY_BAND_5G	= 1,
71 };
72 
73 void rtw_phy_init(struct rtw_dev *rtwdev)
74 {
75 	struct rtw_chip_info *chip = rtwdev->chip;
76 	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
77 	u32 addr, mask;
78 
79 	dm_info->fa_history[3] = 0;
80 	dm_info->fa_history[2] = 0;
81 	dm_info->fa_history[1] = 0;
82 	dm_info->fa_history[0] = 0;
83 	dm_info->igi_bitmap = 0;
84 	dm_info->igi_history[3] = 0;
85 	dm_info->igi_history[2] = 0;
86 	dm_info->igi_history[1] = 0;
87 
88 	addr = chip->dig[0].addr;
89 	mask = chip->dig[0].mask;
90 	dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
91 }
92 
93 void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
94 {
95 	struct rtw_chip_info *chip = rtwdev->chip;
96 	struct rtw_hal *hal = &rtwdev->hal;
97 	u32 addr, mask;
98 	u8 path;
99 
100 	for (path = 0; path < hal->rf_path_num; path++) {
101 		addr = chip->dig[path].addr;
102 		mask = chip->dig[path].mask;
103 		rtw_write32_mask(rtwdev, addr, mask, igi);
104 	}
105 }
106 
107 static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
108 {
109 	struct rtw_chip_info *chip = rtwdev->chip;
110 
111 	chip->ops->false_alarm_statistics(rtwdev);
112 }
113 
114 #define RA_FLOOR_TABLE_SIZE	7
115 #define RA_FLOOR_UP_GAP		3
116 
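/* Map an RSSI value to a rate-adaptation level (0..RA_FLOOR_TABLE_SIZE - 1).
 * Thresholds at or above the previous level are raised by RA_FLOOR_UP_GAP,
 * so the level only climbs once the RSSI clearly exceeds the boundary,
 * which keeps the reported level from flapping.
 */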
117 static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
118 {
119 	u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
120 	u8 new_level = 0;
121 	int i;
122 
123 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
124 		if (i >= old_level)
125 			table[i] += RA_FLOOR_UP_GAP;
126 
127 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
128 		if (rssi < table[i]) {
129 			new_level = i;
130 			break;
131 		}
132 	}
133 
134 	return new_level;
135 }
136 
137 struct rtw_phy_stat_iter_data {
138 	struct rtw_dev *rtwdev;
139 	u8 min_rssi;
140 };
141 
142 static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
143 {
144 	struct rtw_phy_stat_iter_data *iter_data = data;
145 	struct rtw_dev *rtwdev = iter_data->rtwdev;
146 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
147 	u8 rssi;
148 
149 	rssi = ewma_rssi_read(&si->avg_rssi);
150 	si->rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
151 
152 	rtw_fw_send_rssi_info(rtwdev, si);
153 
154 	iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
155 }
156 
157 static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
158 {
159 	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
160 	struct rtw_phy_stat_iter_data data = {};
161 
162 	data.rtwdev = rtwdev;
163 	data.min_rssi = U8_MAX;
164 	rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
165 
166 	dm_info->pre_min_rssi = dm_info->min_rssi;
167 	dm_info->min_rssi = data.min_rssi;
168 }
169 
170 static void rtw_phy_statistics(struct rtw_dev *rtwdev)
171 {
172 	rtw_phy_stat_rssi(rtwdev);
173 	rtw_phy_stat_false_alarm(rtwdev);
174 }
175 
176 #define DIG_PERF_FA_TH_LOW			250
177 #define DIG_PERF_FA_TH_HIGH			500
178 #define DIG_PERF_FA_TH_EXTRA_HIGH		750
179 #define DIG_PERF_MAX				0x5a
180 #define DIG_PERF_MID				0x40
181 #define DIG_CVRG_FA_TH_LOW			2000
182 #define DIG_CVRG_FA_TH_HIGH			4000
183 #define DIG_CVRG_FA_TH_EXTRA_HIGH		5000
184 #define DIG_CVRG_MAX				0x2a
185 #define DIG_CVRG_MID				0x26
186 #define DIG_CVRG_MIN				0x1c
187 #define DIG_RSSI_GAIN_OFFSET			15
188 
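/* Detect IGI oscillation ("damping"): look at the last four IGI moves and
 * false alarm counts for a down/up/down/up or up/down/down/up pattern with
 * the FA count toggling across the high/low thresholds.  Once detected,
 * further DIG updates are frozen until the minimum RSSI moves by more than
 * 3 or 20 more watchdog rounds have passed.
 */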
189 static bool
190 rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
191 {
192 	u16 fa_lo = DIG_PERF_FA_TH_LOW;
193 	u16 fa_hi = DIG_PERF_FA_TH_HIGH;
194 	u16 *fa_history;
195 	u8 *igi_history;
196 	u8 damping_rssi;
197 	u8 min_rssi;
198 	u8 diff;
199 	u8 igi_bitmap;
200 	bool damping = false;
201 
202 	min_rssi = dm_info->min_rssi;
203 	if (dm_info->damping) {
204 		damping_rssi = dm_info->damping_rssi;
205 		diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
206 						 damping_rssi - min_rssi;
207 		if (diff > 3 || dm_info->damping_cnt++ > 20) {
208 			dm_info->damping = false;
209 			return false;
210 		}
211 
212 		return true;
213 	}
214 
215 	igi_history = dm_info->igi_history;
216 	fa_history = dm_info->fa_history;
217 	igi_bitmap = dm_info->igi_bitmap & 0xf;
218 	switch (igi_bitmap) {
219 	case 5:
220 		/* down -> up -> down -> up */
221 		if (igi_history[0] > igi_history[1] &&
222 		    igi_history[2] > igi_history[3] &&
223 		    igi_history[0] - igi_history[1] >= 2 &&
224 		    igi_history[2] - igi_history[3] >= 2 &&
225 		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
226 		    fa_history[2] > fa_hi && fa_history[3] < fa_lo)
227 			damping = true;
228 		break;
229 	case 9:
230 		/* up -> down -> down -> up */
231 		if (igi_history[0] > igi_history[1] &&
232 		    igi_history[3] > igi_history[2] &&
233 		    igi_history[0] - igi_history[1] >= 4 &&
234 		    igi_history[3] - igi_history[2] >= 2 &&
235 		    fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
236 		    fa_history[2] < fa_lo && fa_history[3] > fa_hi)
237 			damping = true;
238 		break;
239 	default:
240 		return false;
241 	}
242 
243 	if (damping) {
244 		dm_info->damping = true;
245 		dm_info->damping_cnt = 0;
246 		dm_info->damping_rssi = min_rssi;
247 	}
248 
249 	return damping;
250 }
251 
252 static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
253 				     u8 *upper, u8 *lower, bool linked)
254 {
255 	u8 dig_max, dig_min, dig_mid;
256 	u8 min_rssi;
257 
258 	if (linked) {
259 		dig_max = DIG_PERF_MAX;
260 		dig_mid = DIG_PERF_MID;
261 		/* 22B=0x1c, 22C=0x20 */
262 		dig_min = 0x1c;
263 		min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
264 	} else {
265 		dig_max = DIG_CVRG_MAX;
266 		dig_mid = DIG_CVRG_MID;
267 		dig_min = DIG_CVRG_MIN;
268 		min_rssi = dig_min;
269 	}
270 
271 	/* DIG MAX should be bounded by minimum RSSI with offset +15 */
272 	dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);
273 
274 	*lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
275 	*upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
276 }
277 
278 static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
279 				      u16 *fa_th, u8 *step, bool linked)
280 {
281 	u8 min_rssi, pre_min_rssi;
282 
283 	min_rssi = dm_info->min_rssi;
284 	pre_min_rssi = dm_info->pre_min_rssi;
285 	step[0] = 4;
286 	step[1] = 3;
287 	step[2] = 2;
288 
289 	if (linked) {
290 		fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
291 		fa_th[1] = DIG_PERF_FA_TH_HIGH;
292 		fa_th[2] = DIG_PERF_FA_TH_LOW;
293 		if (pre_min_rssi > min_rssi) {
294 			step[0] = 6;
295 			step[1] = 4;
296 			step[2] = 2;
297 		}
298 	} else {
299 		fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
300 		fa_th[1] = DIG_CVRG_FA_TH_HIGH;
301 		fa_th[2] = DIG_CVRG_FA_TH_LOW;
302 	}
303 }
304 
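/* Shift the IGI and false alarm histories and track the IGI trend in
 * igi_bitmap: each round the bitmap is shifted left and bit 0 is set when
 * the new IGI is higher than the previous one.
 */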
305 static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
306 {
307 	u8 *igi_history;
308 	u16 *fa_history;
309 	u8 igi_bitmap;
310 	bool up;
311 
312 	igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
313 	igi_history = dm_info->igi_history;
314 	fa_history = dm_info->fa_history;
315 
316 	up = igi > igi_history[0];
317 	igi_bitmap |= up;
318 
319 	igi_history[3] = igi_history[2];
320 	igi_history[2] = igi_history[1];
321 	igi_history[1] = igi_history[0];
322 	igi_history[0] = igi;
323 
324 	fa_history[3] = fa_history[2];
325 	fa_history[2] = fa_history[1];
326 	fa_history[1] = fa_history[0];
327 	fa_history[0] = fa;
328 
329 	dm_info->igi_bitmap = igi_bitmap;
330 }
331 
332 static void rtw_phy_dig(struct rtw_dev *rtwdev)
333 {
334 	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
335 	u8 upper_bound, lower_bound;
336 	u8 pre_igi, cur_igi;
337 	u16 fa_th[3], fa_cnt;
338 	u8 level;
339 	u8 step[3];
340 	bool linked;
341 
342 	if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
343 		return;
344 
345 	if (rtw_phy_dig_check_damping(dm_info))
346 		return;
347 
348 	linked = !!rtwdev->sta_cnt;
349 
350 	fa_cnt = dm_info->total_fa_cnt;
351 	pre_igi = dm_info->igi_history[0];
352 
353 	rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);
354 
355 	/* test the false alarm count against the thresholds from the highest
356 	 * level down, and increase the IGI by the corresponding step size
357 	 *
358 	 * note that each step size includes a +2 offset, compensated afterwards
359 	 */
360 	cur_igi = pre_igi;
361 	for (level = 0; level < 3; level++) {
362 		if (fa_cnt > fa_th[level]) {
363 			cur_igi += step[level];
364 			break;
365 		}
366 	}
367 	cur_igi -= 2;
368 
369 	/* calculate the upper/lower bound from the minimum rssi among the
370 	 * peers connected to us, and make sure the igi value does not exceed
371 	 * the hardware limitation
372 	 */
373 	rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
374 	cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
375 
376 	/* record current igi value and false alarm statistics for further
377 	 * damping checks, and record the trend of igi values
378 	 */
379 	rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);
380 
381 	if (cur_igi != pre_igi)
382 		rtw_phy_dig_write(rtwdev, cur_igi);
383 }
384 
385 static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
386 {
387 	struct rtw_dev *rtwdev = data;
388 	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
389 
390 	rtw_update_sta_info(rtwdev, si);
391 }
392 
393 static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
394 {
395 	if (rtwdev->watch_dog_cnt & 0x3)
396 		return;
397 
398 	rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
399 }
400 
401 void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
402 {
403 	/* gather RSSI and false alarm statistics for the mechanisms below */
404 	rtw_phy_statistics(rtwdev);
405 	rtw_phy_dig(rtwdev);
406 	rtw_phy_ra_info_update(rtwdev);
407 }
408 
409 #define FRAC_BITS 3
410 
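/* Convert signal power in dBm to dB above an assumed -100 dBm noise floor:
 * (-100, 0) dBm maps to (0, 100), small positive values saturate at 100,
 * and anything at or below -100 dBm or at or above 20 dBm returns 0.
 */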
411 static u8 rtw_phy_power_2_db(s8 power)
412 {
413 	if (power <= -100 || power >= 20)
414 		return 0;
415 	else if (power >= 0)
416 		return 100;
417 	else
418 		return 100 + power;
419 }
420 
421 static u64 rtw_phy_db_2_linear(u8 power_db)
422 {
423 	u8 i, j;
424 	u64 linear;
425 
426 	if (power_db > 96)
427 		power_db = 96;
428 	else if (power_db < 1)
429 		return 1;
430 
431 	/* 1dB ~ 96dB */
432 	i = (power_db - 1) >> 3;
433 	j = (power_db - 1) - (i << 3);
434 
435 	linear = db_invert_table[i][j];
436 	linear = i > 2 ? linear << FRAC_BITS : linear;
437 
438 	return linear;
439 }
440 
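/* Inverse of rtw_phy_db_2_linear(): find the nearest db_invert_table entry
 * and return its dB value (1..96).  The first three rows are compared
 * against linear << FRAC_BITS because those rows are stored pre-scaled.
 */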
441 static u8 rtw_phy_linear_2_db(u64 linear)
442 {
443 	u8 i;
444 	u8 j;
445 	u32 dB;
446 
447 	if (linear >= db_invert_table[11][7])
448 		return 96; /* maximum 96 dB */
449 
450 	for (i = 0; i < 12; i++) {
451 		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
452 			break;
453 		else if (i > 2 && linear <= db_invert_table[i][7])
454 			break;
455 	}
456 
457 	for (j = 0; j < 8; j++) {
458 		if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
459 			break;
460 		else if (i > 2 && linear <= db_invert_table[i][j])
461 			break;
462 	}
463 
464 	if (j == 0 && i == 0)
465 		goto end;
466 
467 	if (j == 0) {
468 		if (i != 3) {
469 			if (db_invert_table[i][0] - linear >
470 			    linear - db_invert_table[i - 1][7]) {
471 				i = i - 1;
472 				j = 7;
473 			}
474 		} else {
475 			if (db_invert_table[3][0] - linear >
476 			    linear - db_invert_table[2][7]) {
477 				i = 2;
478 				j = 7;
479 			}
480 		}
481 	} else {
482 		if (db_invert_table[i][j] - linear >
483 		    linear - db_invert_table[i][j - 1]) {
484 			j = j - 1;
485 		}
486 	}
487 end:
488 	dB = (i << 3) + j + 1;
489 
490 	return dB;
491 }
492 
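/* Combine per-path signal powers into one RSSI: convert each path to dB
 * above the noise floor, then to linear (Q3 fixed point), sum them, round
 * off the fractional bits, average over the number of paths (the 3-path
 * case uses sum * 11 / 32 ~= sum / 2.9 to avoid a division) and convert
 * back to dB.
 */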
493 u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
494 {
495 	s8 power;
496 	u8 power_db;
497 	u64 linear;
498 	u64 sum = 0;
499 	u8 path;
500 
501 	for (path = 0; path < path_num; path++) {
502 		power = rf_power[path];
503 		power_db = rtw_phy_power_2_db(power);
504 		linear = rtw_phy_db_2_linear(power_db);
505 		sum += linear;
506 	}
507 
508 	sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
509 	switch (path_num) {
510 	case 2:
511 		sum >>= 1;
512 		break;
513 	case 3:
514 		sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
515 		break;
516 	case 4:
517 		sum >>= 2;
518 		break;
519 	default:
520 		break;
521 	}
522 
523 	return rtw_phy_linear_2_db(sum);
524 }
525 
526 u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
527 		    u32 addr, u32 mask)
528 {
529 	struct rtw_hal *hal = &rtwdev->hal;
530 	struct rtw_chip_info *chip = rtwdev->chip;
531 	const u32 *base_addr = chip->rf_base_addr;
532 	u32 val, direct_addr;
533 
534 	if (rf_path >= hal->rf_path_num) {
535 		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
536 		return INV_RF_DATA;
537 	}
538 
539 	addr &= 0xff;
540 	direct_addr = base_addr[rf_path] + (addr << 2);
541 	mask &= RFREG_MASK;
542 
543 	val = rtw_read32_mask(rtwdev, direct_addr, mask);
544 
545 	return val;
546 }
547 
548 bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
549 			       u32 addr, u32 mask, u32 data)
550 {
551 	struct rtw_hal *hal = &rtwdev->hal;
552 	struct rtw_chip_info *chip = rtwdev->chip;
553 	u32 *sipi_addr = chip->rf_sipi_addr;
554 	u32 data_and_addr;
555 	u32 old_data = 0;
556 	u32 shift;
557 
558 	if (rf_path >= hal->rf_path_num) {
559 		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
560 		return false;
561 	}
562 
563 	addr &= 0xff;
564 	mask &= RFREG_MASK;
565 
566 	if (mask != RFREG_MASK) {
567 		old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
568 
569 		if (old_data == INV_RF_DATA) {
570 			rtw_err(rtwdev, "Write fail, rf is disabled\n");
571 			return false;
572 		}
573 
574 		shift = __ffs(mask);
575 		data = ((old_data) & (~mask)) | (data << shift);
576 	}
577 
578 	data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
579 
580 	rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
581 
582 	udelay(13);
583 
584 	return true;
585 }
586 
587 bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
588 			  u32 addr, u32 mask, u32 data)
589 {
590 	struct rtw_hal *hal = &rtwdev->hal;
591 	struct rtw_chip_info *chip = rtwdev->chip;
592 	const u32 *base_addr = chip->rf_base_addr;
593 	u32 direct_addr;
594 
595 	if (rf_path >= hal->rf_path_num) {
596 		rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
597 		return false;
598 	}
599 
600 	addr &= 0xff;
601 	direct_addr = base_addr[rf_path] + (addr << 2);
602 	mask &= RFREG_MASK;
603 
604 	if (addr == RF_CFGCH) {
605 		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
606 		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
607 	}
608 
609 	rtw_write32_mask(rtwdev, direct_addr, mask, data);
610 
611 	udelay(1);
612 
613 	if (addr == RF_CFGCH) {
614 		rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
615 		rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
616 	}
617 
618 	return true;
619 }
620 
621 bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
622 			      u32 addr, u32 mask, u32 data)
623 {
624 	if (addr != 0x00)
625 		return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
626 
627 	return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
628 }
629 
630 void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
631 {
632 	struct rtw_hal *hal = &rtwdev->hal;
633 	struct rtw_efuse *efuse = &rtwdev->efuse;
634 	struct rtw_phy_cond cond = {0};
635 
636 	cond.cut = hal->cut_version ? hal->cut_version : 15;
637 	cond.pkg = pkg ? pkg : 15;
638 	cond.plat = 0x04;
639 	cond.rfe = efuse->rfe_option;
640 
641 	switch (rtw_hci_type(rtwdev)) {
642 	case RTW_HCI_TYPE_USB:
643 		cond.intf = INTF_USB;
644 		break;
645 	case RTW_HCI_TYPE_SDIO:
646 		cond.intf = INTF_SDIO;
647 		break;
648 	case RTW_HCI_TYPE_PCIE:
649 	default:
650 		cond.intf = INTF_PCIE;
651 		break;
652 	}
653 
654 	hal->phy_cond = cond;
655 
656 	rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
657 }
658 
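/* Conditional PHY tables interleave condition words with addr/data pairs:
 * a 'pos' entry opens or closes a branch (IF/ELIF/ELSE/ENDIF) and the
 * following 'neg' entry triggers the evaluation.  check_positive() matches
 * the stored cut/pkg/intf/rfe fields against the phy_cond set up in
 * rtw_phy_setup_phy_cond(); a zero cut/pkg/intf acts as a wildcard, while
 * rfe must match exactly.  Only entries inside a matched branch are written
 * through tbl->do_cfg().
 */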
659 static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
660 {
661 	struct rtw_hal *hal = &rtwdev->hal;
662 	struct rtw_phy_cond drv_cond = hal->phy_cond;
663 
664 	if (cond.cut && cond.cut != drv_cond.cut)
665 		return false;
666 
667 	if (cond.pkg && cond.pkg != drv_cond.pkg)
668 		return false;
669 
670 	if (cond.intf && cond.intf != drv_cond.intf)
671 		return false;
672 
673 	if (cond.rfe != drv_cond.rfe)
674 		return false;
675 
676 	return true;
677 }
678 
679 void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
680 {
681 	const union phy_table_tile *p = tbl->data;
682 	const union phy_table_tile *end = p + tbl->size / 2;
683 	struct rtw_phy_cond pos_cond = {0};
684 	bool is_matched = true, is_skipped = false;
685 
686 	BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
687 
688 	for (; p < end; p++) {
689 		if (p->cond.pos) {
690 			switch (p->cond.branch) {
691 			case BRANCH_ENDIF:
692 				is_matched = true;
693 				is_skipped = false;
694 				break;
695 			case BRANCH_ELSE:
696 				is_matched = is_skipped ? false : true;
697 				break;
698 			case BRANCH_IF:
699 			case BRANCH_ELIF:
700 			default:
701 				pos_cond = p->cond;
702 				break;
703 			}
704 		} else if (p->cond.neg) {
705 			if (!is_skipped) {
706 				if (check_positive(rtwdev, pos_cond)) {
707 					is_matched = true;
708 					is_skipped = true;
709 				} else {
710 					is_matched = false;
711 					is_skipped = false;
712 				}
713 			} else {
714 				is_matched = false;
715 			}
716 		} else if (is_matched) {
717 			(*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
718 		}
719 	}
720 }
721 
722 void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
723 {
724 	const struct phy_pg_cfg_pair *p = tbl->data;
725 	const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
726 
727 	BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
728 
729 	for (; p < end; p++) {
730 		if (p->addr == 0xfe || p->addr == 0xffe) {
731 			msleep(50);
732 			continue;
733 		}
734 		phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
735 					   p->tx_num, p->addr, p->bitmask,
736 					   p->data);
737 	}
738 }
739 
740 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
741 			     const struct rtw_table *tbl)
742 {
743 	const struct txpwr_lmt_cfg_pair *p = tbl->data;
744 	const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
745 
746 	BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
747 
748 	for (; p < end; p++) {
749 		phy_set_tx_power_limit(rtwdev, p->regd, p->band,
750 				       p->bw, p->rs,
751 				       p->ch, p->txpwr_lmt);
752 	}
753 }
754 
755 void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
756 		     u32 addr, u32 data)
757 {
758 	rtw_write8(rtwdev, addr, data);
759 }
760 
761 void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
762 		     u32 addr, u32 data)
763 {
764 	rtw_write32(rtwdev, addr, data);
765 }
766 
767 void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
768 		    u32 addr, u32 data)
769 {
770 	if (addr == 0xfe)
771 		msleep(50);
772 	else if (addr == 0xfd)
773 		mdelay(5);
774 	else if (addr == 0xfc)
775 		mdelay(1);
776 	else if (addr == 0xfb)
777 		usleep_range(50, 60);
778 	else if (addr == 0xfa)
779 		udelay(5);
780 	else if (addr == 0xf9)
781 		udelay(1);
782 	else
783 		rtw_write32(rtwdev, addr, data);
784 }
785 
786 void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
787 		    u32 addr, u32 data)
788 {
789 	if (addr == 0xffe) {
790 		msleep(50);
791 	} else if (addr == 0xfe) {
792 		usleep_range(100, 110);
793 	} else {
794 		rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
795 		udelay(1);
796 	}
797 }
798 
799 static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
800 {
801 	struct rtw_chip_info *chip = rtwdev->chip;
802 
803 	if (!chip->rfk_init_tbl)
804 		return;
805 
806 	rtw_load_table(rtwdev, chip->rfk_init_tbl);
807 }
808 
809 void rtw_phy_load_tables(struct rtw_dev *rtwdev)
810 {
811 	struct rtw_chip_info *chip = rtwdev->chip;
812 	u8 rf_path;
813 
814 	rtw_load_table(rtwdev, chip->mac_tbl);
815 	rtw_load_table(rtwdev, chip->bb_tbl);
816 	rtw_load_table(rtwdev, chip->agc_tbl);
817 	rtw_load_rfk_table(rtwdev);
818 
819 	for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
820 		const struct rtw_table *tbl;
821 
822 		tbl = chip->rf_tbl[rf_path];
823 		rtw_load_table(rtwdev, tbl);
824 	}
825 }
826 
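/* Power-by-rate table values pack four per-rate indexes into one 32-bit
 * word, one byte per rate.  bcd_to_dec_pwr_by_rate() extracts byte i and
 * converts it from BCD (e.g. 0x32 -> 32); tbl_to_dec_pwr_by_rate() below
 * applies the BCD conversion only on chips with ->is_pwr_by_rate_dec set,
 * otherwise the raw byte is used.
 */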
827 #define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
828 
829 #define RTW_MAX_POWER_INDEX		0x3F
830 
831 u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
832 u8 rtw_ofdm_rates[] = {
833 	DESC_RATE6M,  DESC_RATE9M,  DESC_RATE12M,
834 	DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
835 	DESC_RATE48M, DESC_RATE54M
836 };
837 u8 rtw_ht_1s_rates[] = {
838 	DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
839 	DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
840 	DESC_RATEMCS6, DESC_RATEMCS7
841 };
842 u8 rtw_ht_2s_rates[] = {
843 	DESC_RATEMCS8,  DESC_RATEMCS9,  DESC_RATEMCS10,
844 	DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
845 	DESC_RATEMCS14, DESC_RATEMCS15
846 };
847 u8 rtw_vht_1s_rates[] = {
848 	DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
849 	DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
850 	DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
851 	DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
852 	DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
853 };
854 u8 rtw_vht_2s_rates[] = {
855 	DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
856 	DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
857 	DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
858 	DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
859 	DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
860 };
861 
862 static u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
863 static u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
864 static u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
865 static u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
866 static u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
867 static u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
868 u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
869 	rtw_cck_rates, rtw_ofdm_rates,
870 	rtw_ht_1s_rates, rtw_ht_2s_rates,
871 	rtw_vht_1s_rates, rtw_vht_2s_rates
872 };
873 u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
874 	ARRAY_SIZE(rtw_cck_rates),
875 	ARRAY_SIZE(rtw_ofdm_rates),
876 	ARRAY_SIZE(rtw_ht_1s_rates),
877 	ARRAY_SIZE(rtw_ht_2s_rates),
878 	ARRAY_SIZE(rtw_vht_1s_rates),
879 	ARRAY_SIZE(rtw_vht_2s_rates)
880 };
881 
882 static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
883 	36,  38,  40,  42,  44,  46,  48, /* Band 1 */
884 	52,  54,  56,  58,  60,  62,  64, /* Band 2 */
885 	100, 102, 104, 106, 108, 110, 112, /* Band 3 */
886 	116, 118, 120, 122, 124, 126, 128, /* Band 3 */
887 	132, 134, 136, 138, 140, 142, 144, /* Band 3 */
888 	149, 151, 153, 155, 157, 159, 161, /* Band 4 */
889 	165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
890 
891 static int rtw_channel_to_idx(u8 band, u8 channel)
892 {
893 	int ch_idx;
894 	u8 n_channel;
895 
896 	if (band == PHY_BAND_2G) {
897 		ch_idx = channel - 1;
898 		n_channel = RTW_MAX_CHANNEL_NUM_2G;
899 	} else if (band == PHY_BAND_5G) {
900 		n_channel = RTW_MAX_CHANNEL_NUM_5G;
901 		for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
902 			if (rtw_channel_idx_5g[ch_idx] == channel)
903 				break;
904 	} else {
905 		return -1;
906 	}
907 
908 	if (ch_idx >= n_channel)
909 		return -1;
910 
911 	return ch_idx;
912 }
913 
914 static u8 rtw_get_channel_group(u8 channel)
915 {
916 	switch (channel) {
917 	default:
918 		WARN_ON(1);
919 		/* fall through */
920 	case 1:
921 	case 2:
922 	case 36:
923 	case 38:
924 	case 40:
925 	case 42:
926 		return 0;
927 	case 3:
928 	case 4:
929 	case 5:
930 	case 44:
931 	case 46:
932 	case 48:
933 	case 50:
934 		return 1;
935 	case 6:
936 	case 7:
937 	case 8:
938 	case 52:
939 	case 54:
940 	case 56:
941 	case 58:
942 		return 2;
943 	case 9:
944 	case 10:
945 	case 11:
946 	case 60:
947 	case 62:
948 	case 64:
949 		return 3;
950 	case 12:
951 	case 13:
952 	case 100:
953 	case 102:
954 	case 104:
955 	case 106:
956 		return 4;
957 	case 14:
958 	case 108:
959 	case 110:
960 	case 112:
961 	case 114:
962 		return 5;
963 	case 116:
964 	case 118:
965 	case 120:
966 	case 122:
967 		return 6;
968 	case 124:
969 	case 126:
970 	case 128:
971 	case 130:
972 		return 7;
973 	case 132:
974 	case 134:
975 	case 136:
976 	case 138:
977 		return 8;
978 	case 140:
979 	case 142:
980 	case 144:
981 		return 9;
982 	case 149:
983 	case 151:
984 	case 153:
985 	case 155:
986 		return 10;
987 	case 157:
988 	case 159:
989 	case 161:
990 		return 11;
991 	case 165:
992 	case 167:
993 	case 169:
994 	case 171:
995 		return 12;
996 	case 173:
997 	case 175:
998 	case 177:
999 		return 13;
1000 	}
1001 }
1002 
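/* Base 2.4G TX power index from the efuse: CCK rates use cck_base[] of the
 * channel group, everything else starts from bw40_base[]; OFDM adds the
 * ofdm diff, and HT/VHT rates add the bw20/bw40 and 2SS diffs, each scaled
 * by chip->txgi_factor.
 */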
1003 static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
1004 				    struct rtw_2g_txpwr_idx *pwr_idx_2g,
1005 				    enum rtw_bandwidth bandwidth,
1006 				    u8 rate, u8 group)
1007 {
1008 	struct rtw_chip_info *chip = rtwdev->chip;
1009 	u8 tx_power;
1010 	bool mcs_rate;
1011 	bool above_2ss;
1012 	u8 factor = chip->txgi_factor;
1013 
1014 	if (rate <= DESC_RATE11M)
1015 		tx_power = pwr_idx_2g->cck_base[group];
1016 	else
1017 		tx_power = pwr_idx_2g->bw40_base[group];
1018 
1019 	if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1020 		tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
1021 
1022 	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1023 		   (rate >= DESC_RATEVHT1SS_MCS0 &&
1024 		    rate <= DESC_RATEVHT2SS_MCS9);
1025 	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1026 		    (rate >= DESC_RATEVHT2SS_MCS0);
1027 
1028 	if (!mcs_rate)
1029 		return tx_power;
1030 
1031 	switch (bandwidth) {
1032 	default:
1033 		WARN_ON(1);
1034 		/* fall through */
1035 	case RTW_CHANNEL_WIDTH_20:
1036 		tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
1037 		if (above_2ss)
1038 			tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
1039 		break;
1040 	case RTW_CHANNEL_WIDTH_40:
1041 		/* bw40 is the base power */
1042 		if (above_2ss)
1043 			tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
1044 		break;
1045 	}
1046 
1047 	return tx_power;
1048 }
1049 
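/* 5G works the same way but has no CCK rates: OFDM adds the ofdm diff on
 * top of bw40_base[], and 80 MHz starts from the average of the two
 * adjacent bw40 bases plus the VHT diffs.
 */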
1050 static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
1051 				    struct rtw_5g_txpwr_idx *pwr_idx_5g,
1052 				    enum rtw_bandwidth bandwidth,
1053 				    u8 rate, u8 group)
1054 {
1055 	struct rtw_chip_info *chip = rtwdev->chip;
1056 	u8 tx_power;
1057 	u8 upper, lower;
1058 	bool mcs_rate;
1059 	bool above_2ss;
1060 	u8 factor = chip->txgi_factor;
1061 
1062 	tx_power = pwr_idx_5g->bw40_base[group];
1063 
1064 	mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
1065 		   (rate >= DESC_RATEVHT1SS_MCS0 &&
1066 		    rate <= DESC_RATEVHT2SS_MCS9);
1067 	above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
1068 		    (rate >= DESC_RATEVHT2SS_MCS0);
1069 
1070 	if (!mcs_rate) {
1071 		tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
1072 		return tx_power;
1073 	}
1074 
1075 	switch (bandwidth) {
1076 	default:
1077 		WARN_ON(1);
1078 		/* fall through */
1079 	case RTW_CHANNEL_WIDTH_20:
1080 		tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
1081 		if (above_2ss)
1082 			tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
1083 		break;
1084 	case RTW_CHANNEL_WIDTH_40:
1085 		/* bw40 is the base power */
1086 		if (above_2ss)
1087 			tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
1088 		break;
1089 	case RTW_CHANNEL_WIDTH_80:
1090 		/* the base idx of bw80 is the average of bw40+/bw40- */
1091 		lower = pwr_idx_5g->bw40_base[group];
1092 		upper = pwr_idx_5g->bw40_base[group + 1];
1093 
1094 		tx_power = (lower + upper) / 2;
1095 		tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
1096 		if (above_2ss)
1097 			tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
1098 		break;
1099 	}
1100 
1101 	return tx_power;
1102 }
1103 
1104 /* set tx power level by path for each rate; note that the order of the rates
1105  * is *very* important, because 8822B/8821C combines every four bytes of tx
1106  * power index into a four-byte power index register, and calls set_tx_agc to
1107  * write these values into hardware
1108  */
1109 static
1110 void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
1111 {
1112 	struct rtw_hal *hal = &rtwdev->hal;
1113 	u8 rs;
1114 
1115 	/* do not need cck rates if we are not in 2.4G */
1116 	if (hal->current_band_type == RTW_BAND_2G)
1117 		rs = RTW_RATE_SECTION_CCK;
1118 	else
1119 		rs = RTW_RATE_SECTION_OFDM;
1120 
1121 	for (; rs < RTW_RATE_SECTION_MAX; rs++)
1122 		phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
1123 }
1124 
1125 void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
1126 {
1127 	struct rtw_chip_info *chip = rtwdev->chip;
1128 	struct rtw_hal *hal = &rtwdev->hal;
1129 	u8 path;
1130 
1131 	mutex_lock(&hal->tx_power_mutex);
1132 
1133 	for (path = 0; path < hal->rf_path_num; path++)
1134 		phy_set_tx_power_level_by_path(rtwdev, channel, path);
1135 
1136 	chip->ops->set_tx_power_index(rtwdev);
1137 	mutex_unlock(&hal->tx_power_mutex);
1138 }
1139 
1140 s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
1141 			  enum rtw_bandwidth bandwidth, u8 rf_path,
1142 			  u8 rate, u8 channel, u8 regd);
1143 
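/* Final TX power index = efuse base index + per-rate offset, where the
 * offset is capped by the regulatory limit for this band/bw/rate/channel
 * and the sum is clamped to chip->max_power_index.  For example
 * (illustrative values only), a base of 0x2e with a +4 per-rate offset but
 * a limit of 2 gives 0x30.
 */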
1144 static
1145 u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
1146 			  enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
1147 {
1148 	struct rtw_dev *rtwdev = adapter;
1149 	struct rtw_hal *hal = &rtwdev->hal;
1150 	struct rtw_txpwr_idx *pwr_idx;
1151 	u8 tx_power;
1152 	u8 group;
1153 	u8 band;
1154 	s8 offset, limit;
1155 
1156 	pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
1157 	group = rtw_get_channel_group(channel);
1158 
1159 	/* base power index for 2.4G/5G */
1160 	if (channel <= 14) {
1161 		band = PHY_BAND_2G;
1162 		tx_power = phy_get_2g_tx_power_index(rtwdev,
1163 						     &pwr_idx->pwr_idx_2g,
1164 						     bandwidth, rate, group);
1165 		offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
1166 	} else {
1167 		band = PHY_BAND_5G;
1168 		tx_power = phy_get_5g_tx_power_index(rtwdev,
1169 						     &pwr_idx->pwr_idx_5g,
1170 						     bandwidth, rate, group);
1171 		offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
1172 	}
1173 
1174 	limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
1175 				       rate, channel, regd);
1176 
1177 	if (offset > limit)
1178 		offset = limit;
1179 
1180 	tx_power += offset;
1181 
1182 	if (tx_power > rtwdev->chip->max_power_index)
1183 		tx_power = rtwdev->chip->max_power_index;
1184 
1185 	return tx_power;
1186 }
1187 
1188 void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
1189 {
1190 	struct rtw_dev *rtwdev = adapter;
1191 	struct rtw_hal *hal = &rtwdev->hal;
1192 	u8 regd = rtwdev->regd.txpwr_regd;
1193 	u8 *rates;
1194 	u8 size;
1195 	u8 rate;
1196 	u8 pwr_idx;
1197 	u8 bw;
1198 	int i;
1199 
1200 	if (rs >= RTW_RATE_SECTION_MAX)
1201 		return;
1202 
1203 	rates = rtw_rate_section[rs];
1204 	size = rtw_rate_size[rs];
1205 	bw = hal->current_band_width;
1206 	for (i = 0; i < size; i++) {
1207 		rate = rates[i];
1208 		pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
1209 						 regd);
1210 		hal->tx_pwr_tbl[path][rate] = pwr_idx;
1211 	}
1212 }
1213 
1214 static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
1215 {
1216 	if (rtwdev->chip->is_pwr_by_rate_dec)
1217 		return bcd_to_dec_pwr_by_rate(hex, i);
1218 	else
1219 		return (hex >> (i * 8)) & 0xFF;
1220 }
1221 
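/* Translate a power-by-rate register write into (rate, power) pairs: each
 * 32-bit register carries up to four rates, one byte per rate, and the mask
 * distinguishes the two halves of the split CCK register 0x86C.  Each byte
 * is decoded with tbl_to_dec_pwr_by_rate() (a few legacy CCK entries decode
 * BCD directly).
 */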
1222 static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
1223 						 u32 addr, u32 mask,
1224 						 u32 val, u8 *rate,
1225 						 u8 *pwr_by_rate, u8 *rate_num)
1226 {
1227 	int i;
1228 
1229 	switch (addr) {
1230 	case 0xE00:
1231 	case 0x830:
1232 		rate[0] = DESC_RATE6M;
1233 		rate[1] = DESC_RATE9M;
1234 		rate[2] = DESC_RATE12M;
1235 		rate[3] = DESC_RATE18M;
1236 		for (i = 0; i < 4; ++i)
1237 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1238 		*rate_num = 4;
1239 		break;
1240 	case 0xE04:
1241 	case 0x834:
1242 		rate[0] = DESC_RATE24M;
1243 		rate[1] = DESC_RATE36M;
1244 		rate[2] = DESC_RATE48M;
1245 		rate[3] = DESC_RATE54M;
1246 		for (i = 0; i < 4; ++i)
1247 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1248 		*rate_num = 4;
1249 		break;
1250 	case 0xE08:
1251 		rate[0] = DESC_RATE1M;
1252 		pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
1253 		*rate_num = 1;
1254 		break;
1255 	case 0x86C:
1256 		if (mask == 0xffffff00) {
1257 			rate[0] = DESC_RATE2M;
1258 			rate[1] = DESC_RATE5_5M;
1259 			rate[2] = DESC_RATE11M;
1260 			for (i = 1; i < 4; ++i)
1261 				pwr_by_rate[i - 1] =
1262 					tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1263 			*rate_num = 3;
1264 		} else if (mask == 0x000000ff) {
1265 			rate[0] = DESC_RATE11M;
1266 			pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
1267 			*rate_num = 1;
1268 		}
1269 		break;
1270 	case 0xE10:
1271 	case 0x83C:
1272 		rate[0] = DESC_RATEMCS0;
1273 		rate[1] = DESC_RATEMCS1;
1274 		rate[2] = DESC_RATEMCS2;
1275 		rate[3] = DESC_RATEMCS3;
1276 		for (i = 0; i < 4; ++i)
1277 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1278 		*rate_num = 4;
1279 		break;
1280 	case 0xE14:
1281 	case 0x848:
1282 		rate[0] = DESC_RATEMCS4;
1283 		rate[1] = DESC_RATEMCS5;
1284 		rate[2] = DESC_RATEMCS6;
1285 		rate[3] = DESC_RATEMCS7;
1286 		for (i = 0; i < 4; ++i)
1287 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1288 		*rate_num = 4;
1289 		break;
1290 	case 0xE18:
1291 	case 0x84C:
1292 		rate[0] = DESC_RATEMCS8;
1293 		rate[1] = DESC_RATEMCS9;
1294 		rate[2] = DESC_RATEMCS10;
1295 		rate[3] = DESC_RATEMCS11;
1296 		for (i = 0; i < 4; ++i)
1297 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1298 		*rate_num = 4;
1299 		break;
1300 	case 0xE1C:
1301 	case 0x868:
1302 		rate[0] = DESC_RATEMCS12;
1303 		rate[1] = DESC_RATEMCS13;
1304 		rate[2] = DESC_RATEMCS14;
1305 		rate[3] = DESC_RATEMCS15;
1306 		for (i = 0; i < 4; ++i)
1307 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1308 		*rate_num = 4;
1309 
1310 		break;
1311 	case 0x838:
1312 		rate[0] = DESC_RATE1M;
1313 		rate[1] = DESC_RATE2M;
1314 		rate[2] = DESC_RATE5_5M;
1315 		for (i = 1; i < 4; ++i)
1316 			pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
1317 								    val, i);
1318 		*rate_num = 3;
1319 		break;
1320 	case 0xC20:
1321 	case 0xE20:
1322 	case 0x1820:
1323 	case 0x1A20:
1324 		rate[0] = DESC_RATE1M;
1325 		rate[1] = DESC_RATE2M;
1326 		rate[2] = DESC_RATE5_5M;
1327 		rate[3] = DESC_RATE11M;
1328 		for (i = 0; i < 4; ++i)
1329 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1330 		*rate_num = 4;
1331 		break;
1332 	case 0xC24:
1333 	case 0xE24:
1334 	case 0x1824:
1335 	case 0x1A24:
1336 		rate[0] = DESC_RATE6M;
1337 		rate[1] = DESC_RATE9M;
1338 		rate[2] = DESC_RATE12M;
1339 		rate[3] = DESC_RATE18M;
1340 		for (i = 0; i < 4; ++i)
1341 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1342 		*rate_num = 4;
1343 		break;
1344 	case 0xC28:
1345 	case 0xE28:
1346 	case 0x1828:
1347 	case 0x1A28:
1348 		rate[0] = DESC_RATE24M;
1349 		rate[1] = DESC_RATE36M;
1350 		rate[2] = DESC_RATE48M;
1351 		rate[3] = DESC_RATE54M;
1352 		for (i = 0; i < 4; ++i)
1353 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1354 		*rate_num = 4;
1355 		break;
1356 	case 0xC2C:
1357 	case 0xE2C:
1358 	case 0x182C:
1359 	case 0x1A2C:
1360 		rate[0] = DESC_RATEMCS0;
1361 		rate[1] = DESC_RATEMCS1;
1362 		rate[2] = DESC_RATEMCS2;
1363 		rate[3] = DESC_RATEMCS3;
1364 		for (i = 0; i < 4; ++i)
1365 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1366 		*rate_num = 4;
1367 		break;
1368 	case 0xC30:
1369 	case 0xE30:
1370 	case 0x1830:
1371 	case 0x1A30:
1372 		rate[0] = DESC_RATEMCS4;
1373 		rate[1] = DESC_RATEMCS5;
1374 		rate[2] = DESC_RATEMCS6;
1375 		rate[3] = DESC_RATEMCS7;
1376 		for (i = 0; i < 4; ++i)
1377 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1378 		*rate_num = 4;
1379 		break;
1380 	case 0xC34:
1381 	case 0xE34:
1382 	case 0x1834:
1383 	case 0x1A34:
1384 		rate[0] = DESC_RATEMCS8;
1385 		rate[1] = DESC_RATEMCS9;
1386 		rate[2] = DESC_RATEMCS10;
1387 		rate[3] = DESC_RATEMCS11;
1388 		for (i = 0; i < 4; ++i)
1389 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1390 		*rate_num = 4;
1391 		break;
1392 	case 0xC38:
1393 	case 0xE38:
1394 	case 0x1838:
1395 	case 0x1A38:
1396 		rate[0] = DESC_RATEMCS12;
1397 		rate[1] = DESC_RATEMCS13;
1398 		rate[2] = DESC_RATEMCS14;
1399 		rate[3] = DESC_RATEMCS15;
1400 		for (i = 0; i < 4; ++i)
1401 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1402 		*rate_num = 4;
1403 		break;
1404 	case 0xC3C:
1405 	case 0xE3C:
1406 	case 0x183C:
1407 	case 0x1A3C:
1408 		rate[0] = DESC_RATEVHT1SS_MCS0;
1409 		rate[1] = DESC_RATEVHT1SS_MCS1;
1410 		rate[2] = DESC_RATEVHT1SS_MCS2;
1411 		rate[3] = DESC_RATEVHT1SS_MCS3;
1412 		for (i = 0; i < 4; ++i)
1413 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1414 		*rate_num = 4;
1415 		break;
1416 	case 0xC40:
1417 	case 0xE40:
1418 	case 0x1840:
1419 	case 0x1A40:
1420 		rate[0] = DESC_RATEVHT1SS_MCS4;
1421 		rate[1] = DESC_RATEVHT1SS_MCS5;
1422 		rate[2] = DESC_RATEVHT1SS_MCS6;
1423 		rate[3] = DESC_RATEVHT1SS_MCS7;
1424 		for (i = 0; i < 4; ++i)
1425 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1426 		*rate_num = 4;
1427 		break;
1428 	case 0xC44:
1429 	case 0xE44:
1430 	case 0x1844:
1431 	case 0x1A44:
1432 		rate[0] = DESC_RATEVHT1SS_MCS8;
1433 		rate[1] = DESC_RATEVHT1SS_MCS9;
1434 		rate[2] = DESC_RATEVHT2SS_MCS0;
1435 		rate[3] = DESC_RATEVHT2SS_MCS1;
1436 		for (i = 0; i < 4; ++i)
1437 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1438 		*rate_num = 4;
1439 		break;
1440 	case 0xC48:
1441 	case 0xE48:
1442 	case 0x1848:
1443 	case 0x1A48:
1444 		rate[0] = DESC_RATEVHT2SS_MCS2;
1445 		rate[1] = DESC_RATEVHT2SS_MCS3;
1446 		rate[2] = DESC_RATEVHT2SS_MCS4;
1447 		rate[3] = DESC_RATEVHT2SS_MCS5;
1448 		for (i = 0; i < 4; ++i)
1449 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1450 		*rate_num = 4;
1451 		break;
1452 	case 0xC4C:
1453 	case 0xE4C:
1454 	case 0x184C:
1455 	case 0x1A4C:
1456 		rate[0] = DESC_RATEVHT2SS_MCS6;
1457 		rate[1] = DESC_RATEVHT2SS_MCS7;
1458 		rate[2] = DESC_RATEVHT2SS_MCS8;
1459 		rate[3] = DESC_RATEVHT2SS_MCS9;
1460 		for (i = 0; i < 4; ++i)
1461 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1462 		*rate_num = 4;
1463 		break;
1464 	case 0xCD8:
1465 	case 0xED8:
1466 	case 0x18D8:
1467 	case 0x1AD8:
1468 		rate[0] = DESC_RATEMCS16;
1469 		rate[1] = DESC_RATEMCS17;
1470 		rate[2] = DESC_RATEMCS18;
1471 		rate[3] = DESC_RATEMCS19;
1472 		for (i = 0; i < 4; ++i)
1473 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1474 		*rate_num = 4;
1475 		break;
1476 	case 0xCDC:
1477 	case 0xEDC:
1478 	case 0x18DC:
1479 	case 0x1ADC:
1480 		rate[0] = DESC_RATEMCS20;
1481 		rate[1] = DESC_RATEMCS21;
1482 		rate[2] = DESC_RATEMCS22;
1483 		rate[3] = DESC_RATEMCS23;
1484 		for (i = 0; i < 4; ++i)
1485 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1486 		*rate_num = 4;
1487 		break;
1488 	case 0xCE0:
1489 	case 0xEE0:
1490 	case 0x18E0:
1491 	case 0x1AE0:
1492 		rate[0] = DESC_RATEVHT3SS_MCS0;
1493 		rate[1] = DESC_RATEVHT3SS_MCS1;
1494 		rate[2] = DESC_RATEVHT3SS_MCS2;
1495 		rate[3] = DESC_RATEVHT3SS_MCS3;
1496 		for (i = 0; i < 4; ++i)
1497 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1498 		*rate_num = 4;
1499 		break;
1500 	case 0xCE4:
1501 	case 0xEE4:
1502 	case 0x18E4:
1503 	case 0x1AE4:
1504 		rate[0] = DESC_RATEVHT3SS_MCS4;
1505 		rate[1] = DESC_RATEVHT3SS_MCS5;
1506 		rate[2] = DESC_RATEVHT3SS_MCS6;
1507 		rate[3] = DESC_RATEVHT3SS_MCS7;
1508 		for (i = 0; i < 4; ++i)
1509 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1510 		*rate_num = 4;
1511 		break;
1512 	case 0xCE8:
1513 	case 0xEE8:
1514 	case 0x18E8:
1515 	case 0x1AE8:
1516 		rate[0] = DESC_RATEVHT3SS_MCS8;
1517 		rate[1] = DESC_RATEVHT3SS_MCS9;
1518 		for (i = 0; i < 2; ++i)
1519 			pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
1520 		*rate_num = 2;
1521 		break;
1522 	default:
1523 		rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
1524 		break;
1525 	}
1526 }
1527 
1528 void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
1529 				u32 regaddr, u32 bitmask, u32 data)
1530 {
1531 	struct rtw_dev *rtwdev = adapter;
1532 	struct rtw_hal *hal = &rtwdev->hal;
1533 	u8 rate_num = 0;
1534 	u8 rate;
1535 	u8 rates[RTW_RF_PATH_MAX] = {0};
1536 	s8 offset;
1537 	s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
1538 	int i;
1539 
1540 	phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
1541 					     rates, pwr_by_rate, &rate_num);
1542 
1543 	if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
1544 		    (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
1545 		    rate_num > RTW_RF_PATH_MAX))
1546 		return;
1547 
1548 	for (i = 0; i < rate_num; i++) {
1549 		offset = pwr_by_rate[i];
1550 		rate = rates[i];
1551 		if (band == PHY_BAND_2G)
1552 			hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
1553 		else if (band == PHY_BAND_5G)
1554 			hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
1555 		else
1556 			continue;
1557 	}
1558 }
1559 
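/* Rebase one rate section to per-rate offsets: the section's reference rate
 * (its last rate, or the MCS7 entry for the VHT sections since
 * rates[size - 3] skips MCS8/MCS9) becomes tx_pwr_by_rate_base_*, and every
 * rate in the section is rewritten as a signed offset from that base.
 */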
1560 static
1561 void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
1562 					 u8 rs, u8 size, u8 *rates)
1563 {
1564 	u8 rate;
1565 	u8 base_idx, rate_idx;
1566 	s8 base_2g, base_5g;
1567 
1568 	if (rs >= RTW_RATE_SECTION_VHT_1S)
1569 		base_idx = rates[size - 3];
1570 	else
1571 		base_idx = rates[size - 1];
1572 	base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
1573 	base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
1574 	hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
1575 	hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
1576 	for (rate = 0; rate < size; rate++) {
1577 		rate_idx = rates[rate];
1578 		hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
1579 		hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
1580 	}
1581 }
1582 
1583 void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
1584 {
1585 	u8 path;
1586 
1587 	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
1588 		phy_tx_power_by_rate_config_by_path(hal, path,
1589 				RTW_RATE_SECTION_CCK,
1590 				rtw_cck_size, rtw_cck_rates);
1591 		phy_tx_power_by_rate_config_by_path(hal, path,
1592 				RTW_RATE_SECTION_OFDM,
1593 				rtw_ofdm_size, rtw_ofdm_rates);
1594 		phy_tx_power_by_rate_config_by_path(hal, path,
1595 				RTW_RATE_SECTION_HT_1S,
1596 				rtw_ht_1s_size, rtw_ht_1s_rates);
1597 		phy_tx_power_by_rate_config_by_path(hal, path,
1598 				RTW_RATE_SECTION_HT_2S,
1599 				rtw_ht_2s_size, rtw_ht_2s_rates);
1600 		phy_tx_power_by_rate_config_by_path(hal, path,
1601 				RTW_RATE_SECTION_VHT_1S,
1602 				rtw_vht_1s_size, rtw_vht_1s_rates);
1603 		phy_tx_power_by_rate_config_by_path(hal, path,
1604 				RTW_RATE_SECTION_VHT_2S,
1605 				rtw_vht_2s_size, rtw_vht_2s_rates);
1606 	}
1607 }
1608 
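/* Rebase the regulatory limits against the path A per-section base power,
 * so that they can be compared directly with the per-rate offsets in
 * phy_get_tx_power_index().
 */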
1609 static void
1610 phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
1611 {
1612 	s8 base;
1613 	u8 ch;
1614 
1615 	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
1616 		base = hal->tx_pwr_by_rate_base_2g[0][rs];
1618 		hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
1619 	}
1620 
1621 	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
1622 		base = hal->tx_pwr_by_rate_base_5g[0][rs];
1623 		hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
1624 	}
1625 }
1626 
1627 void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
1628 {
1629 	u8 regd, bw, rs;
1630 
1631 	for (regd = 0; regd < RTW_REGD_MAX; regd++)
1632 		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1633 			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1634 				phy_tx_power_limit_config(hal, regd, bw, rs);
1635 }
1636 
static s8 get_tx_power_limit(struct rtw_hal *hal, u8 band, u8 bw, u8 rs,
			     u8 ch, u8 regd)
{
	if (regd > RTW_REGD_WW)
		return RTW_MAX_POWER_INDEX;

	if (band == PHY_BAND_2G)
		return hal->tx_pwr_limit_2g[regd][bw][rs][ch];

	return hal->tx_pwr_limit_5g[regd][bw][rs][ch];
}
1644 
1645 s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
1646 			  enum rtw_bandwidth bw, u8 rf_path,
1647 			  u8 rate, u8 channel, u8 regd)
1648 {
1649 	struct rtw_hal *hal = &rtwdev->hal;
1650 	s8 power_limit;
1651 	u8 rs;
1652 	int ch_idx;
1653 
1654 	if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
1655 		rs = RTW_RATE_SECTION_CCK;
1656 	else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
1657 		rs = RTW_RATE_SECTION_OFDM;
1658 	else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
1659 		rs = RTW_RATE_SECTION_HT_1S;
1660 	else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
1661 		rs = RTW_RATE_SECTION_HT_2S;
1662 	else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
1663 		rs = RTW_RATE_SECTION_VHT_1S;
1664 	else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
1665 		rs = RTW_RATE_SECTION_VHT_2S;
1666 	else
1667 		goto err;
1668 
1669 	ch_idx = rtw_channel_to_idx(band, channel);
1670 	if (ch_idx < 0)
1671 		goto err;
1672 
1673 	power_limit = get_tx_power_limit(hal, band, bw, rs, ch_idx, regd);
1674 
1675 	return power_limit;
1676 
1677 err:
1678 	WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
1679 	     band, bw, rf_path, rate, channel);
1680 	return RTW_MAX_POWER_INDEX;
1681 }
1682 
1683 void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
1684 			    u8 bw, u8 rs, u8 ch, s8 pwr_limit)
1685 {
1686 	struct rtw_hal *hal = &rtwdev->hal;
1687 	int ch_idx;
1688 
1689 	pwr_limit = clamp_t(s8, pwr_limit,
1690 			    -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
1691 	ch_idx = rtw_channel_to_idx(band, ch);
1692 
1693 	if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
1694 	    rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
1695 		WARN(1,
1696 		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
1697 		     regd, band, bw, rs, ch_idx, pwr_limit);
1698 		return;
1699 	}
1700 
1701 	if (band == PHY_BAND_2G)
1702 		hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
1703 	else if (band == PHY_BAND_5G)
1704 		hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
1705 }
1706 
1707 static
1708 void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
1709 {
1710 	u8 ch;
1711 
1712 	/* 2.4G channels */
1713 	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
1714 		hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
1715 
1716 	/* 5G channels */
1717 	for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
1718 		hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
1719 }
1720 
1721 void rtw_hw_init_tx_power(struct rtw_hal *hal)
1722 {
1723 	u8 regd, path, rate, rs, bw;
1724 
1725 	/* init tx power by rate offset */
1726 	for (path = 0; path < RTW_RF_PATH_MAX; path++) {
1727 		for (rate = 0; rate < DESC_RATE_MAX; rate++) {
1728 			hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
1729 			hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
1730 		}
1731 	}
1732 
1733 	/* init tx power limit */
1734 	for (regd = 0; regd < RTW_REGD_MAX; regd++)
1735 		for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
1736 			for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
1737 				rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
1738 }
1739