/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>

#include "mt76.h"
#include "mt76x02_phy.h"
#include "mt76x02_mac.h"

/*
 * mt76x02_phy_set_rxpath - program the RX chain selection in BBP AGC
 * register 0 according to the low nibble of dev->chainmask.
 *
 * BIT(4) is always cleared and BIT(3) is set only for the 2-chain case;
 * the exact per-bit semantics come from the vendor register layout —
 * NOTE(review): assumed "RX path select" bits, confirm against datasheet.
 */
void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
{
	u32 val;

	/* read-modify-write of BBP AGC register 0 */
	val = __mt76_rr(dev, MT_BBP(AGC, 0));
	val &= ~BIT(4);

	/* low nibble of chainmask = number of RX chains */
	switch (dev->chainmask & 0xf) {
	case 2:
		val |= BIT(3);
		break;
	default:
		val &= ~BIT(3);
		break;
	}

	__mt76_wr(dev, MT_BBP(AGC, 0), val);
	/*
	 * Full barrier plus a dummy read-back: forces the posted register
	 * write out to the chip before callers proceed. The value read is
	 * intentionally discarded.
	 */
	mb();
	val = __mt76_rr(dev, MT_BBP(AGC, 0));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);

/*
 * mt76x02_phy_set_txdac - enable/disable the second TX DAC.
 *
 * The TX chain count lives in bits 8..11 of dev->chainmask; for two TX
 * paths both low bits of BBP TXBE register 5 are set, otherwise cleared.
 */
void mt76x02_phy_set_txdac(struct mt76_dev *dev)
{
	int txpath;

	txpath = (dev->chainmask >> 8) & 0xf;
	switch (txpath) {
	case 2:
		__mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
		break;
	default:
		__mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);

/*
 * mt76x02_tx_power_mask - pack four 6-bit power values into one 32-bit
 * register word, one value per byte lane (bits 0, 8, 16, 24).
 *
 * Each input is masked to 6 bits (BIT(6) - 1 == 0x3f) to match the
 * per-rate power field width in the MT_TX_PWR_CFG_* registers.
 */
static u32
mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
{
	u32 val = 0;

	val |= (v1 & (BIT(6) - 1)) << 0;
	val |= (v2 & (BIT(6) - 1)) << 8;
	val |= (v3 & (BIT(6) - 1)) << 16;
	val |= (v4 & (BIT(6) - 1)) << 24;
	return val;
}

/*
 * mt76x02_get_max_rate_power - return the largest per-rate power value
 * in @r, but never less than 0 (the accumulator starts at 0, so an
 * all-negative table still yields 0).
 */
int
mt76x02_get_max_rate_power(struct mt76_rate_power *r)
{
	s8 ret = 0;
	int i;

	/* r->all overlays every per-rate entry as a flat s8 array */
	for (i = 0; i < sizeof(r->all); i++)
		ret = max(ret, r->all[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);

/*
 * mt76x02_limit_rate_power - clamp every per-rate power entry in @r to
 * at most @limit (entries already below the limit are left untouched).
 */
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
{
	int i;

	for (i = 0; i < sizeof(r->all); i++)
		if (r->all[i] > limit)
			r->all[i] = limit;
}
EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);

/*
 * mt76x02_add_rate_power_offset - apply a signed offset to every
 * per-rate power entry in @r. No clamping is done here; callers are
 * expected to keep results within the 6-bit register range.
 */
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
{
	int i;

	for (i = 0; i < sizeof(r->all); i++)
		r->all[i] += offset;
}
EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);

/*
 * mt76x02_phy_set_txpower - write the per-channel init power (@txp_0,
 * @txp_1, one per TX chain) and the full per-rate power table from
 * dev->rate_power into the MT_TX_PWR_CFG_* registers.
 *
 * Each CFG register packs four 6-bit rate powers via
 * mt76x02_tx_power_mask(). The even-index table entries (cck[0]/[2],
 * ofdm[0]/[2]/..., ht[0]/[2]/...) are used because each entry covers a
 * pair of adjacent rates — NOTE(review): pairing assumption inferred
 * from the stride-2 indexing; confirm against the EEPROM/rate layout.
 */
void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1)
{
	struct mt76_rate_power *t = &dev->rate_power;

	__mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0,
			 txp_0);
	__mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1,
			 txp_1);

	__mt76_wr(dev, MT_TX_PWR_CFG_0,
		  mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
					t->ofdm[2]));
	__mt76_wr(dev, MT_TX_PWR_CFG_1,
		  mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
					t->ht[2]));
	__mt76_wr(dev, MT_TX_PWR_CFG_2,
		  mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
					t->ht[10]));
	__mt76_wr(dev, MT_TX_PWR_CFG_3,
		  mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
					t->stbc[2]));
	__mt76_wr(dev, MT_TX_PWR_CFG_4,
		  mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
	__mt76_wr(dev, MT_TX_PWR_CFG_7,
		  mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
					t->vht[9]));
	__mt76_wr(dev, MT_TX_PWR_CFG_8,
		  mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
	__mt76_wr(dev, MT_TX_PWR_CFG_9,
		  mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);

/*
 * mt76x02_phy_get_min_avg_rssi - return the weakest (most negative)
 * average RSSI across all currently allocated stations, or -75 as a
 * fallback when no station contributes a negative reading.
 *
 * Walks the dev->wcid_mask allocation bitmap and resolves each set bit
 * to its wcid under rcu_read_lock(); bottom halves are disabled so the
 * scan cannot race with the softirq RX path that updates the EWMA.
 *
 * Side effect: each visit increments sta->inactive_count; after 5
 * consecutive visits a station's RSSI is treated as 0 (i.e. excluded
 * from the minimum). NOTE(review): presumably the counter is reset by
 * the RX path on traffic — not visible in this file, verify there.
 */
int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
{
	struct mt76x02_sta *sta;
	struct mt76_wcid *wcid;
	int i, j, min_rssi = 0;
	s8 cur_rssi;

	local_bh_disable();
	rcu_read_lock();

	for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
		unsigned long mask = dev->wcid_mask[i];

		if (!mask)
			continue;

		/* j tracks the absolute wcid index as mask shifts right */
		for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) {
			if (!(mask & 1))
				continue;

			wcid = rcu_dereference(dev->wcid[j]);
			if (!wcid)
				continue;

			sta = container_of(wcid, struct mt76x02_sta, wcid);
			/* rx_lock guards the EWMA/inactive_count pair */
			spin_lock(&dev->rx_lock);
			if (sta->inactive_count++ < 5)
				cur_rssi = ewma_signal_read(&sta->rssi);
			else
				cur_rssi = 0;
			spin_unlock(&dev->rx_lock);

			if (cur_rssi < min_rssi)
				min_rssi = cur_rssi;
		}
	}

	rcu_read_unlock();
	local_bh_enable();

	/* no (active) station reported a negative RSSI: use a default */
	if (!min_rssi)
		return -75;

	return min_rssi;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);