/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mt76x0.h"
#include "mcu.h"

static int mt76x0e_start(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);

	mt76x02_mac_start(dev);
	mt76x0_phy_calibrate(dev, true);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
				     MT_CALIBRATE_INTERVAL);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	mutex_unlock(&dev->mt76.mutex);

	return 0;
}

/* Cancel pending calibration work, wait for the TX/RX DMA engines to go
 * idle and disable them, stopping the MAC in between.
 */
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);

	mt76x0_mac_stop(dev);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
		       0, 1000))
		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}

static void mt76x0e_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);
	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt76x0e_stop_hw(dev);
	mutex_unlock(&dev->mt76.mutex);
}

static void
mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	      u32 queues, bool drop)
{
}

static int
mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		bool set)
{
	return 0;
}

static const struct ieee80211_ops mt76x0e_ops = {
	.tx = mt76x02_tx,
	.start = mt76x0e_start,
	.stop = mt76x0e_stop,
	.add_interface = mt76x02_add_interface,
	.remove_interface = mt76x02_remove_interface,
	.config = mt76x0_config,
	.configure_filter = mt76x02_configure_filter,
	.bss_info_changed = mt76x02_bss_info_changed,
	.sta_state = mt76_sta_state,
	.set_key = mt76x02_set_key,
	.conf_tx = mt76x02_conf_tx,
	.sw_scan_start = mt76x02_sw_scan,
	.sw_scan_complete = mt76x02_sw_scan_complete,
	.ampdu_action = mt76x02_ampdu_action,
	.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
	.wake_tx_queue = mt76_wake_tx_queue,
	.get_survey = mt76_get_survey,
	.get_txpower = mt76_get_txpower,
	.flush = mt76x0e_flush,
	.set_tim = mt76x0e_set_tim,
	.release_buffered_frames = mt76_release_buffered_frames,
	.set_coverage_class = mt76x02_set_coverage_class,
	.set_rts_threshold = mt76x02_set_rts_threshold,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
{
	int err;

	mt76x0_chip_onoff(dev, true, false);
	if (!mt76x02_wait_for_mac(&dev->mt76))
		return -ETIMEDOUT;

	mt76x02_dma_disable(dev);
	err = mt76x0e_mcu_init(dev);
	if (err < 0)
		return err;

	err = mt76x02_dma_init(dev);
	if (err < 0)
		return err;

	err = mt76x0_init_hardware(dev);
	if (err < 0)
		return err;

	if (mt76_chip(&dev->mt76) == 0x7610) {
		u16 val;

		mt76_clear(dev, MT_COEXCFG0, BIT(0));

		val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
		if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
			mt76_set(dev, MT_XO_CTRL7, 0xc03);
	}

	mt76_clear(dev, 0x110, BIT(9));
	mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));

	mt76_wr(dev, MT_CH_TIME_CFG,
		MT_CH_TIME_CFG_TIMER_EN |
		MT_CH_TIME_CFG_TX_AS_BUSY |
		MT_CH_TIME_CFG_RX_AS_BUSY |
		MT_CH_TIME_CFG_NAV_AS_BUSY |
		MT_CH_TIME_CFG_EIFS_AS_BUSY |
		MT_CH_CCA_RC_EN |
		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));

	err = mt76x0_register_device(dev);
	if (err < 0)
		return err;

	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);

	return 0;
}

static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct mt76_driver_ops drv_ops = {
		.txwi_size = sizeof(struct mt76x02_txwi),
		.update_survey = mt76x02_update_channel,
		.tx_prepare_skb = mt76x02_tx_prepare_skb,
		.tx_complete_skb = mt76x02_tx_complete_skb,
		.rx_skb = mt76x02_queue_rx_skb,
		.rx_poll_complete = mt76x02_rx_poll_complete,
		.sta_ps = mt76x02_sta_ps,
		.sta_add = mt76x02_sta_add,
		.sta_remove = mt76x02_sta_remove,
	};
	struct mt76x02_dev *dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
	if (!dev)
		return -ENOMEM;

	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);

	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);

	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt76x0e_register_device(dev);
	if (ret < 0)
		goto error;

	return 0;

error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}

static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	mt76x0_chip_onoff(dev, false, false);
	mt76x0e_stop_hw(dev);
	mt76x02_dma_cleanup(dev);
	mt76x02_mcu_cleanup(dev);
}

static void
mt76x0e_remove(struct pci_dev *pdev)
{
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);

	mt76_unregister_device(mdev);
	mt76x0e_cleanup(dev);
	ieee80211_free_hw(mdev->hw);
}

static const struct pci_device_id mt76x0e_device_table[] = {
	{ PCI_DEVICE(0x14c3, 0x7630) },
	{ PCI_DEVICE(0x14c3, 0x7650) },
	{ },
};

MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mt76x0e_device_table,
	.probe = mt76x0e_probe,
	.remove = mt76x0e_remove,
};

module_pci_driver(mt76x0e_driver);