// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	dma_addr_t dma;
	u8 *head = rx_ring->r.head;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C queue index */
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_trx_ring(rtwdev);

	return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

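	/* reset the DMA engine and rx tag counter before enabling interrupts */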
	rtw_pci_dma_reset(rtwdev, rtwpci);

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static int rtw_pci_xmit(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;
	skb_queue_tail(&ring->queue, skb);

	/* kick off tx queue */
	if (queue != RTW_TX_QUEUE_BCN) {
		if (++ring->r.wp >= ring->r.len)
			ring->r.wp = 0;
		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
	} else {
		u32 reg_bcn_work;

		reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
	}

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;
	pkt_info.offset = tx_pkt_desc_sz;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
		      struct rtw_tx_pkt_info *pkt_info,
		      struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= 0xfff;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= 0xfff;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock(&rtwpci->irq_lock);
	if (!rtwpci->irq_enabled)
		goto out;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
out:
	spin_unlock(&rtwpci->irq_lock);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;
	u32 irq_status[4];

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & 0x3;
	u8 flag;
	u8 cnt = 20;

	write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);

	flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
	while (flag && (cnt != 0)) {
		udelay(10);
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		cnt--;
	}

	WARN(flag, "DBI write fail\n");
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < 0x20 ? 0 : 1;
	page += g1 ? 0 : 2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);

	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
	wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);

	cnt = 20;
	while (wflag && (cnt != 0)) {
		udelay(10);
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		cnt--;
	}

	WARN(wflag, "MDIO write fail\n");
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	rtw_pci_phy_cfg(rtwdev);

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx = rtw_pci_tx,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
	.name = "rtw_pci",
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");