// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]   = RTK_PCI_TXBD_IDX_BKQ,
        [RTW_TX_QUEUE_BE]   = RTK_PCI_TXBD_IDX_BEQ,
        [RTW_TX_QUEUE_VI]   = RTK_PCI_TXBD_IDX_VIQ,
        [RTW_TX_QUEUE_VO]   = RTK_PCI_TXBD_IDX_VOQ,
        [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
        [RTW_TX_QUEUE_HI0]  = RTK_PCI_TXBD_IDX_HI0Q,
        [RTW_TX_QUEUE_H2C]  = RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
        switch (queue) {
        case RTW_TX_QUEUE_BCN:
                return TX_DESC_QSEL_BEACON;
        case RTW_TX_QUEUE_H2C:
                return TX_DESC_QSEL_H2C;
        case RTW_TX_QUEUE_MGMT:
                return TX_DESC_QSEL_MGMT;
        case RTW_TX_QUEUE_HI0:
                return TX_DESC_QSEL_HIGH;
        default:
                return skb->priority;
        }
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
        int offset = tx_ring->r.desc_size * idx;

        return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;

        /* free every skb remaining in the tx list */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                dma = tx_data->dma;

                pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;

        rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

        /* free the ring itself */
        pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;
        int i;

        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;

                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = rx_ring->r.head;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

        rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

        pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        int i;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_tx_ring *tx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        int ring_sz = desc_size * len;
        dma_addr_t dma;
        u8 *head;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate tx ring\n");
                return -ENOMEM;
        }

        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

        return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                 struct rtw_pci_rx_ring *rx_ring,
                                 u32 idx, u32 desc_sz)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;

        if (!skb)
                return -EINVAL;

        dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

        return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
                                        struct rtw_pci_rx_ring *rx_ring,
                                        u32 idx, u32 desc_sz)
{
        struct device *dev = rtwdev->dev;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;

        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);
}

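/* Populate the whole RX ring: allocate one skb per buffer descriptor, map it
 * for DMA (the mapping is remembered in skb->cb so it can be unmapped later),
 * and roll everything back if an allocation or mapping fails midway.
 */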
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_rx_ring *rx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb = NULL;
        dma_addr_t dma;
        u8 *head;
        int ring_sz = desc_size * len;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret = 0;

        head = pci_zalloc_consistent(pdev, ring_sz, &dma);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate rx ring\n");
                return -ENOMEM;
        }
        rx_ring->r.head = head;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        allocated = i;
                        ret = -ENOMEM;
                        goto err_out;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
                if (ret) {
                        allocated = i;
                        dev_kfree_skb_any(skb);
                        goto err_out;
                }
        }

        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;

        return 0;

err_out:
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                dma = *((dma_addr_t *)skb->cb);
                pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                rx_ring->buf[i] = NULL;
        }
        pci_free_consistent(pdev, ring_sz, head, dma);

        rtw_err(rtwdev, "failed to init rx buffer\n");

        return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        struct rtw_chip_info *chip = rtwdev->chip;
        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
        int tx_desc_size, rx_desc_size;
        u32 len;
        int ret;

        tx_desc_size = chip->tx_buf_desc_sz;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                len = max_num_of_tx_queue(i);
                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
                if (ret)
                        goto out;
        }

        rx_desc_size = chip->rx_buf_desc_sz;

        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
                                           RTK_MAX_RX_DESC_NUM);
                if (ret)
                        goto out;
        }

        return 0;

out:
        tx_alloced = i;
        for (i = 0; i < tx_alloced; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        rx_alloced = j;
        for (j = 0; j < rx_alloced; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }

        return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
        rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        int ret = 0;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              IMR_MGNTDOK |
                              IMR_BKDOK |
                              IMR_BEDOK |
                              IMR_VIDOK |
                              IMR_VODOK |
                              IMR_ROK |
                              IMR_BCNDMAINT_E |
                              0;
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              0;
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              0;
        spin_lock_init(&rtwpci->irq_lock);
        ret = rtw_pci_init_trx_ring(rtwdev);

        return ret;
}

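/* Program each ring's base DMA address and descriptor count into the
 * corresponding TXBD/RXBD registers, and reset the host/hardware indexes so
 * both sides start from slot 0 again.
 */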
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 len;
        u8 tmp;
        dma_addr_t dma;

        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

        /* reset read/write point */
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

        /* reset H2C Queue index in a single write */
        rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
                        BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
        rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
                                      struct rtw_pci *rtwpci)
{
        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
        rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_trx_ring(rtwdev);

        return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        /* reset dma and rx tag */
        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
        rtwpci->rx_tag = 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        struct rtw_pci_tx_ring *tx_ring;
        u8 queue;

        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                tx_ring = &rtwpci->tx_rings[queue];
                rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
        }
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        rtw_pci_dma_reset(rtwdev, rtwpci);

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_enable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_dma_release(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        bool tx_empty = true;
        u8 queue;

        lockdep_assert_held(&rtwpci->irq_lock);

        /* TX DMA is not allowed while in deep PS state */
        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                /* BCN queue holds reserved pages and has no DMA interrupt,
                 * H2C queue is managed by firmware
                 */
                if (queue == RTW_TX_QUEUE_BCN ||
                    queue == RTW_TX_QUEUE_H2C)
                        continue;

                tx_ring = &rtwpci->tx_rings[queue];

                /* check if there is any skb DMAing */
                if (skb_queue_len(&tx_ring->queue)) {
                        tx_empty = false;
                        break;
                }
        }

        if (!tx_empty) {
                rtw_dbg(rtwdev, RTW_DBG_PS,
                        "TX path not empty, cannot enter deep power save state\n");
                return;
        }

        set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
        rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        lockdep_assert_held(&rtwpci->irq_lock);

        if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);

        if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_enter(rtwdev);

        if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_leave(rtwdev);

        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static u8 ac_to_hwq[] = {
        [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
        [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
        [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
        [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

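/* Pick the hardware queue for a frame: beacons and management/control frames
 * go to their dedicated queues, data frames follow the mac80211 AC-to-queue
 * mapping above, and an out-of-range mapping falls back to the BE queue.
 */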
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        u8 q_mapping = skb_get_queue_mapping(skb);
        u8 queue;

        if (unlikely(ieee80211_is_beacon(fc)))
                queue = RTW_TX_QUEUE_BCN;
        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
                queue = RTW_TX_QUEUE_MGMT;
        else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
                queue = ac_to_hwq[IEEE80211_AC_BE];
        else
                queue = ac_to_hwq[q_mapping];

        return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
                                      struct rtw_pci_tx_ring *ring)
{
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;

        if (!prev)
                return;

        tx_data = rtw_pci_get_tx_data(prev);
        dma = tx_data->dma;
        pci_unmap_single(rtwpci->pdev, dma, prev->len,
                         PCI_DMA_TODEVICE);
        dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
                              struct rtw_pci_rx_ring *rx_ring,
                              u32 idx)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        u32 desc_sz = chip->rx_buf_desc_sz;
        u16 total_pkt_size;

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* rx tag mismatch, throw a warning */
        if (total_pkt_size != rtwpci->rx_tag)
                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

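/* Fill a TX buffer descriptor and hand the frame to the hardware. The frame
 * is mapped as one DMA region but described as two segments: segment 0 covers
 * the tx packet descriptor pushed in front of the payload, segment 1 covers
 * the payload itself. Beacon frames additionally set the OWN bit in psb_len
 * and are kicked via the BCN work register instead of a ring index write.
 */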
static int rtw_pci_xmit(struct rtw_dev *rtwdev,
                        struct rtw_tx_pkt_info *pkt_info,
                        struct sk_buff *skb, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        u32 size;
        u32 psb_len;
        u8 *pkt_desc;
        struct rtw_pci_tx_buffer_desc *buf_desc;
        u32 bd_idx;
        unsigned long flags;

        ring = &rtwpci->tx_rings[queue];

        size = skb->len;

        if (queue == RTW_TX_QUEUE_BCN)
                rtw_pci_release_rsvd_page(rtwpci, ring);
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, tx_pkt_desc_sz);
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        rtw_tx_fill_tx_desc(pkt_info, skb);
        dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(rtwpci->pdev, dma))
                return -EBUSY;

        /* once the DMA mapping succeeds, there is no way back */
        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
        memset(buf_desc, 0, tx_buf_desc_sz);
        psb_len = (skb->len - 1) / 128 + 1;
        if (queue == RTW_TX_QUEUE_BCN)
                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

        buf_desc[0].psb_len = cpu_to_le16(psb_len);
        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
        buf_desc[0].dma = cpu_to_le32(dma);
        buf_desc[1].buf_size = cpu_to_le16(size);
        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

        tx_data = rtw_pci_get_tx_data(skb);
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;

        spin_lock_irqsave(&rtwpci->irq_lock, flags);

        rtw_pci_deep_ps_leave(rtwdev);
        skb_queue_tail(&ring->queue, skb);

        /* kick off tx queue */
        if (queue != RTW_TX_QUEUE_BCN) {
                if (++ring->r.wp >= ring->r.len)
                        ring->r.wp = 0;
                bd_idx = rtw_pci_tx_queue_idx_addr[queue];
                rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
        } else {
                u32 reg_bcn_work;

                reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
                reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
                rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
        }
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
                                        u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;
        pkt_info.offset = tx_pkt_desc_sz;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info;
        u32 tx_pkt_desc_sz;
        u32 length;

        tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
        length = size + tx_pkt_desc_sz;
        skb = dev_alloc_skb(length);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, tx_pkt_desc_sz);
        memcpy((u8 *)skb_put(skb, size), buf, size);
        memset(&pkt_info, 0, sizeof(pkt_info));
        pkt_info.tx_pkt_size = size;

        return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
                      struct rtw_tx_pkt_info *pkt_info,
                      struct sk_buff *skb)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u8 queue = rtw_hw_queue_mapping(skb);
        int ret;

        ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
        if (ret)
                return ret;

        ring = &rtwpci->tx_rings[queue];
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }

        return 0;
}

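/* TX completion: the hardware's current read index sits in bits 16..27 of the
 * queue's BD index register. The number of completed frames is the distance
 * from the ring's cached read pointer to that index (with wrap-around), and
 * the skbs are reclaimed from ring->queue in the order they were queued.
 */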
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct ieee80211_tx_info *info;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 count;
        u32 bd_idx_addr;
        u32 bd_idx, cur_rp;
        u16 q_map;

        ring = &rtwpci->tx_rings[hw_queue];

        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
        cur_rp = bd_idx >> 16;
        cur_rp &= 0xfff;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
                                 PCI_DMA_TODEVICE);

                /* just free command packets from host to card */
                if (hw_queue == RTW_TX_QUEUE_H2C) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
                        q_map = skb_get_queue_mapping(skb);
                        ieee80211_wake_queue(hw, q_map);
                        ring->queue_stopped = false;
                }

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

                info = IEEE80211_SKB_CB(skb);

                /* enqueue to wait for tx report */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }

                /* always ACK the others, so they won't be marked as dropped */
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;

                ieee80211_tx_info_clear_status(info);
                ieee80211_tx_status_irqsafe(hw, skb);
        }

        ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_ring *ring;
        struct rtw_rx_pkt_stat pkt_stat;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb, *new;
        u32 cur_wp, cur_rp, tmp;
        u32 count;
        u32 pkt_offset;
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        u32 new_len;
        u8 *rx_desc;
        dma_addr_t dma;

        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
        cur_wp = tmp >> 16;
        cur_wp &= 0xfff;
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

        cur_rp = ring->r.rp;
        while (count--) {
                rtw_pci_dma_check(rtwdev, ring, cur_rp);
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

                /* offset from rx_desc to payload */
                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
                             pkt_stat.shift;

                /* allocate a new skb for this frame,
                 * discard the frame if none available
                 */
                new_len = pkt_stat.pkt_len + pkt_offset;
                new = dev_alloc_skb(new_len);
                if (WARN_ONCE(!new, "rx routine starvation\n"))
                        goto next_rp;

                /* put the DMA data including rx_desc from phy to new skb */
                skb_put_data(new, skb->data, new_len);

                if (pkt_stat.is_c2h) {
                        rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
                } else {
                        /* remove rx_desc */
                        skb_pull(new, pkt_offset);

                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
                        memcpy(new->cb, &rx_status, sizeof(rx_status));
                        ieee80211_rx_irqsafe(rtwdev->hw, new);
                }

next_rp:
                /* new skb delivered to mac80211, re-enable original skb DMA */
                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
                                            buf_desc_sz);

                /* the host reads the next element in the ring */
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        ring->r.wp = cur_wp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci, u32 *irq_status)
{
        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
        irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
        rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}

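/* Interrupt handling is split in two: the hard IRQ handler only masks HIMR
 * and returns IRQ_WAKE_THREAD, while the threaded handler reads and
 * write-1-clears HISR, dispatches the per-queue ISRs, and unmasks HIMR again
 * once all pending work has been handled.
 */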
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        spin_lock(&rtwpci->irq_lock);
        if (!rtwpci->irq_enabled)
                goto out;

        /* disable RTW PCI interrupt to avoid more interrupts before the end
         * of the thread function
         *
         * disable HIMR here to also avoid a new HISR flag being raised before
         * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
         * are cleared, the edge-triggered interrupt will not be generated when
         * a new HISR flag is set.
         */
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
out:
        spin_unlock(&rtwpci->irq_lock);

        return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long flags;
        u32 irq_status[4];

        spin_lock_irqsave(&rtwpci->irq_lock, flags);
        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

        if (irq_status[0] & IMR_MGNTDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
        if (irq_status[0] & IMR_HIGHDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
        if (irq_status[0] & IMR_BEDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
        if (irq_status[0] & IMR_BKDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
        if (irq_status[0] & IMR_VODOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
        if (irq_status[0] & IMR_VIDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
        if (irq_status[3] & IMR_H2CDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
        if (irq_status[0] & IMR_ROK)
                rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

        /* all of the jobs for this interrupt have been done */
        rtw_pci_enable_interrupt(rtwdev, rtwpci);
        spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

        return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
                              struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long len;
        u8 bar_id = 2;
        int ret;

        ret = pci_request_regions(pdev, KBUILD_MODNAME);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci regions\n");
                return ret;
        }

        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }

        return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                pci_release_regions(pdev);
        }
}

static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
        u16 write_addr;
        u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
        u8 flag;
        u8 cnt;

        write_addr = addr & BITS_DBI_ADDR_MASK;
        write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0)
                        return;

                udelay(10);
        }

        WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
        u16 read_addr = addr & BITS_DBI_ADDR_MASK;
        u8 flag;
        u8 cnt;

        rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0) {
                        read_addr = REG_DBI_RDATA_V1 + (addr & 3);
                        *value = rtw_read8(rtwdev, read_addr);
                        return 0;
                }

                udelay(10);
        }

        WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
        return -EIO;
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
        u8 page;
        u8 wflag;
        u8 cnt;

        rtw_write16(rtwdev, REG_MDIO_V1, data);

        page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
        page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
                                        BIT_MDIO_WFLAG_V1);
                if (wflag == 0)
                        return;

                udelay(10);
        }

        WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_CLKREQ_SW_EN;
        else
                value &= ~BIT_CLKREQ_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_L1_SW_EN;
        else
                value &= ~BIT_L1_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
         * only be enabled when the host supports it.
         *
         * ASPM should only be enabled when the driver/firmware enters power
         * save mode and there is no heavy traffic, because we have seen
         * interoperability issues where the link enters L1 on the fly even
         * while the driver is pushing high throughput. This is probably
         * because the ASPM behavior varies slightly between SoCs.
         */
        if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
                rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 link_ctrl;
        int ret;

        /* Though there is a standard PCIE configuration space to set the
         * link control register, by Realtek's design the driver should
         * check whether the host supports CLKREQ/ASPM before enabling the
         * HW module.
         *
         * These functions are implemented by two associated HW modules:
         * one is responsible for accessing PCIE configuration space to
         * follow the host settings, and the other is in charge of doing
         * the CLKREQ/ASPM mechanisms; the latter is disabled by default.
         * Sometimes the host does not support it, and due to wrong
         * settings (e.g. CLKREQ# not Bi-Directional), it could lead to
         * device loss if the HW misbehaves on the link.
         *
         * Hence it's designed that the driver should first check that the
         * PCIE configuration space is sync'ed and enabled, then the driver
         * can turn on the other module that actually works the mechanism.
         */
        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
        if (ret) {
                rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
                return;
        }

        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
                rtw_pci_clkreq_set(rtwdev, true);

        rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_intf_phy_para *para;
        u16 cut;
        u16 value;
        u16 offset;
        int i;

        cut = BIT(0) << rtwdev->hal.cut_version;

        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
                para = &chip->intf_table->gen1_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, true);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }

        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
                para = &chip->intf_table->gen2_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        rtw_mdio_write(rtwdev, offset, value, false);
                else
                        rtw_dbi_write8(rtwdev, offset, value);
        }

        rtw_pci_link_cfg(rtwdev);
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to enable pci device\n");
                return ret;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, rtwdev->hw);
        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

        return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci;
        int ret;

        rtwpci = (struct rtw_pci *)rtwdev->priv;
        rtwpci->pdev = pdev;

        /* after this, the driver can access hw registers */
        ret = rtw_pci_io_mapping(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci io region\n");
                goto err_out;
        }

        ret = rtw_pci_init(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to allocate pci resources\n");
                goto err_io_unmap;
        }

        return 0;

err_io_unmap:
        rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
        return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        rtw_pci_deinit(rtwdev);
        rtw_pci_io_unmapping(rtwdev, pdev);
}

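/* HCI callbacks the rtw88 core uses to drive the device over PCI */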
static struct rtw_hci_ops rtw_pci_ops = {
        .tx = rtw_pci_tx,
        .setup = rtw_pci_setup,
        .start = rtw_pci_start,
        .stop = rtw_pci_stop,
        .deep_ps = rtw_pci_deep_ps,
        .link_ps = rtw_pci_link_ps,

        .read8 = rtw_pci_read8,
        .read16 = rtw_pci_read16,
        .read32 = rtw_pci_read32,
        .write8 = rtw_pci_write8,
        .write16 = rtw_pci_write16,
        .write32 = rtw_pci_write32,
        .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
        .write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        unsigned int flags = PCI_IRQ_LEGACY;
        int ret;

        if (!rtw_disable_msi)
                flags |= PCI_IRQ_MSI;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
        if (ret < 0) {
                rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
                return ret;
        }

        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
                                        rtw_pci_interrupt_handler,
                                        rtw_pci_interrupt_threadfn,
                                        IRQF_SHARED, KBUILD_MODNAME, rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to request irq %d\n", ret);
                pci_free_irq_vectors(pdev);
        }

        return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
        pci_free_irq_vectors(pdev);
}

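/* Probe order: allocate the ieee80211_hw and core state, claim and map the
 * PCI device, set up the TX/RX rings, read the chip information, configure
 * the PCIe PHY/link, register with mac80211, and finally hook up the IRQ.
 * The error labels below unwind these steps in reverse order.
 */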
static int rtw_pci_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        struct ieee80211_hw *hw;
        struct rtw_dev *rtwdev;
        int drv_data_size;
        int ret;

        drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
        hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
        rtwdev->hci.ops = &rtw_pci_ops;
        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

        ret = rtw_core_init(rtwdev);
        if (ret)
                goto err_release_hw;

        rtw_dbg(rtwdev, RTW_DBG_PCI,
                "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
                pdev->vendor, pdev->device, pdev->revision);

        ret = rtw_pci_claim(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to claim pci device\n");
                goto err_deinit_core;
        }

        ret = rtw_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup pci resources\n");
                goto err_pci_declaim;
        }

        ret = rtw_chip_info_setup(rtwdev);
        if (ret) {
                rtw_err(rtwdev, "failed to setup chip information\n");
                goto err_destroy_pci;
        }

        rtw_pci_phy_cfg(rtwdev);

        ret = rtw_register_hw(rtwdev, hw);
        if (ret) {
                rtw_err(rtwdev, "failed to register hw\n");
                goto err_destroy_pci;
        }

        ret = rtw_pci_request_irq(rtwdev, pdev);
        if (ret) {
                ieee80211_unregister_hw(hw);
                goto err_destroy_pci;
        }

        return 0;

err_destroy_pci:
        rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
        rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
        rtw_core_deinit(rtwdev);

err_release_hw:
        ieee80211_free_hw(hw);

        return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw_dev *rtwdev;
        struct rtw_pci *rtwpci;

        if (!hw)
                return;

        rtwdev = hw->priv;
        rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_unregister_hw(rtwdev, hw);
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        rtw_pci_destroy(rtwdev, pdev);
        rtw_pci_declaim(rtwdev, pdev);
        rtw_pci_free_irq(rtwdev, pdev);
        rtw_core_deinit(rtwdev);
        ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
        { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
        {},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
        .name = "rtw_pci",
        .id_table = rtw_pci_id_table,
        .probe = rtw_pci_probe,
        .remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");