// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

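/* Allocate the coherent RX descriptor ring and one RTK_PCI_RX_BUF_SIZE skb
 * per descriptor; each skb is DMA mapped and its bus address is written
 * into the matching buffer descriptor by rtw_pci_reset_rx_desc().
 */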
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

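/* Program each ring's DMA base address and length into the TXBD/RXBD
 * registers and clear the hardware and host read/write pointers, so the
 * device and driver restart from index 0.
 */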
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
			BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_trx_ring(rtwdev);

	return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	rtw_pci_dma_reset(rtwdev, rtwpci);

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state, so every TX ring
	 * must be idle before entering it
	 */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue is rsvd page, does not have DMA interrupt
		 * H2C queue is managed by firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

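/* Map a frame to a hardware TX queue: beacons and management/control
 * frames use dedicated queues, while data frames follow the mac80211
 * AC mapping in ac_to_hwq above.
 */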
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static int rtw_pci_xmit(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;
	u32 bd_idx;
	unsigned long flags;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* the skb is DMA mapped from here on, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	rtw_pci_deep_ps_leave(rtwdev);
	skb_queue_tail(&ring->queue, skb);

	/* kick off tx queue */
	if (queue != RTW_TX_QUEUE_BCN) {
		if (++ring->r.wp >= ring->r.len)
			ring->r.wp = 0;
		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
	} else {
		u32 reg_bcn_work;

		reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
	}
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;
	pkt_info.offset = tx_pkt_desc_sz;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
		      struct rtw_tx_pkt_info *pkt_info,
		      struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= 0xfff;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= 0xfff;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[2]);
}

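/* Interrupt handling is split in two: the hard handler below only masks
 * the HIMR registers and returns IRQ_WAKE_THREAD; the threaded handler
 * reads and acks the HISR bits, services TX/RX, then unmasks again.
 */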
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock(&rtwpci->irq_lock);
	if (!rtwpci->irq_enabled)
		goto out;

	/* disable RTW PCI interrupt to avoid more interrupts before the end
	 * of the thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
out:
	spin_unlock(&rtwpci->irq_lock);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;
	u32 irq_status[4];

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

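/* Read one byte through the DBI interface: issue the address via
 * REG_DBI_FLAG_V1, poll until the hardware clears the read flag, then
 * fetch the byte from REG_DBI_RDATA_V1; returns -EIO on timeout.
 */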
static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode and there is no heavy traffic, because we
	 * have seen interoperability issues where the link enters L1 on the
	 * fly even while the driver is pushing high throughput. This is
	 * probably because the ASPM behavior varies slightly between SoCs.
	 */
	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* Though the standard PCIE configuration space can set the link
	 * control register, by Realtek's design the driver must also check
	 * whether the host supports CLKREQ/ASPM before enabling the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIE configuration space to follow the host settings,
	 * and the other actually performs the CLKREQ/ASPM mechanism and is
	 * disabled by default. Sometimes the host does not support it, and
	 * wrong settings (e.g. CLKREQ# not bi-directional) could cause the
	 * device to drop off the bus if the HW misbehaves on the link.
	 *
	 * Hence the driver first checks that the PCIE configuration space is
	 * synced and enabled, and only then turns on the module that actually
	 * drives the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

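/* HCI callbacks the rtw88 core uses to drive the PCI transport */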
static struct rtw_hci_ops rtw_pci_ops = {
	.tx = rtw_pci_tx,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
	.name = "rtw_pci",
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");