// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define MAX_TX_BUDGET			16
#define MAX_RX_BUDGET			16

#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{
	queue->dir = tx_rx;
	queue->index = index;
	queue->md_ctrl = md_ctrl;
	queue->tr_ring = NULL;
	queue->tr_done = NULL;
	queue->tx_next = NULL;
}

static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				    enum mtk_txrx tx_rx, unsigned int index)
{
	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->ring_lock);
}

static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{
	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
}

static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{
	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
}

static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}

static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

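	/* Walk the RX GPD ring: pass each hardware-completed buffer to
	 * recv_skb() and refill the slot with a freshly mapped skb so the
	 * hardware can keep receiving. Stop early once the budget is spent
	 * or a reschedule is pending.
	 */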
	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

			gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
					    queue->index * sizeof(u64));
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = queue->recv_skb(queue, skb);
		/* Break processing, will try again later */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}

static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}

static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
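	/* Requeue ourselves if collection could not complete (e.g. -EAGAIN on
	 * budget exhaustion) and the queue is still active.
	 */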
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	dma_addr_t dma_free;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}

static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Check the TGPD currently being processed; its 64-bit address
		 * is in a register table indexed by queue.
		 */
		ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

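	/* TX-done is still pending for this queue: requeue the work and leave
	 * the queue IRQ masked until the ring has been fully reclaimed.
	 */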
	if (l2_tx_int & BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}

static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
	struct cldma_request *req_cur, *req_next;

	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
		if (req_cur->mapped_buff && req_cur->skb) {
			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
					 ring->pkt_size, tx_rx);
			req_cur->mapped_buff = 0;
		}

		dev_kfree_skb_any(req_cur->skb);

		if (req_cur->gpd)
			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);

		list_del(&req_cur->entry);
		kfree(req_cur);
	}
}

static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{
	struct cldma_request *req;
	int val;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd)
		goto err_free_req;

	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
	if (val)
		goto err_free_pool;

	return req;

err_free_pool:
	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);

err_free_req:
	kfree(req);

	return NULL;
}

static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{
	struct cldma_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd) {
		kfree(req);
		return NULL;
	}

	return req;
}

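/* The TX ring only pre-allocates GPD descriptors; skbs are mapped into the
 * descriptors later, when t7xx_cldma_send_skb() queues data for transmission.
 */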
static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}

/**
 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 * @queue: Pointer to the queue structure.
 *
 * Called with ring_lock held (unless called during the initialization phase).
 */
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{
	struct cldma_request *req;

	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
	queue->tr_done = req;
	queue->budget = queue->tr_ring->length;

	if (queue->dir == MTK_TX)
		queue->tx_next = req;
	else
		queue->rx_refill = req;
}

static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_RX;
	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_TX;
	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* L2 raw interrupt status */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 TX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

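	/* RX mirrors TX: ack the L2 status, fold the empty-queue bits onto the
	 * queue bits, then mask the per-queue IRQs and defer processing to the
	 * RX workers.
	 */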
	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 RX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}

static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned int tx_active;
	unsigned int rx_active;

	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
		return false;

	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);

	return tx_active || rx_active;
}

/**
 * t7xx_cldma_stop() - Stop CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 * Clear status registers.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from polling t7xx_cldma_qs_are_active().
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	bool active;
	int i, ret;

	md_ctrl->rxq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	md_ctrl->txq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);

	if (md_ctrl->is_late_init) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			flush_work(&md_ctrl->txq[i].cldma_work);

		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			flush_work(&md_ctrl->rxq[i].cldma_work);
	}

	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
	if (ret)
		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);

	return ret;
}

static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
	int i;

	if (!md_ctrl->is_late_init)
		return;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);

	dma_pool_destroy(md_ctrl->gpd_dmapool);
	md_ctrl->gpd_dmapool = NULL;
	md_ctrl->is_late_init = false;
}

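/* Bring the controller back to a clean state: deactivate all queues, cancel
 * the per-queue workers, reset the queue bookkeeping and release the rings.
 */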
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}

/**
 * t7xx_cldma_start() - Start CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Set TX/RX start address.
 * Start all RX queues and enable L2 interrupt.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->is_late_init) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		int i;

		t7xx_cldma_enable_irq(md_ctrl);

		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
			if (md_ctrl->txq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->txq[i].tr_done->gpd_addr,
							     MTK_TX);
		}

		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
			if (md_ctrl->rxq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->rxq[i].tr_done->gpd_addr,
							     MTK_RX);
		}

		/* Enable L2 interrupt */
		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
		t7xx_cldma_hw_start(hw_info);
		md_ctrl->txq_started = 0;
		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}

static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}

void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	int i;

	if (tx_rx == MTK_TX) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			t7xx_cldma_clear_txq(md_ctrl, i);
	} else {
		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			t7xx_cldma_clear_rxq(md_ctrl, i);
	}
}

void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* This lock must cover TGPD setting, as even without a resume operation,
	 * CLDMA can send the next HWO=1 GPD if the last TGPD has just finished.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;
	return 0;
}

/* Called with cldma_lock held */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* Check whether the device was powered off (CLDMA start address is not set) */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}

/**
 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 * @queue: CLDMA queue.
 * @recv_skb: Receiving skb callback.
 */
void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
	queue->recv_skb = recv_skb;
}

/**
 * t7xx_cldma_send_skb() - Send control data to modem.
 * @md_ctrl: CLDMA context structure.
 * @qno: Queue number.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Allocation failure.
 * * -EINVAL	- Invalid queue request.
 * * -EIO	- Queue is not active.
 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{
	struct cldma_request *tx_req;
	struct cldma_queue *queue;
	unsigned long flags;
	int ret;

	if (qno >= CLDMA_TXQ_NUM)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(md_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
	queue = &md_ctrl->txq[qno];

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (!(md_ctrl->txq_active & BIT(qno))) {
		ret = -EIO;
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		goto allow_sleep;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	do {
		spin_lock_irqsave(&queue->ring_lock, flags);
		tx_req = queue->tx_next;
		if (queue->budget > 0 && !tx_req->skb) {
			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;

			queue->budget--;
			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
			spin_unlock_irqrestore(&queue->ring_lock, flags);

			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
				ret = -ETIMEDOUT;
				break;
			}

			/* Protect access to the modem for queue operations (resume/start)
			 * that touch registers shared by all queues.
			 * cldma_lock is independent of ring_lock, which is per queue.
			 */
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

			break;
		}
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
			ret = -ETIMEDOUT;
			break;
		}

		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		}

		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
	} while (!ret);

allow_sleep:
	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
	return ret;
}

static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	int qno;

	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
		md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
	}

	md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;

	for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
		md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;

	if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
		md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
					t7xx_port_proxy_recv_skb_from_dedicated_queue);
	}
}

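/* Late init runs once the queue configuration is known: it creates the GPD
 * DMA pool and builds every TX/RX descriptor ring before the queues are
 * attached to them.
 */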
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}

static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
	return addr + phy_addr - addr_trs1;
}

static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	hw_info->hw_mode = MODE_BIT_64;

	if (md_ctrl->hif_id == CLDMA_ID_MD) {
		phy_ao_base = CLDMA1_AO_BASE;
		phy_pd_base = CLDMA1_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA1_INT;
	} else {
		phy_ao_base = CLDMA0_AO_BASE;
		phy_pd_base = CLDMA0_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA0_INT;
	}

	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}

static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}

int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct cldma_ctrl *md_ctrl;
	int qno;

	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
	if (!md_ctrl)
		return -ENOMEM;

	md_ctrl->t7xx_dev = t7xx_dev;
	md_ctrl->dev = dev;
	md_ctrl->hif_id = hif_id;
	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
		md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;

	t7xx_hw_info_init(md_ctrl);
	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
	return 0;
}

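/* Device PM callbacks. On suspend the queue IRQs are masked and the queues
 * stopped (TX first, RX in the late stage); on resume the HW state and the
 * per-queue start addresses are restored before the queues are re-enabled.
 */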
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);

	return 0;
}

static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}

static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
	if (!md_ctrl->pm_entity)
		return -ENOMEM;

	md_ctrl->pm_entity->entity_param = md_ctrl;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
	else
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;

	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;

	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}

static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
	if (!md_ctrl->pm_entity)
		return -EINVAL;

	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
	kfree(md_ctrl->pm_entity);
	md_ctrl->pm_entity = NULL;
	return 0;
}

void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}

static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{
	int i;

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		if (md_ctrl->txq[i].worker) {
			destroy_workqueue(md_ctrl->txq[i].worker);
			md_ctrl->txq[i].worker = NULL;
		}
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		if (md_ctrl->rxq[i].worker) {
			destroy_workqueue(md_ctrl->rxq[i].worker);
			md_ctrl->rxq[i].worker = NULL;
		}
	}
}

/**
 * t7xx_cldma_init() - Initialize CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Allocate and initialize device power management entity.
 * Initialize HIF TX/RX queue structure.
 * Register CLDMA callback ISR with PCIe driver.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from a failed sub-initialization.
 */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int ret, i;

	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	md_ctrl->is_late_init = false;

	ret = t7xx_cldma_pm_init(md_ctrl);
	if (ret)
		return ret;

	spin_lock_init(&md_ctrl->cldma_lock);

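	/* One ordered workqueue per queue keeps completion handling serialized;
	 * TX queue 0 additionally runs its worker at high priority.
	 */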
	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		md_ctrl->txq[i].worker =
			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
						WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
						md_ctrl->hif_id, i);
		if (!md_ctrl->txq[i].worker)
			goto err_workqueue;

		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);

		md_ctrl->rxq[i].worker =
			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
						WQ_MEM_RECLAIM,
						md_ctrl->hif_id, i);
		if (!md_ctrl->rxq[i].worker)
			goto err_workqueue;
	}

	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	return 0;

err_workqueue:
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
	return -ENOMEM;
}

void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_adjust_config(md_ctrl, cfg_id);
	t7xx_cldma_late_init(md_ctrl);
}

void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}