1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2021, MediaTek Inc.
4 * Copyright (c) 2021-2022, Intel Corporation.
5 *
6 * Authors:
7 * Amir Hanania <amir.hanania@intel.com>
8 * Haijun Liu <haijun.liu@mediatek.com>
9 * Moises Veleta <moises.veleta@intel.com>
10 * Ricardo Martinez <ricardo.martinez@linux.intel.com>
11 * Sreehari Kancharla <sreehari.kancharla@intel.com>
12 *
13 * Contributors:
14 * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15 * Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16 * Eliot Lee <eliot.lee@intel.com>
17 */
18
19 #include <linux/bits.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <linux/device.h>
23 #include <linux/dmapool.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dma-direction.h>
26 #include <linux/gfp.h>
27 #include <linux/io.h>
28 #include <linux/io-64-nonatomic-lo-hi.h>
29 #include <linux/iopoll.h>
30 #include <linux/irqreturn.h>
31 #include <linux/kernel.h>
32 #include <linux/kthread.h>
33 #include <linux/list.h>
34 #include <linux/netdevice.h>
35 #include <linux/pci.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/sched.h>
38 #include <linux/skbuff.h>
39 #include <linux/slab.h>
40 #include <linux/spinlock.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44
45 #include "t7xx_cldma.h"
46 #include "t7xx_hif_cldma.h"
47 #include "t7xx_mhccif.h"
48 #include "t7xx_pci.h"
49 #include "t7xx_pcie_mac.h"
50 #include "t7xx_port_proxy.h"
51 #include "t7xx_reg.h"
52 #include "t7xx_state_monitor.h"
53
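/* TX/RX GPD ring lengths, which also bound the per-queue processing budgets,
 * and the step/timeout used when polling for all queues to stop.
 */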
54 #define MAX_TX_BUDGET 16
55 #define MAX_RX_BUDGET 16
56
57 #define CHECK_Q_STOP_TIMEOUT_US 1000000
58 #define CHECK_Q_STOP_STEP_US 10000
59
60 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
61 enum mtk_txrx tx_rx, unsigned int index)
62 {
63 queue->dir = tx_rx;
64 queue->index = index;
65 queue->md_ctrl = md_ctrl;
66 queue->tr_ring = NULL;
67 queue->tr_done = NULL;
68 queue->tx_next = NULL;
69 }
70
71 static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
72 enum mtk_txrx tx_rx, unsigned int index)
73 {
74 md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
75 init_waitqueue_head(&queue->req_wq);
76 spin_lock_init(&queue->ring_lock);
77 }
78
79 static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
80 {
81 gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
82 gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
83 }
84
85 static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
86 {
87 gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
88 gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
89 }
90
91 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
92 size_t size, gfp_t gfp_mask)
93 {
94 req->skb = __dev_alloc_skb(size, gfp_mask);
95 if (!req->skb)
96 return -ENOMEM;
97
98 req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
99 if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
100 dev_kfree_skb_any(req->skb);
101 req->skb = NULL;
102 req->mapped_buff = 0;
103 dev_err(md_ctrl->dev, "DMA mapping failed\n");
104 return -ENOMEM;
105 }
106
107 return 0;
108 }
109
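/* Reclaim completed RX GPDs from the tr_done ring: pass each filled skb to the
 * queue's recv_skb() callback, then allocate and map a fresh buffer and hand
 * the GPD back to the hardware (HWO set). Processing stops once the budget is
 * used up, a reschedule is due, or the hardware still owns the next GPD.
 */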
110 static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
111 {
112 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
113 unsigned int hwo_polling_count = 0;
114 struct t7xx_cldma_hw *hw_info;
115 bool rx_not_done = true;
116 unsigned long flags;
117 int count = 0;
118
119 hw_info = &md_ctrl->hw_info;
120
121 do {
122 struct cldma_request *req;
123 struct cldma_gpd *gpd;
124 struct sk_buff *skb;
125 int ret;
126
127 req = queue->tr_done;
128 if (!req)
129 return -ENODATA;
130
131 gpd = req->gpd;
132 if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
133 dma_addr_t gpd_addr;
134
135 if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
136 dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
137 return -ENODEV;
138 }
139
140 gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
141 REG_CLDMA_DL_CURRENT_ADDRL_0 +
142 queue->index * sizeof(u64));
143 if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
144 return 0;
145
146 udelay(1);
147 continue;
148 }
149
150 hwo_polling_count = 0;
151 skb = req->skb;
152
153 if (req->mapped_buff) {
154 dma_unmap_single(md_ctrl->dev, req->mapped_buff,
155 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
156 req->mapped_buff = 0;
157 }
158
159 skb->len = 0;
160 skb_reset_tail_pointer(skb);
161 skb_put(skb, le16_to_cpu(gpd->data_buff_len));
162
163 ret = queue->recv_skb(queue, skb);
164 /* Break processing, will try again later */
165 if (ret < 0)
166 return ret;
167
168 req->skb = NULL;
169 t7xx_cldma_gpd_set_data_ptr(gpd, 0);
170
171 spin_lock_irqsave(&queue->ring_lock, flags);
172 queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
173 spin_unlock_irqrestore(&queue->ring_lock, flags);
174 req = queue->rx_refill;
175
176 ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
177 if (ret)
178 return ret;
179
180 gpd = req->gpd;
181 t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
182 gpd->data_buff_len = 0;
183 gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
184
185 spin_lock_irqsave(&queue->ring_lock, flags);
186 queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
187 spin_unlock_irqrestore(&queue->ring_lock, flags);
188
189 rx_not_done = ++count < budget || !need_resched();
190 } while (rx_not_done);
191
192 *over_budget = true;
193 return 0;
194 }
195
196 static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
197 {
198 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
199 struct t7xx_cldma_hw *hw_info;
200 unsigned int pending_rx_int;
201 bool over_budget = false;
202 unsigned long flags;
203 int ret;
204
205 hw_info = &md_ctrl->hw_info;
206
207 do {
208 ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
209 if (ret == -ENODATA)
210 return 0;
211 else if (ret)
212 return ret;
213
214 pending_rx_int = 0;
215
216 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
217 if (md_ctrl->rxq_active & BIT(queue->index)) {
218 if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
219 t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
220
221 pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
222 MTK_RX);
223 if (pending_rx_int) {
224 t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
225
226 if (over_budget) {
227 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
228 return -EAGAIN;
229 }
230 }
231 }
232 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
233 } while (pending_rx_int);
234
235 return 0;
236 }
237
238 static void t7xx_cldma_rx_done(struct work_struct *work)
239 {
240 struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
241 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
242 int value;
243
244 value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
245 if (value && md_ctrl->rxq_active & BIT(queue->index)) {
246 queue_work(queue->worker, &queue->cldma_work);
247 return;
248 }
249
250 t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
251 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
252 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
253 pm_runtime_put_autosuspend(md_ctrl->dev);
254 }
255
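/* Reclaim completed TX GPDs: unmap and free the transmitted skbs, return the
 * descriptors to the queue budget and wake up senders blocked in
 * t7xx_cldma_send_skb().
 */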
256 static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
257 {
258 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
259 unsigned int dma_len, count = 0;
260 struct cldma_request *req;
261 struct cldma_gpd *gpd;
262 unsigned long flags;
263 dma_addr_t dma_free;
264 struct sk_buff *skb;
265
266 while (!kthread_should_stop()) {
267 spin_lock_irqsave(&queue->ring_lock, flags);
268 req = queue->tr_done;
269 if (!req) {
270 spin_unlock_irqrestore(&queue->ring_lock, flags);
271 break;
272 }
273 gpd = req->gpd;
274 if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
275 spin_unlock_irqrestore(&queue->ring_lock, flags);
276 break;
277 }
278 queue->budget++;
279 dma_free = req->mapped_buff;
280 dma_len = le16_to_cpu(gpd->data_buff_len);
281 skb = req->skb;
282 req->skb = NULL;
283 queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
284 spin_unlock_irqrestore(&queue->ring_lock, flags);
285
286 count++;
287 dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
288 dev_kfree_skb_any(skb);
289 }
290
291 if (count)
292 wake_up_nr(&queue->req_wq, count);
293
294 return count;
295 }
296
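/* Queue-empty handling for TX: if the most recently queued GPD is still
 * hardware-owned and the hardware's current pointer has reached it, resume the
 * queue so the pending descriptor is processed.
 */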
297 static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
298 {
299 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
300 struct cldma_request *req;
301 dma_addr_t ul_curr_addr;
302 unsigned long flags;
303 bool pending_gpd;
304
305 if (!(md_ctrl->txq_active & BIT(queue->index)))
306 return;
307
308 spin_lock_irqsave(&queue->ring_lock, flags);
309 req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
310 spin_unlock_irqrestore(&queue->ring_lock, flags);
311
312 pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
313
314 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
315 if (pending_gpd) {
316 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
317
318 /* Check the TGPD currently being processed; its 64-bit address is in a per-queue table */
319 ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
320 queue->index * sizeof(u64));
321 if (req->gpd_addr != ul_curr_addr) {
322 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
323 dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
324 md_ctrl->hif_id, queue->index);
325 return;
326 }
327
328 t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
329 }
330 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
331 }
332
333 static void t7xx_cldma_tx_done(struct work_struct *work)
334 {
335 struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
336 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
337 struct t7xx_cldma_hw *hw_info;
338 unsigned int l2_tx_int;
339 unsigned long flags;
340
341 hw_info = &md_ctrl->hw_info;
342 t7xx_cldma_gpd_tx_collect(queue);
343 l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
344 MTK_TX);
345 if (l2_tx_int & EQ_STA_BIT(queue->index)) {
346 t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
347 t7xx_cldma_txq_empty_hndl(queue);
348 }
349
350 if (l2_tx_int & BIT(queue->index)) {
351 t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
352 queue_work(queue->worker, &queue->cldma_work);
353 return;
354 }
355
356 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
357 if (md_ctrl->txq_active & BIT(queue->index)) {
358 t7xx_cldma_clear_ip_busy(hw_info);
359 t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
360 t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
361 }
362 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
363
364 pm_runtime_put_autosuspend(md_ctrl->dev);
365 }
366
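/* Release a GPD ring: unmap DMA buffers, free attached skbs, return the GPDs
 * to the DMA pool and free the request descriptors.
 */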
367 static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
368 struct cldma_ring *ring, enum dma_data_direction tx_rx)
369 {
370 struct cldma_request *req_cur, *req_next;
371
372 list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
373 if (req_cur->mapped_buff && req_cur->skb) {
374 dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
375 ring->pkt_size, tx_rx);
376 req_cur->mapped_buff = 0;
377 }
378
379 dev_kfree_skb_any(req_cur->skb);
380
381 if (req_cur->gpd)
382 dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
383
384 list_del(&req_cur->entry);
385 kfree(req_cur);
386 }
387 }
388
389 static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
390 {
391 struct cldma_request *req;
392 int val;
393
394 req = kzalloc_obj(*req);
395 if (!req)
396 return NULL;
397
398 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
399 if (!req->gpd)
400 goto err_free_req;
401
402 val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
403 if (val)
404 goto err_free_pool;
405
406 return req;
407
408 err_free_pool:
409 dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
410
411 err_free_req:
412 kfree(req);
413
414 return NULL;
415 }
416
417 static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
418 {
419 struct cldma_request *req;
420 struct cldma_gpd *gpd;
421 int i;
422
423 INIT_LIST_HEAD(&ring->gpd_ring);
424 ring->length = MAX_RX_BUDGET;
425
426 for (i = 0; i < ring->length; i++) {
427 req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
428 if (!req) {
429 t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
430 return -ENOMEM;
431 }
432
433 gpd = req->gpd;
434 t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
435 gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
436 gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
437 INIT_LIST_HEAD(&req->entry);
438 list_add_tail(&req->entry, &ring->gpd_ring);
439 }
440
441 /* Link previous GPD to next GPD, circular */
442 list_for_each_entry(req, &ring->gpd_ring, entry) {
443 t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
444 gpd = req->gpd;
445 }
446
447 return 0;
448 }
449
450 static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
451 {
452 struct cldma_request *req;
453
454 req = kzalloc_obj(*req);
455 if (!req)
456 return NULL;
457
458 req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
459 if (!req->gpd) {
460 kfree(req);
461 return NULL;
462 }
463
464 return req;
465 }
466
467 static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
468 {
469 struct cldma_request *req;
470 struct cldma_gpd *gpd;
471 int i;
472
473 INIT_LIST_HEAD(&ring->gpd_ring);
474 ring->length = MAX_TX_BUDGET;
475
476 for (i = 0; i < ring->length; i++) {
477 req = t7xx_alloc_tx_request(md_ctrl);
478 if (!req) {
479 t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
480 return -ENOMEM;
481 }
482
483 gpd = req->gpd;
484 gpd->flags = GPD_FLAGS_IOC;
485 INIT_LIST_HEAD(&req->entry);
486 list_add_tail(&req->entry, &ring->gpd_ring);
487 }
488
489 /* Link previous GPD to next GPD, circular */
490 list_for_each_entry(req, &ring->gpd_ring, entry) {
491 t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
492 gpd = req->gpd;
493 }
494
495 return 0;
496 }
497
498 /**
499 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
500 * @queue: Pointer to the queue structure.
501 *
502 * Called with ring_lock (unless called during initialization phase)
503 */
504 static void t7xx_cldma_q_reset(struct cldma_queue *queue)
505 {
506 struct cldma_request *req;
507
508 req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
509 queue->tr_done = req;
510 queue->budget = queue->tr_ring->length;
511
512 if (queue->dir == MTK_TX)
513 queue->tx_next = req;
514 else
515 queue->rx_refill = req;
516 }
517
518 static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
519 {
520 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
521
522 queue->dir = MTK_RX;
523 queue->tr_ring = &md_ctrl->rx_ring[queue->index];
524 t7xx_cldma_q_reset(queue);
525 }
526
527 static void t7xx_cldma_txq_init(struct cldma_queue *queue)
528 {
529 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
530
531 queue->dir = MTK_TX;
532 queue->tr_ring = &md_ctrl->tx_ring[queue->index];
533 t7xx_cldma_q_reset(queue);
534 }
535
536 static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
537 {
538 t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
539 }
540
541 static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
542 {
543 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
544 }
545
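/* Interrupt dispatcher: read and acknowledge the unmasked L2 TX/RX status,
 * clear any L3 error status, then disable the per-queue interrupts and defer
 * GPD processing to the corresponding work items.
 */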
546 static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
547 {
548 unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
549 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
550 int i;
551
552 /* L2 raw interrupt status */
553 l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
554 l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
555 l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
556 l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
557 l2_tx_int &= ~l2_tx_int_msk;
558 l2_rx_int &= ~l2_rx_int_msk;
559
560 if (l2_tx_int) {
561 if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
562 /* Read and clear L3 TX interrupt status */
563 val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
564 iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
565 val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
566 iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
567 }
568
569 t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
570 if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
571 for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
572 if (i < CLDMA_TXQ_NUM) {
573 pm_runtime_get(md_ctrl->dev);
574 t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
575 t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
576 queue_work(md_ctrl->txq[i].worker,
577 &md_ctrl->txq[i].cldma_work);
578 } else {
579 t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
580 }
581 }
582 }
583 }
584
585 if (l2_rx_int) {
586 if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
587 /* Read and clear L3 RX interrupt status */
588 val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
589 iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
590 val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
591 iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
592 }
593
594 t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
595 if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
596 l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
597 for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
598 pm_runtime_get(md_ctrl->dev);
599 t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
600 t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
601 queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
602 }
603 }
604 }
605 }
606
607 static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
608 {
609 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
610 unsigned int tx_active;
611 unsigned int rx_active;
612
613 if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
614 return false;
615
616 tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
617 rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
618
619 return tx_active || rx_active;
620 }
621
622 /**
623 * t7xx_cldma_stop() - Stop CLDMA.
624 * @md_ctrl: CLDMA context structure.
625 *
626 * Stop TX and RX queues. Disable L1 and L2 interrupts.
627 * Clear status registers.
628 *
629 * Return:
630 * * 0 - Success.
631 * * -ERROR - Error code from polling cldma_queues_active.
632 */
633 int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
634 {
635 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
636 bool active;
637 int i, ret;
638
639 md_ctrl->rxq_active = 0;
640 t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
641 md_ctrl->txq_active = 0;
642 t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
643 md_ctrl->txq_started = 0;
644 t7xx_cldma_disable_irq(md_ctrl);
645 t7xx_cldma_hw_stop(hw_info, MTK_RX);
646 t7xx_cldma_hw_stop(hw_info, MTK_TX);
647 t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
648 t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
649
650 if (md_ctrl->is_late_init) {
651 for (i = 0; i < CLDMA_TXQ_NUM; i++)
652 flush_work(&md_ctrl->txq[i].cldma_work);
653
654 for (i = 0; i < CLDMA_RXQ_NUM; i++)
655 flush_work(&md_ctrl->rxq[i].cldma_work);
656 }
657
658 ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
659 CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
660 if (ret)
661 dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues\n", md_ctrl->hif_id);
662
663 return ret;
664 }
665
666 static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
667 {
668 int i;
669
670 if (!md_ctrl->is_late_init)
671 return;
672
673 for (i = 0; i < CLDMA_TXQ_NUM; i++)
674 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
675
676 for (i = 0; i < CLDMA_RXQ_NUM; i++)
677 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
678
679 dma_pool_destroy(md_ctrl->gpd_dmapool);
680 md_ctrl->gpd_dmapool = NULL;
681 md_ctrl->is_late_init = false;
682 }
683
684 void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
685 {
686 unsigned long flags;
687 int i;
688
689 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
690 md_ctrl->txq_active = 0;
691 md_ctrl->rxq_active = 0;
692 t7xx_cldma_disable_irq(md_ctrl);
693 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
694
695 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
696 cancel_work_sync(&md_ctrl->txq[i].cldma_work);
697
698 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
699 md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
700 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
701 }
702
703 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
704 cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
705
706 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
707 md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
708 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
709 }
710
711 t7xx_cldma_late_release(md_ctrl);
712 }
713
714 /**
715 * t7xx_cldma_start() - Start CLDMA.
716 * @md_ctrl: CLDMA context structure.
717 *
718 * Set TX/RX start address.
719 * Start all RX queues and enable L2 interrupt.
720 */
721 void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
722 {
723 unsigned long flags;
724
725 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
726 if (md_ctrl->is_late_init) {
727 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
728 int i;
729
730 t7xx_cldma_enable_irq(md_ctrl);
731
732 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
733 if (md_ctrl->txq[i].tr_done)
734 t7xx_cldma_hw_set_start_addr(hw_info, i,
735 md_ctrl->txq[i].tr_done->gpd_addr,
736 MTK_TX);
737 }
738
739 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
740 if (md_ctrl->rxq[i].tr_done)
741 t7xx_cldma_hw_set_start_addr(hw_info, i,
742 md_ctrl->rxq[i].tr_done->gpd_addr,
743 MTK_RX);
744 }
745
746 /* Enable L2 interrupt */
747 t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
748 t7xx_cldma_hw_start(hw_info);
749 md_ctrl->txq_started = 0;
750 md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
751 md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
752 }
753 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
754 }
755
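/* Drop any skbs still queued for transmission and return the TX GPDs to
 * software ownership with their data pointers and lengths cleared.
 */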
756 static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
757 {
758 struct cldma_queue *txq = &md_ctrl->txq[qnum];
759 struct cldma_request *req;
760 struct cldma_gpd *gpd;
761 unsigned long flags;
762
763 spin_lock_irqsave(&txq->ring_lock, flags);
764 t7xx_cldma_q_reset(txq);
765 list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
766 gpd = req->gpd;
767 gpd->flags &= ~GPD_FLAGS_HWO;
768 t7xx_cldma_gpd_set_data_ptr(gpd, 0);
769 gpd->data_buff_len = 0;
770 dev_kfree_skb_any(req->skb);
771 req->skb = NULL;
772 }
773 spin_unlock_irqrestore(&txq->ring_lock, flags);
774 }
775
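/* Reset an RX ring to its initial state: mark every GPD hardware-owned again
 * and reattach (or reallocate) an empty receive buffer to each request.
 */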
776 static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
777 {
778 struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
779 struct cldma_request *req;
780 struct cldma_gpd *gpd;
781 unsigned long flags;
782 int ret = 0;
783
784 spin_lock_irqsave(&rxq->ring_lock, flags);
785 t7xx_cldma_q_reset(rxq);
786 list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
787 gpd = req->gpd;
788 gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
789 gpd->data_buff_len = 0;
790
791 if (req->skb) {
792 req->skb->len = 0;
793 skb_reset_tail_pointer(req->skb);
794 }
795 }
796
797 list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
798 if (req->skb)
799 continue;
800
801 ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
802 if (ret)
803 break;
804
805 t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
806 }
807 spin_unlock_irqrestore(&rxq->ring_lock, flags);
808
809 return ret;
810 }
811
812 void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
813 {
814 int i;
815
816 if (tx_rx == MTK_TX) {
817 for (i = 0; i < CLDMA_TXQ_NUM; i++)
818 t7xx_cldma_clear_txq(md_ctrl, i);
819 } else {
820 for (i = 0; i < CLDMA_RXQ_NUM; i++)
821 t7xx_cldma_clear_rxq(md_ctrl, i);
822 }
823 }
824
825 void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
826 {
827 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
828 unsigned long flags;
829
830 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
831 t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
832 t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
833 if (tx_rx == MTK_RX)
834 md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
835 else
836 md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
837 t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
838 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
839 }
840
841 static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
842 struct sk_buff *skb)
843 {
844 struct cldma_ctrl *md_ctrl = queue->md_ctrl;
845 struct cldma_gpd *gpd = tx_req->gpd;
846 unsigned long flags;
847
848 /* Update GPD */
849 tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
850
851 if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
852 dev_err(md_ctrl->dev, "DMA mapping failed\n");
853 return -ENOMEM;
854 }
855
856 t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
857 gpd->data_buff_len = cpu_to_le16(skb->len);
858
859 /* This lock must cover the TGPD update: even without an explicit resume,
860 * CLDMA may fetch the next HWO=1 descriptor as soon as the previous TGPD finishes.
861 */
862 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
863 if (md_ctrl->txq_active & BIT(queue->index))
864 gpd->flags |= GPD_FLAGS_HWO;
865
866 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
867
868 tx_req->skb = skb;
869 return 0;
870 }
871
872 /* Called with cldma_lock */
873 static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
874 struct cldma_request *prev_req)
875 {
876 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
877
878 /* Check whether the device was powered off (CLDMA start address is not set) */
879 if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
880 t7xx_cldma_hw_init(hw_info);
881 t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
882 md_ctrl->txq_started &= ~BIT(qno);
883 }
884
885 if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
886 if (md_ctrl->txq_started & BIT(qno))
887 t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
888 else
889 t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
890
891 md_ctrl->txq_started |= BIT(qno);
892 }
893 }
894
895 /**
896 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
897 * @queue: CLDMA queue.
898 * @recv_skb: Receiving skb callback.
899 */
900 static void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
901 int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
902 {
903 queue->recv_skb = recv_skb;
904 }
905
906 /**
907 * t7xx_cldma_send_skb() - Send control data to modem.
908 * @md_ctrl: CLDMA context structure.
909 * @qno: Queue number.
910 * @skb: Socket buffer.
911 *
912 * Return:
913 * * 0 - Success.
914 * * -ENOMEM - Allocation failure.
915 * * -EINVAL - Invalid queue request.
916 * * -EIO - Queue is not active.
917 * * -ETIMEDOUT - Timeout waiting for the device to wake up.
918 */
919 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
920 {
921 struct cldma_request *tx_req;
922 struct cldma_queue *queue;
923 unsigned long flags;
924 int ret;
925
926 if (qno >= CLDMA_TXQ_NUM)
927 return -EINVAL;
928
929 ret = pm_runtime_resume_and_get(md_ctrl->dev);
930 if (ret < 0 && ret != -EACCES)
931 return ret;
932
933 t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
934 queue = &md_ctrl->txq[qno];
935
936 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
937 if (!(md_ctrl->txq_active & BIT(qno))) {
938 ret = -EIO;
939 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
940 goto allow_sleep;
941 }
942 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
943
944 do {
945 spin_lock_irqsave(&queue->ring_lock, flags);
946 tx_req = queue->tx_next;
947 if (queue->budget > 0 && !tx_req->skb) {
948 struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
949
950 queue->budget--;
951 t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
952 queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
953 spin_unlock_irqrestore(&queue->ring_lock, flags);
954
955 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
956 ret = -ETIMEDOUT;
957 break;
958 }
959
960 /* Protect access to the modem for queue operations (resume/start), which
961 * touch locations shared by all queues.
962 * cldma_lock is independent of ring_lock, which is per queue.
963 */
964 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
965 t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
966 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
967
968 break;
969 }
970 spin_unlock_irqrestore(&queue->ring_lock, flags);
971
972 if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
973 ret = -ETIMEDOUT;
974 break;
975 }
976
977 if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
978 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
979 t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
980 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
981 }
982
983 ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
984 } while (!ret);
985
986 allow_sleep:
987 t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
988 pm_runtime_put_autosuspend(md_ctrl->dev);
989 return ret;
990 }
991
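/* Pick per-queue buffer sizes and RX callbacks for the requested configuration:
 * shared queues use the default buffer size (with a jumbo buffer on the last RX
 * queue), while the dedicated configuration enlarges the dump queue buffers and
 * routes that queue to the dedicated-queue receive handler.
 */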
992 static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
993 {
994 int qno;
995
996 for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
997 md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
998 t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
999 }
1000
1001 md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
1002
1003 for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
1004 md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
1005
1006 if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
1007 md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
1008 md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
1009 t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
1010 t7xx_port_proxy_recv_skb_from_dedicated_queue);
1011 }
1012 }
1013
1014 static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
1015 {
1016 char dma_pool_name[32];
1017 int i, j, ret;
1018
1019 if (md_ctrl->is_late_init) {
1020 dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
1021 return -EALREADY;
1022 }
1023
1024 snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
1025
1026 md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
1027 sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
1028 if (!md_ctrl->gpd_dmapool) {
1029 dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
1030 return -ENOMEM;
1031 }
1032
1033 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1034 ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
1035 if (ret) {
1036 dev_err(md_ctrl->dev, "Control TX ring init fail\n");
1037 goto err_free_tx_ring;
1038 }
1039 }
1040
1041 for (j = 0; j < CLDMA_RXQ_NUM; j++) {
1042 ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
1043 if (ret) {
1044 dev_err(md_ctrl->dev, "Control RX ring init fail\n");
1045 goto err_free_rx_ring;
1046 }
1047 }
1048
1049 for (i = 0; i < CLDMA_TXQ_NUM; i++)
1050 t7xx_cldma_txq_init(&md_ctrl->txq[i]);
1051
1052 for (j = 0; j < CLDMA_RXQ_NUM; j++)
1053 t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
1054
1055 md_ctrl->is_late_init = true;
1056 return 0;
1057
1058 err_free_rx_ring:
1059 while (j--)
1060 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
1061
1062 err_free_tx_ring:
1063 while (i--)
1064 t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
1065
1066 return ret;
1067 }
1068
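/* Translate a device-side physical register address into a pointer within the
 * already-mapped PCIe external register space.
 */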
1069 static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
1070 {
1071 return addr + phy_addr - addr_trs1;
1072 }
1073
1074 static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
1075 {
1076 struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
1077 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1078 u32 phy_ao_base, phy_pd_base;
1079
1080 hw_info->hw_mode = MODE_BIT_64;
1081
1082 if (md_ctrl->hif_id == CLDMA_ID_MD) {
1083 phy_ao_base = CLDMA1_AO_BASE;
1084 phy_pd_base = CLDMA1_PD_BASE;
1085 hw_info->phy_interrupt_id = CLDMA1_INT;
1086 } else {
1087 phy_ao_base = CLDMA0_AO_BASE;
1088 phy_pd_base = CLDMA0_PD_BASE;
1089 hw_info->phy_interrupt_id = CLDMA0_INT;
1090 }
1091
1092 hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1093 pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
1094 hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1095 pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
1096 }
1097
1098 static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
1099 {
1100 dev_kfree_skb_any(skb);
1101 return 0;
1102 }
1103
1104 int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
1105 {
1106 struct device *dev = &t7xx_dev->pdev->dev;
1107 struct cldma_ctrl *md_ctrl;
1108 int qno;
1109
1110 md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
1111 if (!md_ctrl)
1112 return -ENOMEM;
1113
1114 md_ctrl->t7xx_dev = t7xx_dev;
1115 md_ctrl->dev = dev;
1116 md_ctrl->hif_id = hif_id;
1117 for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
1118 md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
1119
1120 t7xx_hw_info_init(md_ctrl);
1121 t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
1122 return 0;
1123 }
1124
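/* PM callbacks registered through t7xx_pci_pm_entity_register(): suspend
 * disables the TX queues, suspend_late disables the RX queues and CLDMA
 * interrupts; resume_early restores the hardware state and restarts RX, and
 * resume re-enables TX.
 */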
1125 static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1126 {
1127 struct cldma_ctrl *md_ctrl = entity_param;
1128 struct t7xx_cldma_hw *hw_info;
1129 unsigned long flags;
1130 int qno_t;
1131
1132 hw_info = &md_ctrl->hw_info;
1133
1134 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1135 t7xx_cldma_hw_restore(hw_info);
1136 for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
1137 t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
1138 MTK_TX);
1139 t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
1140 MTK_RX);
1141 }
1142 t7xx_cldma_enable_irq(md_ctrl);
1143 t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
1144 md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
1145 t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
1146 t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
1147 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1148 }
1149
1150 static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1151 {
1152 struct cldma_ctrl *md_ctrl = entity_param;
1153 unsigned long flags;
1154
1155 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1156 md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
1157 t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1158 t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1159 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1160
1161 if (md_ctrl->hif_id == CLDMA_ID_MD)
1162 t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
1163
1164 return 0;
1165 }
1166
1167 static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1168 {
1169 struct cldma_ctrl *md_ctrl = entity_param;
1170 struct t7xx_cldma_hw *hw_info;
1171 unsigned long flags;
1172
1173 hw_info = &md_ctrl->hw_info;
1174
1175 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1176 t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
1177 t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
1178 md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
1179 t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
1180 t7xx_cldma_clear_ip_busy(hw_info);
1181 t7xx_cldma_disable_irq(md_ctrl);
1182 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1183 }
1184
1185 static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1186 {
1187 struct cldma_ctrl *md_ctrl = entity_param;
1188 struct t7xx_cldma_hw *hw_info;
1189 unsigned long flags;
1190
1191 if (md_ctrl->hif_id == CLDMA_ID_MD)
1192 t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
1193
1194 hw_info = &md_ctrl->hw_info;
1195
1196 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1197 t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
1198 t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
1199 md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
1200 t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
1201 md_ctrl->txq_started = 0;
1202 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1203
1204 return 0;
1205 }
1206
1207 static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
1208 {
1209 md_ctrl->pm_entity = kzalloc_obj(*md_ctrl->pm_entity);
1210 if (!md_ctrl->pm_entity)
1211 return -ENOMEM;
1212
1213 md_ctrl->pm_entity->entity_param = md_ctrl;
1214
1215 if (md_ctrl->hif_id == CLDMA_ID_MD)
1216 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
1217 else
1218 md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
1219
1220 md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
1221 md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
1222 md_ctrl->pm_entity->resume = t7xx_cldma_resume;
1223 md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
1224
1225 return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1226 }
1227
1228 static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
1229 {
1230 if (!md_ctrl->pm_entity)
1231 return -EINVAL;
1232
1233 t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1234 kfree(md_ctrl->pm_entity);
1235 md_ctrl->pm_entity = NULL;
1236 return 0;
1237 }
1238
1239 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
1240 {
1241 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1242 unsigned long flags;
1243
1244 spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1245 t7xx_cldma_hw_stop(hw_info, MTK_TX);
1246 t7xx_cldma_hw_stop(hw_info, MTK_RX);
1247 t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
1248 t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
1249 t7xx_cldma_hw_init(hw_info);
1250 spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1251 }
1252
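/* Top-half ISR: disable this CLDMA's interrupt at the PCIe MAC, run the L2
 * dispatcher, then clear the interrupt status and enable it again.
 */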
1253 static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
1254 {
1255 struct cldma_ctrl *md_ctrl = data;
1256 u32 interrupt;
1257
1258 interrupt = md_ctrl->hw_info.phy_interrupt_id;
1259 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
1260 t7xx_cldma_irq_work_cb(md_ctrl);
1261 t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
1262 t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
1263 return IRQ_HANDLED;
1264 }
1265
1266 static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
1267 {
1268 int i;
1269
1270 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1271 if (md_ctrl->txq[i].worker) {
1272 destroy_workqueue(md_ctrl->txq[i].worker);
1273 md_ctrl->txq[i].worker = NULL;
1274 }
1275 }
1276
1277 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1278 if (md_ctrl->rxq[i].worker) {
1279 destroy_workqueue(md_ctrl->rxq[i].worker);
1280 md_ctrl->rxq[i].worker = NULL;
1281 }
1282 }
1283 }
1284
1285 /**
1286 * t7xx_cldma_init() - Initialize CLDMA.
1287 * @md_ctrl: CLDMA context structure.
1288 *
1289 * Allocate and initialize device power management entity.
1290 * Initialize HIF TX/RX queue structure.
1291 * Register CLDMA callback ISR with PCIe driver.
1292 *
1293 * Return:
1294 * * 0 - Success.
1295 * * -ERROR - Error code from failure sub-initializations.
1296 */
1297 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
1298 {
1299 struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1300 int ret, i;
1301
1302 md_ctrl->txq_active = 0;
1303 md_ctrl->rxq_active = 0;
1304 md_ctrl->is_late_init = false;
1305
1306 ret = t7xx_cldma_pm_init(md_ctrl);
1307 if (ret)
1308 return ret;
1309
1310 spin_lock_init(&md_ctrl->cldma_lock);
1311
1312 for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1313 md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
1314 md_ctrl->txq[i].worker =
1315 alloc_ordered_workqueue("md_hif%d_tx%d_worker",
1316 WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
1317 md_ctrl->hif_id, i);
1318 if (!md_ctrl->txq[i].worker)
1319 goto err_workqueue;
1320
1321 INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
1322 }
1323
1324 for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1325 md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
1326 INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
1327
1328 md_ctrl->rxq[i].worker =
1329 alloc_ordered_workqueue("md_hif%d_rx%d_worker",
1330 WQ_MEM_RECLAIM,
1331 md_ctrl->hif_id, i);
1332 if (!md_ctrl->rxq[i].worker)
1333 goto err_workqueue;
1334 }
1335
1336 t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1337 md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
1338 md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
1339 md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
1340 t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1341 return 0;
1342
1343 err_workqueue:
1344 t7xx_cldma_destroy_wqs(md_ctrl);
1345 t7xx_cldma_pm_uninit(md_ctrl);
1346 return -ENOMEM;
1347 }
1348
1349 void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
1350 {
1351 t7xx_cldma_late_release(md_ctrl);
1352 t7xx_cldma_adjust_config(md_ctrl, cfg_id);
1353 t7xx_cldma_late_init(md_ctrl);
1354 }
1355
1356 void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
1357 {
1358 t7xx_cldma_stop(md_ctrl);
1359 t7xx_cldma_late_release(md_ctrl);
1360 t7xx_cldma_destroy_wqs(md_ctrl);
1361 t7xx_cldma_pm_uninit(md_ctrl);
1362 }
1363