1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Amir Hanania <amir.hanania@intel.com>
8  *  Haijun Liu <haijun.liu@mediatek.com>
9  *  Moises Veleta <moises.veleta@intel.com>
10  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
11  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
12  *
13  * Contributors:
14  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16  *  Eliot Lee <eliot.lee@intel.com>
17  */
18 
19 #include <linux/bits.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <linux/device.h>
23 #include <linux/dmapool.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/dma-direction.h>
26 #include <linux/gfp.h>
27 #include <linux/io.h>
28 #include <linux/io-64-nonatomic-lo-hi.h>
29 #include <linux/iopoll.h>
30 #include <linux/irqreturn.h>
31 #include <linux/kernel.h>
32 #include <linux/kthread.h>
33 #include <linux/list.h>
34 #include <linux/netdevice.h>
35 #include <linux/pci.h>
36 #include <linux/pm_runtime.h>
37 #include <linux/sched.h>
38 #include <linux/skbuff.h>
39 #include <linux/slab.h>
40 #include <linux/spinlock.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44 
45 #include "t7xx_cldma.h"
46 #include "t7xx_hif_cldma.h"
47 #include "t7xx_mhccif.h"
48 #include "t7xx_pci.h"
49 #include "t7xx_pcie_mac.h"
50 #include "t7xx_port_proxy.h"
51 #include "t7xx_reg.h"
52 #include "t7xx_state_monitor.h"
53 
54 #define MAX_TX_BUDGET			16
55 #define MAX_RX_BUDGET			16
56 
57 #define CHECK_Q_STOP_TIMEOUT_US		1000000
58 #define CHECK_Q_STOP_STEP_US		10000
59 
60 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
61 				     enum mtk_txrx tx_rx, unsigned int index)
62 {
63 	queue->dir = tx_rx;
64 	queue->index = index;
65 	queue->md_ctrl = md_ctrl;
66 	queue->tr_ring = NULL;
67 	queue->tr_done = NULL;
68 	queue->tx_next = NULL;
69 }
70 
71 static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
72 				    enum mtk_txrx tx_rx, unsigned int index)
73 {
74 	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
75 	init_waitqueue_head(&queue->req_wq);
76 	spin_lock_init(&queue->ring_lock);
77 }
78 
79 static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
80 {
81 	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
82 	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
83 }
84 
85 static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
86 {
87 	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
88 	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
89 }
90 
91 static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
92 					size_t size, gfp_t gfp_mask)
93 {
94 	req->skb = __dev_alloc_skb(size, gfp_mask);
95 	if (!req->skb)
96 		return -ENOMEM;
97 
98 	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
99 	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
100 		dev_kfree_skb_any(req->skb);
101 		req->skb = NULL;
102 		req->mapped_buff = 0;
103 		dev_err(md_ctrl->dev, "DMA mapping failed\n");
104 		return -ENOMEM;
105 	}
106 
107 	return 0;
108 }
109 
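/*
 * Collect up to @budget received GPDs from @queue, starting at tr_done.
 * A GPD still owned by the hardware (GPD_FLAGS_HWO set) is polled briefly
 * (up to 100 reads, 1 us apart) against the queue's DL current-address
 * register before giving up for this round. For each completed GPD the
 * buffer is unmapped, trimmed to data_buff_len and passed to
 * queue->recv_skb(); the rx_refill slot is then re-armed with a freshly
 * allocated, DMA-mapped skb and handed back to the hardware. When the
 * budget is spent and a reschedule is due, *over_budget is set.
 */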
110 static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
111 {
112 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
113 	unsigned int hwo_polling_count = 0;
114 	struct t7xx_cldma_hw *hw_info;
115 	bool rx_not_done = true;
116 	unsigned long flags;
117 	int count = 0;
118 
119 	hw_info = &md_ctrl->hw_info;
120 
121 	do {
122 		struct cldma_request *req;
123 		struct cldma_gpd *gpd;
124 		struct sk_buff *skb;
125 		int ret;
126 
127 		req = queue->tr_done;
128 		if (!req)
129 			return -ENODATA;
130 
131 		gpd = req->gpd;
132 		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
133 			dma_addr_t gpd_addr;
134 
135 			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
136 				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
137 				return -ENODEV;
138 			}
139 
140 			gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
141 						  REG_CLDMA_DL_CURRENT_ADDRL_0 +
142 						  queue->index * sizeof(u64));
143 			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
144 				return 0;
145 
146 			udelay(1);
147 			continue;
148 		}
149 
150 		hwo_polling_count = 0;
151 		skb = req->skb;
152 
153 		if (req->mapped_buff) {
154 			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
155 					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
156 			req->mapped_buff = 0;
157 		}
158 
159 		skb->len = 0;
160 		skb_reset_tail_pointer(skb);
161 		skb_put(skb, le16_to_cpu(gpd->data_buff_len));
162 
163 		ret = queue->recv_skb(queue, skb);
164 		/* Break processing, will try again later */
165 		if (ret < 0)
166 			return ret;
167 
168 		req->skb = NULL;
169 		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
170 
171 		spin_lock_irqsave(&queue->ring_lock, flags);
172 		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
173 		spin_unlock_irqrestore(&queue->ring_lock, flags);
174 		req = queue->rx_refill;
175 
176 		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
177 		if (ret)
178 			return ret;
179 
180 		gpd = req->gpd;
181 		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
182 		gpd->data_buff_len = 0;
183 		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
184 
185 		spin_lock_irqsave(&queue->ring_lock, flags);
186 		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
187 		spin_unlock_irqrestore(&queue->ring_lock, flags);
188 
189 		rx_not_done = ++count < budget || !need_resched();
190 	} while (rx_not_done);
191 
192 	*over_budget = true;
193 	return 0;
194 }
195 
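/*
 * Drain @queue until the hardware reports no more pending RX interrupts.
 * After each pass the RX queue is resumed if it stalled and newly latched
 * RX interrupts are acknowledged; -EAGAIN is returned when the budget was
 * exceeded so the caller can reschedule the work.
 */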
196 static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
197 {
198 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
199 	struct t7xx_cldma_hw *hw_info;
200 	unsigned int pending_rx_int;
201 	bool over_budget = false;
202 	unsigned long flags;
203 	int ret;
204 
205 	hw_info = &md_ctrl->hw_info;
206 
207 	do {
208 		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
209 		if (ret == -ENODATA)
210 			return 0;
211 		else if (ret)
212 			return ret;
213 
214 		pending_rx_int = 0;
215 
216 		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
217 		if (md_ctrl->rxq_active & BIT(queue->index)) {
218 			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
219 				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
220 
221 			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
222 								  MTK_RX);
223 			if (pending_rx_int) {
224 				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
225 
226 				if (over_budget) {
227 					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
228 					return -EAGAIN;
229 				}
230 			}
231 		}
232 		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
233 	} while (pending_rx_int);
234 
235 	return 0;
236 }
237 
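/*
 * RX work item: collect received GPDs and requeue itself if the collect
 * pass could not finish while the queue is still active. Otherwise clear
 * the IP busy status, re-enable the per-queue RX interrupts masked by the
 * interrupt path and drop the runtime PM reference taken there.
 */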
238 static void t7xx_cldma_rx_done(struct work_struct *work)
239 {
240 	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
241 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
242 	int value;
243 
244 	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
245 	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
246 		queue_work(queue->worker, &queue->cldma_work);
247 		return;
248 	}
249 
250 	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
251 	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
252 	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
253 	pm_runtime_mark_last_busy(md_ctrl->dev);
254 	pm_runtime_put_autosuspend(md_ctrl->dev);
255 }
256 
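/*
 * Reclaim completed TX GPDs from @queue: for every GPD the hardware has
 * released (GPD_FLAGS_HWO cleared) unmap the buffer, free the skb and
 * return one unit of budget, then wake senders blocked in
 * t7xx_cldma_send_skb(), one per reclaimed slot.
 */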
257 static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
258 {
259 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
260 	unsigned int dma_len, count = 0;
261 	struct cldma_request *req;
262 	struct cldma_gpd *gpd;
263 	unsigned long flags;
264 	dma_addr_t dma_free;
265 	struct sk_buff *skb;
266 
267 	while (!kthread_should_stop()) {
268 		spin_lock_irqsave(&queue->ring_lock, flags);
269 		req = queue->tr_done;
270 		if (!req) {
271 			spin_unlock_irqrestore(&queue->ring_lock, flags);
272 			break;
273 		}
274 		gpd = req->gpd;
275 		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
276 			spin_unlock_irqrestore(&queue->ring_lock, flags);
277 			break;
278 		}
279 		queue->budget++;
280 		dma_free = req->mapped_buff;
281 		dma_len = le16_to_cpu(gpd->data_buff_len);
282 		skb = req->skb;
283 		req->skb = NULL;
284 		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
285 		spin_unlock_irqrestore(&queue->ring_lock, flags);
286 
287 		count++;
288 		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
289 		dev_kfree_skb_any(skb);
290 	}
291 
292 	if (count)
293 		wake_up_nr(&queue->req_wq, count);
294 
295 	return count;
296 }
297 
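/*
 * Handle a TX queue-empty indication: if the most recently queued GPD is
 * still owned by the hardware, check that the UL current-address register
 * points at it and, if so, resume the queue; a mismatch means the queue
 * is not actually empty and an error is logged.
 */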
298 static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
299 {
300 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
301 	struct cldma_request *req;
302 	dma_addr_t ul_curr_addr;
303 	unsigned long flags;
304 	bool pending_gpd;
305 
306 	if (!(md_ctrl->txq_active & BIT(queue->index)))
307 		return;
308 
309 	spin_lock_irqsave(&queue->ring_lock, flags);
310 	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
311 	spin_unlock_irqrestore(&queue->ring_lock, flags);
312 
313 	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
314 
315 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
316 	if (pending_gpd) {
317 		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
318 
319 		/* Check current processing TGPD, 64-bit address is in a table by Q index */
320 		ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
321 					      queue->index * sizeof(u64));
322 		if (req->gpd_addr != ul_curr_addr) {
323 			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
324 			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
325 				md_ctrl->hif_id, queue->index);
326 			return;
327 		}
328 
329 		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
330 	}
331 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
332 }
333 
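/*
 * TX work item: reclaim completed GPDs, then re-read the interrupt status.
 * A queue-empty indication is acknowledged and handled here; a further
 * TX-done indication requeues the work. Once nothing is pending, the
 * per-queue TX interrupts are re-enabled and the runtime PM reference
 * taken in the interrupt path is dropped.
 */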
334 static void t7xx_cldma_tx_done(struct work_struct *work)
335 {
336 	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
337 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
338 	struct t7xx_cldma_hw *hw_info;
339 	unsigned int l2_tx_int;
340 	unsigned long flags;
341 
342 	hw_info = &md_ctrl->hw_info;
343 	t7xx_cldma_gpd_tx_collect(queue);
344 	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
345 					     MTK_TX);
346 	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
347 		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
348 		t7xx_cldma_txq_empty_hndl(queue);
349 	}
350 
351 	if (l2_tx_int & BIT(queue->index)) {
352 		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
353 		queue_work(queue->worker, &queue->cldma_work);
354 		return;
355 	}
356 
357 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
358 	if (md_ctrl->txq_active & BIT(queue->index)) {
359 		t7xx_cldma_clear_ip_busy(hw_info);
360 		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
361 		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
362 	}
363 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
364 
365 	pm_runtime_mark_last_busy(md_ctrl->dev);
366 	pm_runtime_put_autosuspend(md_ctrl->dev);
367 }
368 
369 static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
370 				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
371 {
372 	struct cldma_request *req_cur, *req_next;
373 
374 	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
375 		if (req_cur->mapped_buff && req_cur->skb) {
376 			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
377 					 ring->pkt_size, tx_rx);
378 			req_cur->mapped_buff = 0;
379 		}
380 
381 		dev_kfree_skb_any(req_cur->skb);
382 
383 		if (req_cur->gpd)
384 			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
385 
386 		list_del(&req_cur->entry);
387 		kfree(req_cur);
388 	}
389 }
390 
391 static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
392 {
393 	struct cldma_request *req;
394 	int val;
395 
396 	req = kzalloc(sizeof(*req), GFP_KERNEL);
397 	if (!req)
398 		return NULL;
399 
400 	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
401 	if (!req->gpd)
402 		goto err_free_req;
403 
404 	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
405 	if (val)
406 		goto err_free_pool;
407 
408 	return req;
409 
410 err_free_pool:
411 	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
412 
413 err_free_req:
414 	kfree(req);
415 
416 	return NULL;
417 }
418 
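/*
 * Allocate MAX_RX_BUDGET requests (GPD plus DMA-mapped skb) for @ring and
 * link the GPDs into a circular chain via their next pointers.
 */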
419 static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
420 {
421 	struct cldma_request *req;
422 	struct cldma_gpd *gpd;
423 	int i;
424 
425 	INIT_LIST_HEAD(&ring->gpd_ring);
426 	ring->length = MAX_RX_BUDGET;
427 
428 	for (i = 0; i < ring->length; i++) {
429 		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
430 		if (!req) {
431 			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
432 			return -ENOMEM;
433 		}
434 
435 		gpd = req->gpd;
436 		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
437 		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
438 		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
439 		INIT_LIST_HEAD(&req->entry);
440 		list_add_tail(&req->entry, &ring->gpd_ring);
441 	}
442 
443 	/* Link previous GPD to next GPD, circular */
444 	list_for_each_entry(req, &ring->gpd_ring, entry) {
445 		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
446 		gpd = req->gpd;
447 	}
448 
449 	return 0;
450 }
451 
452 static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
453 {
454 	struct cldma_request *req;
455 
456 	req = kzalloc(sizeof(*req), GFP_KERNEL);
457 	if (!req)
458 		return NULL;
459 
460 	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
461 	if (!req->gpd) {
462 		kfree(req);
463 		return NULL;
464 	}
465 
466 	return req;
467 }
468 
469 static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
470 {
471 	struct cldma_request *req;
472 	struct cldma_gpd *gpd;
473 	int i;
474 
475 	INIT_LIST_HEAD(&ring->gpd_ring);
476 	ring->length = MAX_TX_BUDGET;
477 
478 	for (i = 0; i < ring->length; i++) {
479 		req = t7xx_alloc_tx_request(md_ctrl);
480 		if (!req) {
481 			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
482 			return -ENOMEM;
483 		}
484 
485 		gpd = req->gpd;
486 		gpd->flags = GPD_FLAGS_IOC;
487 		INIT_LIST_HEAD(&req->entry);
488 		list_add_tail(&req->entry, &ring->gpd_ring);
489 	}
490 
491 	/* Link previous GPD to next GPD, circular */
492 	list_for_each_entry(req, &ring->gpd_ring, entry) {
493 		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
494 		gpd = req->gpd;
495 	}
496 
497 	return 0;
498 }
499 
500 /**
501  * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
502  * @queue: Pointer to the queue structure.
503  *
504  * Called with ring_lock (unless called during initialization phase)
505  */
506 static void t7xx_cldma_q_reset(struct cldma_queue *queue)
507 {
508 	struct cldma_request *req;
509 
510 	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
511 	queue->tr_done = req;
512 	queue->budget = queue->tr_ring->length;
513 
514 	if (queue->dir == MTK_TX)
515 		queue->tx_next = req;
516 	else
517 		queue->rx_refill = req;
518 }
519 
520 static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
521 {
522 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
523 
524 	queue->dir = MTK_RX;
525 	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
526 	t7xx_cldma_q_reset(queue);
527 }
528 
529 static void t7xx_cldma_txq_init(struct cldma_queue *queue)
530 {
531 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
532 
533 	queue->dir = MTK_TX;
534 	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
535 	t7xx_cldma_q_reset(queue);
536 }
537 
538 static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
539 {
540 	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
541 }
542 
543 static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
544 {
545 	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
546 }
547 
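/*
 * Core interrupt handling: read the unmasked L2 TX/RX status, clear any
 * L3 error status, acknowledge the L2 bits and dispatch per queue.
 * TX-done and RX bits take a runtime PM reference, mask that queue's
 * interrupts and schedule its work item; TX queue-empty bits are handled
 * inline.
 */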
548 static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
549 {
550 	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
551 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
552 	int i;
553 
554 	/* L2 raw interrupt status */
555 	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
556 	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
557 	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
558 	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
559 	l2_tx_int &= ~l2_tx_int_msk;
560 	l2_rx_int &= ~l2_rx_int_msk;
561 
562 	if (l2_tx_int) {
563 		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
564 			/* Read and clear L3 TX interrupt status */
565 			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
566 			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
567 			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
568 			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
569 		}
570 
571 		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
572 		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
573 			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
574 				if (i < CLDMA_TXQ_NUM) {
575 					pm_runtime_get(md_ctrl->dev);
576 					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
577 					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
578 					queue_work(md_ctrl->txq[i].worker,
579 						   &md_ctrl->txq[i].cldma_work);
580 				} else {
581 					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
582 				}
583 			}
584 		}
585 	}
586 
587 	if (l2_rx_int) {
588 		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
589 			/* Read and clear L3 RX interrupt status */
590 			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
591 			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
592 			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
593 			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
594 		}
595 
596 		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
597 		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
598 			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
599 			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
600 				pm_runtime_get(md_ctrl->dev);
601 				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
602 				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
603 				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
604 			}
605 		}
606 	}
607 }
608 
609 static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
610 {
611 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
612 	unsigned int tx_active;
613 	unsigned int rx_active;
614 
615 	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
616 		return false;
617 
618 	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
619 	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
620 
621 	return tx_active || rx_active;
622 }
623 
624 /**
625  * t7xx_cldma_stop() - Stop CLDMA.
626  * @md_ctrl: CLDMA context structure.
627  *
628  * Stop TX and RX queues. Disable L1 and L2 interrupts.
629  * Clear status registers.
630  *
631  * Return:
632  * * 0		- Success.
633  * * -ERROR	- Error code from polling cldma_queues_active.
634  */
635 int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
636 {
637 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
638 	bool active;
639 	int i, ret;
640 
641 	md_ctrl->rxq_active = 0;
642 	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
643 	md_ctrl->txq_active = 0;
644 	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
645 	md_ctrl->txq_started = 0;
646 	t7xx_cldma_disable_irq(md_ctrl);
647 	t7xx_cldma_hw_stop(hw_info, MTK_RX);
648 	t7xx_cldma_hw_stop(hw_info, MTK_TX);
649 	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
650 	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
651 
652 	if (md_ctrl->is_late_init) {
653 		for (i = 0; i < CLDMA_TXQ_NUM; i++)
654 			flush_work(&md_ctrl->txq[i].cldma_work);
655 
656 		for (i = 0; i < CLDMA_RXQ_NUM; i++)
657 			flush_work(&md_ctrl->rxq[i].cldma_work);
658 	}
659 
660 	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
661 				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
662 	if (ret)
663 		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
664 
665 	return ret;
666 }
667 
668 static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
669 {
670 	int i;
671 
672 	if (!md_ctrl->is_late_init)
673 		return;
674 
675 	for (i = 0; i < CLDMA_TXQ_NUM; i++)
676 		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
677 
678 	for (i = 0; i < CLDMA_RXQ_NUM; i++)
679 		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
680 
681 	dma_pool_destroy(md_ctrl->gpd_dmapool);
682 	md_ctrl->gpd_dmapool = NULL;
683 	md_ctrl->is_late_init = false;
684 }
685 
686 void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
687 {
688 	unsigned long flags;
689 	int i;
690 
691 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
692 	md_ctrl->txq_active = 0;
693 	md_ctrl->rxq_active = 0;
694 	t7xx_cldma_disable_irq(md_ctrl);
695 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
696 
697 	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
698 		cancel_work_sync(&md_ctrl->txq[i].cldma_work);
699 
700 		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
701 		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
702 		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
703 	}
704 
705 	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
706 		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
707 
708 		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
709 		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
710 		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
711 	}
712 
713 	t7xx_cldma_late_release(md_ctrl);
714 }
715 
716 /**
717  * t7xx_cldma_start() - Start CLDMA.
718  * @md_ctrl: CLDMA context structure.
719  *
720  * Set TX/RX start address.
721  * Start all RX queues and enable L2 interrupt.
722  */
723 void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
724 {
725 	unsigned long flags;
726 
727 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
728 	if (md_ctrl->is_late_init) {
729 		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
730 		int i;
731 
732 		t7xx_cldma_enable_irq(md_ctrl);
733 
734 		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
735 			if (md_ctrl->txq[i].tr_done)
736 				t7xx_cldma_hw_set_start_addr(hw_info, i,
737 							     md_ctrl->txq[i].tr_done->gpd_addr,
738 							     MTK_TX);
739 		}
740 
741 		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
742 			if (md_ctrl->rxq[i].tr_done)
743 				t7xx_cldma_hw_set_start_addr(hw_info, i,
744 							     md_ctrl->rxq[i].tr_done->gpd_addr,
745 							     MTK_RX);
746 		}
747 
748 		/* Enable L2 interrupt */
749 		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
750 		t7xx_cldma_hw_start(hw_info);
751 		md_ctrl->txq_started = 0;
752 		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
753 		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
754 	}
755 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
756 }
757 
758 static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
759 {
760 	struct cldma_queue *txq = &md_ctrl->txq[qnum];
761 	struct cldma_request *req;
762 	struct cldma_gpd *gpd;
763 	unsigned long flags;
764 
765 	spin_lock_irqsave(&txq->ring_lock, flags);
766 	t7xx_cldma_q_reset(txq);
767 	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
768 		gpd = req->gpd;
769 		gpd->flags &= ~GPD_FLAGS_HWO;
770 		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
771 		gpd->data_buff_len = 0;
772 		dev_kfree_skb_any(req->skb);
773 		req->skb = NULL;
774 	}
775 	spin_unlock_irqrestore(&txq->ring_lock, flags);
776 }
777 
778 static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
779 {
780 	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
781 	struct cldma_request *req;
782 	struct cldma_gpd *gpd;
783 	unsigned long flags;
784 	int ret = 0;
785 
786 	spin_lock_irqsave(&rxq->ring_lock, flags);
787 	t7xx_cldma_q_reset(rxq);
788 	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
789 		gpd = req->gpd;
790 		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
791 		gpd->data_buff_len = 0;
792 
793 		if (req->skb) {
794 			req->skb->len = 0;
795 			skb_reset_tail_pointer(req->skb);
796 		}
797 	}
798 
799 	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
800 		if (req->skb)
801 			continue;
802 
803 		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
804 		if (ret)
805 			break;
806 
807 		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
808 	}
809 	spin_unlock_irqrestore(&rxq->ring_lock, flags);
810 
811 	return ret;
812 }
813 
814 void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
815 {
816 	int i;
817 
818 	if (tx_rx == MTK_TX) {
819 		for (i = 0; i < CLDMA_TXQ_NUM; i++)
820 			t7xx_cldma_clear_txq(md_ctrl, i);
821 	} else {
822 		for (i = 0; i < CLDMA_RXQ_NUM; i++)
823 			t7xx_cldma_clear_rxq(md_ctrl, i);
824 	}
825 }
826 
827 void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
828 {
829 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
830 	unsigned long flags;
831 
832 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
833 	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
834 	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
835 	if (tx_rx == MTK_RX)
836 		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
837 	else
838 		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
839 	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
840 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
841 }
842 
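/*
 * Attach @skb to @tx_req: map the data for DMA and point the GPD at the
 * mapping. GPD_FLAGS_HWO is set under cldma_lock and only while the TX
 * queue is active, so ownership is never handed to the hardware on a
 * stopped queue.
 */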
843 static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
844 					    struct sk_buff *skb)
845 {
846 	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
847 	struct cldma_gpd *gpd = tx_req->gpd;
848 	unsigned long flags;
849 
850 	/* Update GPD */
851 	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
852 
853 	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
854 		dev_err(md_ctrl->dev, "DMA mapping failed\n");
855 		return -ENOMEM;
856 	}
857 
858 	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
859 	gpd->data_buff_len = cpu_to_le16(skb->len);
860 
861 	/* This lock must cover TGPD setting, as even without a resume operation,
862 	 * CLDMA can send next HWO=1 if last TGPD just finished.
863 	 */
864 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
865 	if (md_ctrl->txq_active & BIT(queue->index))
866 		gpd->flags |= GPD_FLAGS_HWO;
867 
868 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
869 
870 	tx_req->skb = skb;
871 	return 0;
872 }
873 
874 /* Called with cldma_lock */
875 static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
876 				     struct cldma_request *prev_req)
877 {
878 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
879 
880 	/* Check whether the device was powered off (CLDMA start address is not set) */
881 	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
882 		t7xx_cldma_hw_init(hw_info);
883 		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
884 		md_ctrl->txq_started &= ~BIT(qno);
885 	}
886 
887 	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
888 		if (md_ctrl->txq_started & BIT(qno))
889 			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
890 		else
891 			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
892 
893 		md_ctrl->txq_started |= BIT(qno);
894 	}
895 }
896 
897 /**
898  * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
899  * @queue: CLDMA queue.
900  * @recv_skb: Receiving skb callback.
901  */
902 void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
903 			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
904 {
905 	queue->recv_skb = recv_skb;
906 }
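
/*
 * Illustrative use only (my_recv_skb is a made-up name; the callbacks
 * actually wired up in t7xx_cldma_adjust_config() are
 * t7xx_port_proxy_recv_skb and t7xx_port_proxy_recv_skb_from_dedicated_queue):
 *
 *	static int my_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
 *	{
 *		dev_kfree_skb_any(skb);
 *		return 0;	// return a negative value to retry the GPD later
 *	}
 *
 *	t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], my_recv_skb);
 */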
907 
908 /**
909  * t7xx_cldma_send_skb() - Send control data to modem.
910  * @md_ctrl: CLDMA context structure.
911  * @qno: Queue number.
912  * @skb: Socket buffer.
913  *
914  * Return:
915  * * 0		- Success.
916  * * -ENOMEM	- Allocation failure.
917  * * -EINVAL	- Invalid queue request.
918  * * -EIO	- Queue is not active.
919  * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
920  */
921 int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
922 {
923 	struct cldma_request *tx_req;
924 	struct cldma_queue *queue;
925 	unsigned long flags;
926 	int ret;
927 
928 	if (qno >= CLDMA_TXQ_NUM)
929 		return -EINVAL;
930 
931 	ret = pm_runtime_resume_and_get(md_ctrl->dev);
932 	if (ret < 0 && ret != -EACCES)
933 		return ret;
934 
935 	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
936 	queue = &md_ctrl->txq[qno];
937 
938 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
939 	if (!(md_ctrl->txq_active & BIT(qno))) {
940 		ret = -EIO;
941 		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
942 		goto allow_sleep;
943 	}
944 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
945 
946 	do {
947 		spin_lock_irqsave(&queue->ring_lock, flags);
948 		tx_req = queue->tx_next;
949 		if (queue->budget > 0 && !tx_req->skb) {
950 			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
951 
952 			queue->budget--;
953 			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
954 			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
955 			spin_unlock_irqrestore(&queue->ring_lock, flags);
956 
957 			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
958 				ret = -ETIMEDOUT;
959 				break;
960 			}
961 
962 			/* Protect the access to the modem for queues operations (resume/start)
963 			 * which access shared locations by all the queues.
964 			 * cldma_lock is independent of ring_lock which is per queue.
965 			 */
966 			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
967 			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
968 			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
969 
970 			break;
971 		}
972 		spin_unlock_irqrestore(&queue->ring_lock, flags);
973 
974 		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
975 			ret = -ETIMEDOUT;
976 			break;
977 		}
978 
979 		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
980 			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
981 			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
982 			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
983 		}
984 
985 		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
986 	} while (!ret);
987 
988 allow_sleep:
989 	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
990 	pm_runtime_mark_last_busy(md_ctrl->dev);
991 	pm_runtime_put_autosuspend(md_ctrl->dev);
992 	return ret;
993 }
994 
995 static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
996 {
997 	int qno;
998 
999 	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
1000 		md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
1001 		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
1002 	}
1003 
1004 	md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
1005 
1006 	for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
1007 		md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
1008 
1009 	if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
1010 		md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
1011 		md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
1012 		t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
1013 					t7xx_port_proxy_recv_skb_from_dedicated_queue);
1014 	}
1015 }
1016 
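/*
 * Late, per-configuration setup: create the GPD DMA pool, allocate the
 * TX/RX rings and point every queue at its ring. Undone by
 * t7xx_cldma_late_release() and redone by t7xx_cldma_switch_cfg().
 */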
1017 static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
1018 {
1019 	char dma_pool_name[32];
1020 	int i, j, ret;
1021 
1022 	if (md_ctrl->is_late_init) {
1023 		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
1024 		return -EALREADY;
1025 	}
1026 
1027 	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
1028 
1029 	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
1030 					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
1031 	if (!md_ctrl->gpd_dmapool) {
1032 		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
1033 		return -ENOMEM;
1034 	}
1035 
1036 	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1037 		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
1038 		if (ret) {
1039 			dev_err(md_ctrl->dev, "control TX ring init fail\n");
1040 			goto err_free_tx_ring;
1041 		}
1042 	}
1043 
1044 	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
1045 		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
1046 		if (ret) {
1047 			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
1048 			goto err_free_rx_ring;
1049 		}
1050 	}
1051 
1052 	for (i = 0; i < CLDMA_TXQ_NUM; i++)
1053 		t7xx_cldma_txq_init(&md_ctrl->txq[i]);
1054 
1055 	for (j = 0; j < CLDMA_RXQ_NUM; j++)
1056 		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
1057 
1058 	md_ctrl->is_late_init = true;
1059 	return 0;
1060 
1061 err_free_rx_ring:
1062 	while (j--)
1063 		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
1064 
1065 err_free_tx_ring:
1066 	while (i--)
1067 		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
1068 
1069 	return ret;
1070 }
1071 
1072 static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
1073 {
1074 	return addr + phy_addr - addr_trs1;
1075 }
1076 
1077 static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
1078 {
1079 	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
1080 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1081 	u32 phy_ao_base, phy_pd_base;
1082 
1083 	hw_info->hw_mode = MODE_BIT_64;
1084 
1085 	if (md_ctrl->hif_id == CLDMA_ID_MD) {
1086 		phy_ao_base = CLDMA1_AO_BASE;
1087 		phy_pd_base = CLDMA1_PD_BASE;
1088 		hw_info->phy_interrupt_id = CLDMA1_INT;
1089 	} else {
1090 		phy_ao_base = CLDMA0_AO_BASE;
1091 		phy_pd_base = CLDMA0_PD_BASE;
1092 		hw_info->phy_interrupt_id = CLDMA0_INT;
1093 	}
1094 
1095 	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1096 						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
1097 	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
1098 						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
1099 }
1100 
1101 static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
1102 {
1103 	dev_kfree_skb_any(skb);
1104 	return 0;
1105 }
1106 
1107 int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
1108 {
1109 	struct device *dev = &t7xx_dev->pdev->dev;
1110 	struct cldma_ctrl *md_ctrl;
1111 	int qno;
1112 
1113 	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
1114 	if (!md_ctrl)
1115 		return -ENOMEM;
1116 
1117 	md_ctrl->t7xx_dev = t7xx_dev;
1118 	md_ctrl->dev = dev;
1119 	md_ctrl->hif_id = hif_id;
1120 	for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
1121 		md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
1122 
1123 	t7xx_hw_info_init(md_ctrl);
1124 	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
1125 	return 0;
1126 }
1127 
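/*
 * System PM callbacks: suspend masks the TX interrupts and stops the TX
 * queues, suspend_late does the same for RX and disables the CLDMA
 * interrupt line; resume_early restores the hardware state and restarts
 * RX, resume re-enables TX and, for the MD CLDMA, clears the MHCCIF mask.
 */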
1128 static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1129 {
1130 	struct cldma_ctrl *md_ctrl = entity_param;
1131 	struct t7xx_cldma_hw *hw_info;
1132 	unsigned long flags;
1133 	int qno_t;
1134 
1135 	hw_info = &md_ctrl->hw_info;
1136 
1137 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1138 	t7xx_cldma_hw_restore(hw_info);
1139 	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
1140 		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
1141 					     MTK_TX);
1142 		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
1143 					     MTK_RX);
1144 	}
1145 	t7xx_cldma_enable_irq(md_ctrl);
1146 	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
1147 	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
1148 	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
1149 	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
1150 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1151 }
1152 
1153 static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1154 {
1155 	struct cldma_ctrl *md_ctrl = entity_param;
1156 	unsigned long flags;
1157 
1158 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1159 	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
1160 	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1161 	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
1162 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1163 
1164 	if (md_ctrl->hif_id == CLDMA_ID_MD)
1165 		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
1166 
1167 	return 0;
1168 }
1169 
1170 static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1171 {
1172 	struct cldma_ctrl *md_ctrl = entity_param;
1173 	struct t7xx_cldma_hw *hw_info;
1174 	unsigned long flags;
1175 
1176 	hw_info = &md_ctrl->hw_info;
1177 
1178 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1179 	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
1180 	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
1181 	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
1182 	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
1183 	t7xx_cldma_clear_ip_busy(hw_info);
1184 	t7xx_cldma_disable_irq(md_ctrl);
1185 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1186 }
1187 
1188 static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
1189 {
1190 	struct cldma_ctrl *md_ctrl = entity_param;
1191 	struct t7xx_cldma_hw *hw_info;
1192 	unsigned long flags;
1193 
1194 	if (md_ctrl->hif_id == CLDMA_ID_MD)
1195 		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
1196 
1197 	hw_info = &md_ctrl->hw_info;
1198 
1199 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1200 	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
1201 	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
1202 	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
1203 	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
1204 	md_ctrl->txq_started = 0;
1205 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1206 
1207 	return 0;
1208 }
1209 
1210 static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
1211 {
1212 	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
1213 	if (!md_ctrl->pm_entity)
1214 		return -ENOMEM;
1215 
1216 	md_ctrl->pm_entity->entity_param = md_ctrl;
1217 
1218 	if (md_ctrl->hif_id == CLDMA_ID_MD)
1219 		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
1220 	else
1221 		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
1222 
1223 	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
1224 	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
1225 	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
1226 	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
1227 
1228 	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1229 }
1230 
1231 static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
1232 {
1233 	if (!md_ctrl->pm_entity)
1234 		return -EINVAL;
1235 
1236 	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
1237 	kfree(md_ctrl->pm_entity);
1238 	md_ctrl->pm_entity = NULL;
1239 	return 0;
1240 }
1241 
1242 void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
1243 {
1244 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1245 	unsigned long flags;
1246 
1247 	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
1248 	t7xx_cldma_hw_stop(hw_info, MTK_TX);
1249 	t7xx_cldma_hw_stop(hw_info, MTK_RX);
1250 	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
1251 	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
1252 	t7xx_cldma_hw_init(hw_info);
1253 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
1254 }
1255 
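/*
 * Interrupt handler registered with the PCIe MAC layer: disable the CLDMA
 * interrupt line, run t7xx_cldma_irq_work_cb(), then clear the interrupt
 * status and re-enable the line.
 */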
1256 static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
1257 {
1258 	struct cldma_ctrl *md_ctrl = data;
1259 	u32 interrupt;
1260 
1261 	interrupt = md_ctrl->hw_info.phy_interrupt_id;
1262 	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
1263 	t7xx_cldma_irq_work_cb(md_ctrl);
1264 	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
1265 	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
1266 	return IRQ_HANDLED;
1267 }
1268 
1269 static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
1270 {
1271 	int i;
1272 
1273 	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1274 		if (md_ctrl->txq[i].worker) {
1275 			destroy_workqueue(md_ctrl->txq[i].worker);
1276 			md_ctrl->txq[i].worker = NULL;
1277 		}
1278 	}
1279 
1280 	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1281 		if (md_ctrl->rxq[i].worker) {
1282 			destroy_workqueue(md_ctrl->rxq[i].worker);
1283 			md_ctrl->rxq[i].worker = NULL;
1284 		}
1285 	}
1286 }
1287 
1288 /**
1289  * t7xx_cldma_init() - Initialize CLDMA.
1290  * @md_ctrl: CLDMA context structure.
1291  *
1292  * Allocate and initialize device power management entity.
1293  * Initialize HIF TX/RX queue structure.
1294  * Register CLDMA callback ISR with PCIe driver.
1295  *
1296  * Return:
1297  * * 0		- Success.
1298  * * -ERROR	- Error code from a failed sub-initialization.
1299  */
1300 int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
1301 {
1302 	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
1303 	int ret, i;
1304 
1305 	md_ctrl->txq_active = 0;
1306 	md_ctrl->rxq_active = 0;
1307 	md_ctrl->is_late_init = false;
1308 
1309 	ret = t7xx_cldma_pm_init(md_ctrl);
1310 	if (ret)
1311 		return ret;
1312 
1313 	spin_lock_init(&md_ctrl->cldma_lock);
1314 
1315 	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
1316 		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
1317 		md_ctrl->txq[i].worker =
1318 			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
1319 					WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
1320 					md_ctrl->hif_id, i);
1321 		if (!md_ctrl->txq[i].worker)
1322 			goto err_workqueue;
1323 
1324 		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
1325 	}
1326 
1327 	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
1328 		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
1329 		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
1330 
1331 		md_ctrl->rxq[i].worker =
1332 			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
1333 						WQ_MEM_RECLAIM,
1334 						md_ctrl->hif_id, i);
1335 		if (!md_ctrl->rxq[i].worker)
1336 			goto err_workqueue;
1337 	}
1338 
1339 	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1340 	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
1341 	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
1342 	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
1343 	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
1344 	return 0;
1345 
1346 err_workqueue:
1347 	t7xx_cldma_destroy_wqs(md_ctrl);
1348 	t7xx_cldma_pm_uninit(md_ctrl);
1349 	return -ENOMEM;
1350 }
1351 
1352 void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
1353 {
1354 	t7xx_cldma_late_release(md_ctrl);
1355 	t7xx_cldma_adjust_config(md_ctrl, cfg_id);
1356 	t7xx_cldma_late_init(md_ctrl);
1357 }
1358 
1359 void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
1360 {
1361 	t7xx_cldma_stop(md_ctrl);
1362 	t7xx_cldma_late_release(md_ctrl);
1363 	t7xx_cldma_destroy_wqs(md_ctrl);
1364 	t7xx_cldma_pm_uninit(md_ctrl);
1365 }
1366