// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"

#define DPMAIF_SKB_TX_BURST_CNT	5
#define DPMAIF_DRB_LIST_LEN	6144

/* DRB dtype */
#define DES_DTYP_PD		0
#define DES_DTYP_MSG		1

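/*
 * Read the hardware's current UL read index and account for the DRBs it has
 * consumed since the last check. The ring wraps, so e.g. with
 * drb_size_cnt = 8, old_sw_rd_idx = 6 and new_hw_rd_idx = 2 the hardware
 * has advanced 8 - 6 + 2 = 4 slots. Returns the number of consumed DRBs,
 * or 0 if the queue is stopped or the index is out of range.
 */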
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
	unsigned long flags;

	if (!txq->que_started)
		return 0;

	old_sw_rd_idx = txq->drb_rd_idx;
	new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
	if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
		dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
		return 0;
	}

	if (old_sw_rd_idx <= new_hw_rd_idx)
		drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
	else
		drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;

	spin_lock_irqsave(&txq->tx_lock, flags);
	txq->drb_rd_idx = new_hw_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	return drb_cnt;
}

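/*
 * Walk up to @release_cnt DRBs starting at drb_release_rd_idx: unmap the DMA
 * buffer of every payload (PD) DRB, free the skb once the last DRB of its
 * chain (DRB_HDR_CONT cleared) is reached, and return budget to the queue,
 * notifying the upper layer once enough budget is available again.
 * Returns the number of DRBs processed.
 */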
static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num, unsigned int release_cnt)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
	struct dpmaif_drb *cur_drb, *drb_base;
	unsigned int drb_cnt, i, cur_idx;
	unsigned long flags;

	drb_skb_base = txq->drb_skb_base;
	drb_base = txq->drb_base;

	spin_lock_irqsave(&txq->tx_lock, flags);
	drb_cnt = txq->drb_size_cnt;
	cur_idx = txq->drb_release_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (i = 0; i < release_cnt; i++) {
		cur_drb = drb_base + cur_idx;
		if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
			cur_drb_skb = drb_skb_base + cur_idx;
			if (!cur_drb_skb->is_msg)
				dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
						 cur_drb_skb->data_len, DMA_TO_DEVICE);

			if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
				if (!cur_drb_skb->skb) {
					dev_err(dpmaif_ctrl->dev,
						"txq%u: DRB check fail, invalid skb\n", q_num);
					continue;
				}

				dev_kfree_skb_any(cur_drb_skb->skb);
			}

			cur_drb_skb->skb = NULL;
		}

		spin_lock_irqsave(&txq->tx_lock, flags);
		cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
		txq->drb_release_rd_idx = cur_idx;
		spin_unlock_irqrestore(&txq->tx_lock, flags);

		if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
			cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
	}

	if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
		dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);

	return i;
}

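/*
 * Release completed DRBs, at most @budget of them. Returns -EAGAIN if
 * completed DRBs remain after the budget is spent, so the caller can
 * reschedule itself, and 0 otherwise.
 */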
static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
				  unsigned int q_num, unsigned int budget)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int rel_cnt, real_rel_cnt;

	/* Update read index from HW */
	t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);

	rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					    txq->drb_rd_idx, DPMAIF_READ);

	real_rel_cnt = min_not_zero(budget, rel_cnt);
	if (real_rel_cnt)
		real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);

	return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}

static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
	return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}

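/*
 * TX completion work, run from the per-queue ordered workqueue after a UL
 * done interrupt: resume the device, release completed DRBs, and either
 * requeue this work (completions still pending) or re-enable the UL queue
 * interrupt.
 */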
static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
	struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
	struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
	struct dpmaif_hw_info *hw_info;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	/* The device may be in low power state. Disable sleep if needed */
	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		hw_info = &dpmaif_ctrl->hw_info;
		ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
		if (ret == -EAGAIN ||
		    (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
		     t7xx_dpmaif_drb_ring_not_empty(txq))) {
			queue_work(dpmaif_ctrl->txq[txq->index].worker,
				   &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
			/* Give the device time to enter the low power state */
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		} else {
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
			t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
		}
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

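/*
 * Build the message DRB that heads every skb's DRB chain: dtype MSG, the
 * CONT bit set (payload DRBs follow), the total packet length, the target
 * channel ID, and the L4 checksum flag.
 */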
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
			       unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
			       unsigned int channel_id)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;

	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
				  FIELD_PREP(DRB_HDR_CONT, 1) |
				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));

	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
				       FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
}

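/*
 * Build a payload (PD) DRB pointing at one DMA-mapped buffer. The CONT bit
 * is set on every payload DRB except the last one of the skb's chain, which
 * is how the release path knows where the chain ends.
 */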
static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				   unsigned int cur_idx, dma_addr_t data_addr,
				   unsigned int pkt_size, bool last_one)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;
	u32 header;

	header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
	if (!last_one)
		header |= FIELD_PREP(DRB_HDR_CONT, 1);

	drb->header = cpu_to_le32(header);
	drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
	drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}

static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
				bool is_frag, bool is_last_one, dma_addr_t bus_addr,
				unsigned int data_len)
{
	struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
	struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;

	drb_skb->skb = skb;
	drb_skb->bus_addr = bus_addr;
	drb_skb->data_len = data_len;
	drb_skb->index = cur_idx;
	drb_skb->is_msg = is_msg;
	drb_skb->is_frag = is_frag;
	drb_skb->is_last = is_last_one;
}

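/*
 * Convert one skb into a DRB chain in the ring: a message DRB, a payload DRB
 * for the linear data, and one payload DRB per page fragment. For an skb
 * with 3 frags that is 1 + 1 + 3 = 5 DRBs. On a DMA mapping failure, every
 * buffer mapped so far is unmapped and the write index is rolled back.
 */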
static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	unsigned int wr_cnt, send_cnt, payload_cnt;
	unsigned int cur_idx, drb_wr_idx_backup;
	struct skb_shared_info *shinfo;
	struct dpmaif_tx_queue *txq;
	struct t7xx_skb_cb *skb_cb;
	unsigned long flags;

	skb_cb = T7XX_SKB_CB(skb);
	txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
	if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
		return -ENODEV;

	atomic_set(&txq->tx_processing, 1);
	/* Ensure tx_processing is set to 1 before actually beginning the TX flow */
	smp_mb();

	shinfo = skb_shinfo(skb);
	if (shinfo->frag_list)
		dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");

	payload_cnt = shinfo->nr_frags + 1;
	/* nr_frags payload DRBs, one for skb->data, plus one message DRB */
	send_cnt = payload_cnt + 1;

	spin_lock_irqsave(&txq->tx_lock, flags);
	cur_idx = txq->drb_wr_idx;
	drb_wr_idx_backup = cur_idx;
	txq->drb_wr_idx += send_cnt;
	if (txq->drb_wr_idx >= txq->drb_size_cnt)
		txq->drb_wr_idx -= txq->drb_size_cnt;
	t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
	t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
		bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
		unsigned int data_len;
		dma_addr_t bus_addr;
		void *data_addr;

		if (!wr_cnt) {
			data_len = skb_headlen(skb);
			data_addr = skb->data;
			is_frag = false;
		} else {
			skb_frag_t *frag = shinfo->frags + wr_cnt - 1;

			data_len = skb_frag_size(frag);
			data_addr = skb_frag_address(frag);
			is_frag = true;
		}

		bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
			goto unmap_buffers;

		cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);

		spin_lock_irqsave(&txq->tx_lock, flags);
		t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
				       is_last_one);
		t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
				    is_last_one, bus_addr, data_len);
		spin_unlock_irqrestore(&txq->tx_lock, flags);
	}

	if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);

	atomic_set(&txq->tx_processing, 0);

	return 0;

unmap_buffers:
	while (wr_cnt--) {
		struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;

		cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
		drb_skb += cur_idx;
		dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
				 drb_skb->data_len, DMA_TO_DEVICE);
	}

	txq->drb_wr_idx = drb_wr_idx_backup;
	atomic_set(&txq->tx_processing, 0);

	return -ENOMEM;
}

static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
			return false;
	}

	return true;
}

/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_tx_queue *txq;

	txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
	if (!txq->que_started)
		return NULL;

	return txq;
}

static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
	return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					 txq->drb_wr_idx, DPMAIF_WRITE);
}

static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
	/* Payload DRBs (one per frag plus one for the linear data) + message DRB */
	return skb_shinfo(skb)->nr_frags + 2;
}

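/*
 * Move up to DPMAIF_SKB_TX_BURST_CNT skbs from the queue's skb list into the
 * DRB ring. Returns the number of DRBs written, 0 if nothing was queued, or
 * a negative error from t7xx_dpmaif_add_skb_to_ring().
 */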
static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
	unsigned int drb_remain_cnt, i;
	unsigned int send_drb_cnt;
	int drb_cnt = 0;
	int ret = 0;

	drb_remain_cnt = t7xx_txq_drb_wr_available(txq);

	for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
		struct sk_buff *skb;

		skb = skb_peek(&txq->tx_skb_head);
		if (!skb)
			break;

		send_drb_cnt = t7xx_skb_drb_cnt(skb);
		if (drb_remain_cnt < send_drb_cnt) {
			drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
			continue;
		}

		drb_remain_cnt -= send_drb_cnt;

		ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
		if (ret < 0) {
			dev_err(txq->dpmaif_ctrl->dev,
				"Failed to add skb to device's ring: %d\n", ret);
			break;
		}

		drb_cnt += send_drb_cnt;
		skb_unlink(skb, &txq->tx_skb_head);
	}

	if (drb_cnt > 0)
		return drb_cnt;

	return ret;
}

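/*
 * Drain the skb lists into the DRB ring and ring the hardware doorbell. The
 * doorbell value is the DRB count scaled by DPMAIF_UL_DRB_SIZE_WORD,
 * presumably because the hardware counts in words rather than descriptors.
 */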
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
	bool wait_disable_sleep = true;

	do {
		struct dpmaif_tx_queue *txq;
		int drb_send_cnt;

		txq = t7xx_select_tx_queue(dpmaif_ctrl);
		if (!txq)
			return;

		drb_send_cnt = t7xx_txq_burst_send_skb(txq);
		if (drb_send_cnt <= 0) {
			usleep_range(10, 20);
			cond_resched();
			continue;
		}

		/* Wait for the PCIe resource to unlock */
		if (wait_disable_sleep) {
			if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
				return;

			wait_disable_sleep = false;
		}

		t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
						 drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);

		cond_resched();
	} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
		 (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}

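/*
 * Push thread: sleep until at least one TX list is non-empty while the
 * interface is powered on, then take a runtime PM and sleep-disable
 * reference around the actual hardware push.
 */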
static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
	struct dpmaif_ctrl *dpmaif_ctrl = arg;
	int ret;

	while (!kthread_should_stop()) {
		if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
		    dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
			if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
						     (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
						     dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
		if (ret < 0 && ret != -EACCES)
			return ret;

		t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
		t7xx_do_tx_hw_push(dpmaif_ctrl);
		t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
		pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	}

	return 0;
}

int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
					     dpmaif_ctrl, "dpmaif_tx_hw_push");
	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}

void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (dpmaif_ctrl->tx_thread)
		kthread_stop(dpmaif_ctrl->tx_thread);
}

/**
 * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
 * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
 * @txq_number: Queue number to xmit on.
 * @skb: Pointer to the skb to transmit.
 *
 * Add the skb to the queue of skbs to be transmitted.
 * Wake up the thread that pushes skbs from the queue to the HW.
 *
 * Return:
 * * 0		- Success.
 * * -EBUSY	- Tx budget exhausted.
 *		  In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
 *		  state to prevent this error condition.
 */
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
			    struct sk_buff *skb)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct t7xx_skb_cb *skb_cb;

	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
		return -EBUSY;
	}

	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->txq_number = txq_number;
	skb_queue_tail(&txq->tx_skb_head, skb);
	wake_up(&dpmaif_ctrl->tx_wq);

	return 0;
}

void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (que_mask & BIT(i))
			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
	}
}

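/*
 * Allocate the per-queue descriptor memory: the DRB ring itself is DMA
 * coherent so the hardware can read descriptors in place, while the parallel
 * dpmaif_drb_skb array is plain kernel memory the driver uses to remember
 * skb and mapping state for the release path.
 */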
static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
	size_t brb_skb_size, brb_pd_size;

	brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
	brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);

	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;

	/* For HW && AP SW */
	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!txq->drb_base)
		return -ENOMEM;

	/* For AP SW to record the skb information */
	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
	if (!txq->drb_skb_base) {
		dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
				  txq->drb_base, txq->drb_bus_addr);
		return -ENOMEM;
	}

	return 0;
}

static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
	unsigned int i;

	if (!drb_skb_base)
		return;

	for (i = 0; i < txq->drb_size_cnt; i++) {
		drb_skb = drb_skb_base + i;
		if (!drb_skb->skb)
			continue;

		if (!drb_skb->is_msg)
			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
					 drb_skb->data_len, DMA_TO_DEVICE);

		if (drb_skb->is_last) {
			dev_kfree_skb(drb_skb->skb);
			drb_skb->skb = NULL;
		}
	}
}

static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
	if (txq->drb_base)
		dma_free_coherent(txq->dpmaif_ctrl->dev,
				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
				  txq->drb_base, txq->drb_bus_addr);

	t7xx_dpmaif_tx_free_drb_skb(txq);
}

/**
 * t7xx_dpmaif_txq_init() - Initialize TX queue.
 * @txq: Pointer to struct dpmaif_tx_queue.
 *
 * Initialize the TX queue data structure and allocate memory for it to use.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from failed sub-initializations.
 */
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
	int ret;

	skb_queue_head_init(&txq->tx_skb_head);
	init_waitqueue_head(&txq->req_wq);
	atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);

	ret = t7xx_dpmaif_tx_drb_buf_init(txq);
	if (ret) {
		dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
		return ret;
	}

	txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
				WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
				txq->index);
	if (!txq->worker)
		return -ENOMEM;

	INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
	spin_lock_init(&txq->tx_lock);

	return 0;
}

void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
	if (txq->worker)
		destroy_workqueue(txq->worker);

	skb_queue_purge(&txq->tx_skb_head);
	t7xx_dpmaif_tx_drb_buf_rel(txq);
}

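/*
 * Stop all TX queues: clear que_started with a full barrier so new senders
 * observe it, then spin until any in-flight t7xx_dpmaif_add_skb_to_ring()
 * call has dropped tx_processing.
 */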
void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		struct dpmaif_tx_queue *txq;
		int count = 0;

		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = false;
		/* Make sure TXQ is disabled */
		smp_mb();

		/* Wait for active Tx to be done */
		while (atomic_read(&txq->tx_processing)) {
			if (++count >= DPMAIF_MAX_CHECK_COUNT) {
				dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
				break;
			}
		}
	}
}

static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
	txq->que_started = false;

	cancel_work_sync(&txq->dpmaif_tx_work);
	flush_work(&txq->dpmaif_tx_work);
	t7xx_dpmaif_tx_free_drb_skb(txq);

	txq->drb_rd_idx = 0;
	txq->drb_wr_idx = 0;
	txq->drb_release_rd_idx = 0;
}

void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++)
		t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}