xref: /linux/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c (revision bfc64d9b7e8cac82be6b8629865e137d962578f8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Amir Hanania <amir.hanania@intel.com>
8  *  Haijun Liu <haijun.liu@mediatek.com>
9  *  Eliot Lee <eliot.lee@intel.com>
10  *  Moises Veleta <moises.veleta@intel.com>
11  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
12  *
13  * Contributors:
14  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
17  */
18 
19 #include <linux/atomic.h>
20 #include <linux/bitfield.h>
21 #include <linux/bitops.h>
22 #include <linux/device.h>
23 #include <linux/dma-direction.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/gfp.h>
26 #include <linux/err.h>
27 #include <linux/iopoll.h>
28 #include <linux/jiffies.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/list.h>
32 #include <linux/minmax.h>
33 #include <linux/mm.h>
34 #include <linux/netdevice.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/sched.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44 
45 #include "t7xx_dpmaif.h"
46 #include "t7xx_hif_dpmaif.h"
47 #include "t7xx_hif_dpmaif_rx.h"
48 #include "t7xx_netdev.h"
49 #include "t7xx_pci.h"
50 
51 #define DPMAIF_BAT_COUNT		8192
52 #define DPMAIF_FRG_COUNT		4814
53 #define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)
54 
55 #define DPMAIF_BAT_CNT_THRESHOLD	30
56 #define DPMAIF_PIT_CNT_THRESHOLD	60
57 #define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
58 #define DPMAIF_NOTIFY_RELEASE_COUNT	128
59 #define DPMAIF_POLL_PIT_TIME_US		20
60 #define DPMAIF_POLL_PIT_MAX_TIME_US	2000
61 #define DPMAIF_WQ_TIME_LIMIT_MS		2
62 #define DPMAIF_CS_RESULT_PASS		0
63 
64 /* Packet type */
65 #define DES_PT_PD			0
66 #define DES_PT_MSG			1
67 /* Buffer type */
68 #define PKT_BUF_FRAG			1
69 
70 static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
71 {
72 	u32 value;
73 
74 	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
75 	value <<= 13;
76 	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
77 	return value;
78 }
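/* Example (illustrative values): the code above treats PD_PIT_BUFFER_ID as
 * the low 13 bits of the buffer ID and PD_PIT_H_BID as the bits above them,
 * so footer.H_BID = 0 with header.BUFFER_ID = 0x1ABC reassembles to
 * (0 << 13) + 0x1ABC = 6844.
 */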
79 
80 static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
81 					 const unsigned int q_num, const unsigned int bat_cnt)
82 {
83 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
84 	struct dpmaif_bat_request *bat_req = rxq->bat_req;
85 	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
86 
87 	if (!rxq->que_started) {
88 		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
89 		return -EINVAL;
90 	}
91 
92 	old_rl_idx = bat_req->bat_release_rd_idx;
93 	old_wr_idx = bat_req->bat_wr_idx;
94 	new_wr_idx = old_wr_idx + bat_cnt;
95 
96 	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
97 		goto err_flow;
98 
99 	if (new_wr_idx >= bat_req->bat_size_cnt) {
100 		new_wr_idx -= bat_req->bat_size_cnt;
101 		if (new_wr_idx >= old_rl_idx)
102 			goto err_flow;
103 	}
104 
105 	bat_req->bat_wr_idx = new_wr_idx;
106 	return 0;
107 
108 err_flow:
109 	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
110 	return -EINVAL;
111 }
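/* Wrap-around example (values assumed): with bat_size_cnt = 8192,
 * old_wr_idx = 8190 and bat_cnt = 4, new_wr_idx becomes 8194 and wraps to 2;
 * the update is rejected if the wrapped index reaches bat_release_rd_idx,
 * since the writer must never overtake the release pointer.
 */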
112 
113 static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
114 					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
115 {
116 	dma_addr_t data_bus_addr;
117 	struct sk_buff *skb;
118 
119 	skb = __dev_alloc_skb(size, GFP_KERNEL);
120 	if (!skb)
121 		return false;
122 
123 	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
124 	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
125 		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
126 		dev_kfree_skb_any(skb);
127 		return false;
128 	}
129 
130 	cur_skb->skb = skb;
131 	cur_skb->data_bus_addr = data_bus_addr;
132 	cur_skb->data_len = size;
133 
134 	return true;
135 }
136 
137 static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
138 			       unsigned int index)
139 {
140 	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
141 
142 	if (bat_skb->skb) {
143 		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
144 		dev_kfree_skb(bat_skb->skb);
145 		bat_skb->skb = NULL;
146 	}
147 }
148 
149 /**
150  * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
151  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
152  * @bat_req: Pointer to BAT request structure.
153  * @q_num: Queue number.
154  * @buf_cnt: Number of buffers to allocate.
155  * @initial: Indicates if the ring is being populated for the first time.
156  *
157  * Allocate skbs and store the start address of each data buffer in the BAT ring.
158  * If this is not the initial call, notify the HW about the new entries.
159  *
160  * Return:
161  * * 0		- Success.
162  * * -ERROR	- Error code.
163  */
164 int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
165 			     const struct dpmaif_bat_request *bat_req,
166 			     const unsigned int q_num, const unsigned int buf_cnt,
167 			     const bool initial)
168 {
169 	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
170 	int ret;
171 
172 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
173 		return -EINVAL;
174 
175 	/* Check BAT buffer space */
176 	bat_max_cnt = bat_req->bat_size_cnt;
177 
178 	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
179 					    bat_req->bat_wr_idx, DPMAIF_WRITE);
180 	if (buf_cnt > bat_cnt)
181 		return -ENOMEM;
182 
183 	bat_start_idx = bat_req->bat_wr_idx;
184 
185 	for (i = 0; i < buf_cnt; i++) {
186 		unsigned int cur_bat_idx = bat_start_idx + i;
187 		struct dpmaif_bat_skb *cur_skb;
188 		struct dpmaif_bat *cur_bat;
189 
190 		if (cur_bat_idx >= bat_max_cnt)
191 			cur_bat_idx -= bat_max_cnt;
192 
193 		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
194 		if (!cur_skb->skb &&
195 		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
196 			break;
197 
198 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
199 		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
200 		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
201 	}
202 
203 	if (!i)
204 		return -ENOMEM;
205 
206 	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
207 	if (ret)
208 		goto err_unmap_skbs;
209 
210 	if (!initial) {
211 		unsigned int hw_wr_idx;
212 
213 		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
214 		if (ret)
215 			goto err_unmap_skbs;
216 
217 		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
218 							  DPF_RX_QNO_DFT);
219 		if (hw_wr_idx != bat_req->bat_wr_idx) {
220 			ret = -EFAULT;
221 			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
222 			goto err_unmap_skbs;
223 		}
224 	}
225 
226 	return 0;
227 
228 err_unmap_skbs:
229 	while (i--)
230 		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
231 
232 	return ret;
233 }
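/* Refill sketch: at runtime this path is driven from
 * t7xx_dpmaif_bat_release_and_add() below as
 *
 *	t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index,
 *				 bid_cnt, false);
 *
 * while the initial population at queue start is assumed to pass
 * initial = true so that the HW is not notified from this path.
 */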
234 
235 static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
236 					  const unsigned int rel_entry_num)
237 {
238 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
239 	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
240 	int ret;
241 
242 	if (!rxq->que_started)
243 		return 0;
244 
245 	if (rel_entry_num >= rxq->pit_size_cnt) {
246 		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
247 		return -EINVAL;
248 	}
249 
250 	old_rel_idx = rxq->pit_release_rd_idx;
251 	new_rel_idx = old_rel_idx + rel_entry_num;
252 	hw_wr_idx = rxq->pit_wr_idx;
253 	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
254 		new_rel_idx -= rxq->pit_size_cnt;
255 
256 	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
257 	if (ret) {
258 		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
259 		return ret;
260 	}
261 
262 	rxq->pit_release_rd_idx = new_rel_idx;
263 	return 0;
264 }
265 
266 static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
267 {
268 	unsigned long flags;
269 
270 	spin_lock_irqsave(&bat_req->mask_lock, flags);
271 	set_bit(idx, bat_req->bat_bitmap);
272 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
273 }
274 
275 static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
276 				       const unsigned int cur_bid)
277 {
278 	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
279 	struct dpmaif_bat_page *bat_page;
280 
281 	if (cur_bid >= DPMAIF_FRG_COUNT)
282 		return -EINVAL;
283 
284 	bat_page = bat_frag->bat_skb + cur_bid;
285 	if (!bat_page->page)
286 		return -EINVAL;
287 
288 	return 0;
289 }
290 
291 static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
292 				unsigned int index)
293 {
294 	struct dpmaif_bat_page *bat_page = bat_page_base + index;
295 
296 	if (bat_page->page) {
297 		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
298 		put_page(bat_page->page);
299 		bat_page->page = NULL;
300 	}
301 }
302 
303 /**
304  * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
305  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
306  * @bat_req: Pointer to BAT request structure.
307  * @buf_cnt: Number of buffers to allocate.
308  * @initial: Indicates if the ring is being populated for the first time.
309  *
310  * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
311  * This function allocates a page fragment and stores the start address of the page
312  * into the Fragment BAT ring.
313  * If this is not the initial call, notify the HW about the new entries.
314  *
315  * Return:
316  * * 0		- Success.
317  * * -ERROR	- Error code.
318  */
319 int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
320 			      const unsigned int buf_cnt, const bool initial)
321 {
322 	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
323 	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
324 	int ret = 0, i;
325 
326 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
327 		return -EINVAL;
328 
329 	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
330 					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
331 					      DPMAIF_WRITE);
332 	if (buf_cnt > buf_space) {
333 		dev_err(dpmaif_ctrl->dev,
334 			"Requested more buffers than the space available in RX frag ring\n");
335 		return -EINVAL;
336 	}
337 
338 	for (i = 0; i < buf_cnt; i++) {
339 		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
340 		struct dpmaif_bat *cur_bat;
341 		dma_addr_t data_base_addr;
342 
343 		if (!cur_page->page) {
344 			unsigned long offset;
345 			struct page *page;
346 			void *data;
347 
348 			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
349 			if (!data)
350 				break;
351 
352 			page = virt_to_head_page(data);
353 			offset = data - page_address(page);
354 
355 			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
356 						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
357 			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
358 				put_page(virt_to_head_page(data));
359 				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
360 				break;
361 			}
362 
363 			cur_page->page = page;
364 			cur_page->data_bus_addr = data_base_addr;
365 			cur_page->offset = offset;
366 			cur_page->data_len = bat_req->pkt_buf_sz;
367 		}
368 
369 		data_base_addr = cur_page->data_bus_addr;
370 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
371 		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
372 		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
373 		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
374 	}
375 
376 	bat_req->bat_wr_idx = cur_bat_idx;
377 
378 	if (!initial)
379 		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
380 
381 	if (i < buf_cnt) {
382 		ret = -ENOMEM;
383 		if (initial) {
384 			while (--i > 0)
385 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
386 		}
387 	}
388 
389 	return ret;
390 }
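/* The fragment ring is refilled the same way: t7xx_dpmaif_frag_bat_release_and_add()
 * below calls
 *
 *	t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
 *
 * once enough fragment entries have been released back to the pool.
 */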
391 
392 static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
393 				       const struct dpmaif_pit *pkt_info,
394 				       struct sk_buff *skb)
395 {
396 	unsigned long long data_bus_addr, data_base_addr;
397 	struct device *dev = rxq->dpmaif_ctrl->dev;
398 	struct dpmaif_bat_page *page_info;
399 	unsigned int data_len;
400 	int data_offset;
401 
402 	page_info = rxq->bat_frag->bat_skb;
403 	page_info += t7xx_normal_pit_bid(pkt_info);
404 	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
405 
406 	if (!page_info->page)
407 		return -EINVAL;
408 
409 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
410 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
411 	data_base_addr = page_info->data_bus_addr;
412 	data_offset = data_bus_addr - data_base_addr;
413 	data_offset += page_info->offset;
414 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
415 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
416 			data_offset, data_len, page_info->data_len);
417 
418 	page_info->page = NULL;
419 	page_info->offset = 0;
420 	page_info->data_len = 0;
421 	return 0;
422 }
423 
424 static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
425 				const struct dpmaif_pit *pkt_info,
426 				const struct dpmaif_cur_rx_skb_info *skb_info)
427 {
428 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
429 	int ret;
430 
431 	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
432 	if (ret < 0)
433 		return ret;
434 
435 	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
436 	if (ret < 0) {
437 		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
438 		return ret;
439 	}
440 
441 	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
442 	return 0;
443 }
444 
445 static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
446 {
447 	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
448 
449 	bat_skb += cur_bid;
450 	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
451 		return -EINVAL;
452 
453 	return 0;
454 }
455 
456 static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
457 {
458 	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
459 }
460 
461 static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
462 				     const struct dpmaif_pit *pit)
463 {
464 	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
465 
466 	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
467 				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
468 				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
469 		return -EFAULT;
470 
471 	rxq->expect_pit_seq++;
472 	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
473 		rxq->expect_pit_seq = 0;
474 
475 	return 0;
476 }
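/* The HW is expected to stamp PIT entries with an incrementing sequence
 * number that wraps at DPMAIF_DL_PIT_SEQ_VALUE; polling for up to
 * DPMAIF_POLL_PIT_MAX_TIME_US gives an in-flight DMA write time to land
 * before the entry is parsed.
 */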
477 
478 static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
479 {
480 	unsigned int zero_index;
481 	unsigned long flags;
482 
483 	spin_lock_irqsave(&bat_req->mask_lock, flags);
484 
485 	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
486 					bat_req->bat_release_rd_idx);
487 
488 	if (zero_index < bat_req->bat_size_cnt) {
489 		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
490 		return zero_index - bat_req->bat_release_rd_idx;
491 	}
492 
493 	/* Limit the search to the range before bat_release_rd_idx */
494 	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
495 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
496 	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
497 }
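/* Worked example (ring shrunk for illustration): with bat_size_cnt = 8,
 * bat_release_rd_idx = 6 and bits 6, 7, 0 and 1 set in bat_bitmap, the first
 * search runs off the end of the bitmap, the second finds bit 2 clear, and
 * the function reports 8 - 6 + 2 = 4 releasable entries.
 */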
498 
499 static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
500 					 const unsigned int rel_entry_num,
501 					 const enum bat_type buf_type)
502 {
503 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
504 	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
505 	struct dpmaif_bat_request *bat;
506 	unsigned long flags;
507 
508 	if (!rxq->que_started || !rel_entry_num)
509 		return -EINVAL;
510 
511 	if (buf_type == BAT_TYPE_FRAG) {
512 		bat = rxq->bat_frag;
513 		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
514 	} else {
515 		bat = rxq->bat_req;
516 		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
517 	}
518 
519 	if (rel_entry_num >= bat->bat_size_cnt)
520 		return -EINVAL;
521 
522 	old_rel_idx = bat->bat_release_rd_idx;
523 	new_rel_idx = old_rel_idx + rel_entry_num;
524 
525 	/* Do not need to release if the queue is empty */
526 	if (bat->bat_wr_idx == old_rel_idx)
527 		return 0;
528 
529 	if (hw_rd_idx >= old_rel_idx) {
530 		if (new_rel_idx > hw_rd_idx)
531 			return -EINVAL;
532 	}
533 
534 	if (new_rel_idx >= bat->bat_size_cnt) {
535 		new_rel_idx -= bat->bat_size_cnt;
536 		if (new_rel_idx > hw_rd_idx)
537 			return -EINVAL;
538 	}
539 
540 	spin_lock_irqsave(&bat->mask_lock, flags);
541 	for (i = 0; i < rel_entry_num; i++) {
542 		unsigned int index = bat->bat_release_rd_idx + i;
543 
544 		if (index >= bat->bat_size_cnt)
545 			index -= bat->bat_size_cnt;
546 
547 		clear_bit(index, bat->bat_bitmap);
548 	}
549 	spin_unlock_irqrestore(&bat->mask_lock, flags);
550 
551 	bat->bat_release_rd_idx = new_rel_idx;
552 	return rel_entry_num;
553 }
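/* The function above returns the number of entries released, 0 when the ring
 * is already empty, or a negative errno; the callers below treat any value
 * <= 0 as "nothing was recycled" and bail out.
 */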
554 
555 static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
556 {
557 	int ret;
558 
559 	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
560 		return 0;
561 
562 	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
563 	if (ret)
564 		return ret;
565 
566 	rxq->pit_remain_release_cnt = 0;
567 	return 0;
568 }
569 
570 static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
571 {
572 	unsigned int bid_cnt;
573 	int ret;
574 
575 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
576 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
577 		return 0;
578 
579 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
580 	if (ret <= 0) {
581 		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
582 		return ret;
583 	}
584 
585 	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
586 	if (ret < 0)
587 		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
588 
589 	return ret;
590 }
591 
592 static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
593 {
594 	unsigned int bid_cnt;
595 	int ret;
596 
597 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
598 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
599 		return 0;
600 
601 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
602 	if (ret <= 0) {
603 		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
604 		return ret;
605 	}
606 
607 	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
608 }
609 
610 static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
611 				      const struct dpmaif_pit *msg_pit,
612 				      struct dpmaif_cur_rx_skb_info *skb_info)
613 {
614 	int header = le32_to_cpu(msg_pit->header);
615 
616 	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
617 	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
618 	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
619 	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
620 }
621 
622 static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
623 				       const struct dpmaif_pit *pkt_info,
624 				       struct dpmaif_cur_rx_skb_info *skb_info)
625 {
626 	unsigned long long data_bus_addr, data_base_addr;
627 	struct device *dev = rxq->dpmaif_ctrl->dev;
628 	struct dpmaif_bat_skb *bat_skb;
629 	unsigned int data_len;
630 	struct sk_buff *skb;
631 	int data_offset;
632 
633 	bat_skb = rxq->bat_req->bat_skb;
634 	bat_skb += t7xx_normal_pit_bid(pkt_info);
635 	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
636 
637 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
638 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
639 	data_base_addr = bat_skb->data_bus_addr;
640 	data_offset = data_bus_addr - data_base_addr;
641 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
642 	skb = bat_skb->skb;
643 	skb->len = 0;
644 	skb_reset_tail_pointer(skb);
645 	skb_reserve(skb, data_offset);
646 
647 	if (skb->tail + data_len > skb->end) {
648 		dev_err(dev, "No buffer space available\n");
649 		return -ENOBUFS;
650 	}
651 
652 	skb_put(skb, data_len);
653 	skb_info->cur_skb = skb;
654 	bat_skb->skb = NULL;
655 	return 0;
656 }
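/* Offset example (illustrative values): if the BAT buffer was mapped at bus
 * address 0x1000 and the PIT reports payload at 0x1040 with
 * PD_PIT_DATA_LEN = 1400, skb_reserve() skips the first 0x40 bytes of the
 * preallocated skb and skb_put() exposes the 1400-byte payload.
 */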
657 
658 static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
659 				  const struct dpmaif_pit *pkt_info,
660 				  struct dpmaif_cur_rx_skb_info *skb_info)
661 {
662 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
663 	int ret;
664 
665 	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
666 	if (ret < 0)
667 		return ret;
668 
669 	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
670 	if (ret < 0) {
671 		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
672 		return ret;
673 	}
674 
675 	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
676 	return 0;
677 }
678 
679 static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
680 {
681 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
682 	int ret;
683 
684 	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
685 
686 	ret = t7xx_dpmaif_pit_release_and_add(rxq);
687 	if (ret < 0)
688 		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
689 
690 	return ret;
691 }
692 
693 static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
694 			       struct dpmaif_cur_rx_skb_info *skb_info)
695 {
696 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
697 	struct sk_buff *skb = skb_info->cur_skb;
698 	struct t7xx_skb_cb *skb_cb;
699 	u8 netif_id;
700 
701 	skb_info->cur_skb = NULL;
702 
703 	if (skb_info->pit_dp) {
704 		dev_kfree_skb_any(skb);
705 		return;
706 	}
707 
708 	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
709 									CHECKSUM_NONE;
710 	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
711 	skb_cb = T7XX_SKB_CB(skb);
712 	skb_cb->netif_idx = netif_id;
713 	skb_cb->rx_pkt_type = skb_info->pkt_type;
714 	dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
715 }
716 
717 static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
718 				const unsigned int budget, int *once_more)
719 {
720 	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
721 	struct device *dev = rxq->dpmaif_ctrl->dev;
722 	struct dpmaif_cur_rx_skb_info *skb_info;
723 	int ret = 0;
724 
725 	pit_len = rxq->pit_size_cnt;
726 	skb_info = &rxq->rx_data_info;
727 	cur_pit = rxq->pit_rd_idx;
728 
729 	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
730 		struct dpmaif_pit *pkt_info;
731 		u32 val;
732 
733 		if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
734 			break;
735 
736 		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
737 		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
738 			dev_err_ratelimited(dev, "RXQ%u PIT sequence check failed\n", rxq->index);
739 			*once_more = 1;
740 			return recv_skb_cnt;
741 		}
742 
743 		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
744 		if (val == DES_PT_MSG) {
745 			if (skb_info->msg_pit_received)
746 				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
747 
748 			skb_info->msg_pit_received = true;
749 			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
750 		} else { /* DES_PT_PD */
751 			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
752 			if (val != PKT_BUF_FRAG)
753 				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
754 			else if (!skb_info->cur_skb)
755 				ret = -EINVAL;
756 			else
757 				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
758 
759 			if (ret < 0) {
760 				skb_info->err_payload = 1;
761 				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
762 			}
763 
764 			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
765 			if (!val) {
766 				if (!skb_info->err_payload) {
767 					t7xx_dpmaif_rx_skb(rxq, skb_info);
768 				} else if (skb_info->cur_skb) {
769 					dev_kfree_skb_any(skb_info->cur_skb);
770 					skb_info->cur_skb = NULL;
771 				}
772 
773 				memset(skb_info, 0, sizeof(*skb_info));
774 				recv_skb_cnt++;
775 			}
776 		}
777 
778 		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
779 		rxq->pit_rd_idx = cur_pit;
780 		rxq->pit_remain_release_cnt++;
781 
782 		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
783 			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
784 			if (ret < 0)
785 				break;
786 		}
787 	}
788 
789 	if (!ret)
790 		ret = t7xx_dpmaifq_rx_notify_hw(rxq);
791 
792 	if (ret)
793 		return ret;
794 
795 	return recv_skb_cnt;
796 }
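/* PIT walk summary: a MSG PIT announces a new packet (channel, checksum and
 * packet type), followed by one or more PD PITs that attach payload from the
 * normal or fragment BAT; the assembled skb is handed up via
 * t7xx_dpmaif_rx_skb() once a PD PIT with PD_PIT_CONT cleared is seen.
 */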
797 
798 static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
799 {
800 	unsigned int hw_wr_idx, pit_cnt;
801 
802 	if (!rxq->que_started)
803 		return 0;
804 
805 	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
806 	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
807 					    DPMAIF_READ);
808 	rxq->pit_wr_idx = hw_wr_idx;
809 	return pit_cnt;
810 }
811 
812 static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
813 					    const unsigned int q_num,
814 					    const unsigned int budget, int *once_more)
815 {
816 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
817 	unsigned int cnt;
818 	int ret = 0;
819 
820 	cnt = t7xx_dpmaifq_poll_pit(rxq);
821 	if (!cnt)
822 		return ret;
823 
824 	ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
825 	if (ret < 0)
826 		dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
827 
828 	return ret;
829 }
830 
831 int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
832 {
833 	struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
834 	struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
835 	int ret, once_more = 0, work_done = 0;
836 
837 	atomic_set(&rxq->rx_processing, 1);
838 	/* Ensure rx_processing is set to 1 before the RX flow actually begins */
839 	smp_mb();
840 
841 	if (!rxq->que_started) {
842 		atomic_set(&rxq->rx_processing, 0);
843 		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
844 		dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
845 		return work_done;
846 	}
847 
848 	if (!rxq->sleep_lock_pending)
849 		t7xx_pci_disable_sleep(t7xx_dev);
850 
851 	ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
852 	if (!ret) {
853 		napi_complete_done(napi, work_done);
854 		rxq->sleep_lock_pending = true;
855 		napi_schedule(napi);
856 		return work_done;
857 	}
858 
859 	rxq->sleep_lock_pending = false;
860 	while (work_done < budget) {
861 		int each_budget = budget - work_done;
862 		int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
863 							      each_budget, &once_more);
864 		if (rx_cnt > 0)
865 			work_done += rx_cnt;
866 		else
867 			break;
868 	}
869 
870 	if (once_more) {
871 		napi_gro_flush(napi, false);
872 		work_done = budget;
873 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
874 	} else if (work_done < budget) {
875 		napi_complete_done(napi, work_done);
876 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
877 		t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
878 		t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
879 		pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
880 		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
881 		atomic_set(&rxq->rx_processing, 0);
882 	} else {
883 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
884 	}
885 
886 	return work_done;
887 }
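/* Sketch of how a poll callback like this is typically registered (the actual
 * registration lives in the netdev setup code; names here are illustrative):
 *
 *	netif_napi_add(netdev, &rxq->napi, t7xx_dpmaif_napi_rx_poll);
 *	napi_enable(&rxq->napi);
 */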
888 
889 void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
890 {
891 	struct dpmaif_rx_queue *rxq;
892 	struct dpmaif_ctrl *ctrl;
893 	int qno, ret;
894 
895 	qno = ffs(que_mask) - 1;
896 	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
897 		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno);
898 		return;
899 	}
900 
901 	rxq = &dpmaif_ctrl->rxq[qno];
902 	ctrl = rxq->dpmaif_ctrl;
903 	/* We need to make sure that the modem has been resumed before
904 	 * scheduling NAPI. This can't be done inside the polling function,
905 	 * as we could block while waiting for the device to resume, which
906 	 * is not allowed in the softirq context that the poll function
907 	 * runs in.
908 	 */
909 	ret = pm_runtime_resume_and_get(ctrl->dev);
910 	if (ret < 0 && ret != -EACCES) {
911 		dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
912 		return;
913 	}
914 	napi_schedule(&rxq->napi);
915 }
916 
917 static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
918 				  const struct dpmaif_bat_request *bat_req)
919 {
920 	if (bat_req->bat_base)
921 		dma_free_coherent(dpmaif_ctrl->dev,
922 				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
923 				  bat_req->bat_base, bat_req->bat_bus_addr);
924 }
925 
926 /**
927  * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
928  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
929  * @bat_req: Pointer to BAT request structure.
930  * @buf_type: BAT ring type.
931  *
932  * This function allocates the BAT ring buffer shared with the HW device, and also
933  * allocates a buffer used to store information about the BAT skbs for later release.
934  *
935  * Return:
936  * * 0		- Success.
937  * * -ERROR	- Error code.
938  */
939 int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
940 			  const enum bat_type buf_type)
941 {
942 	int sw_buf_size;
943 
944 	if (buf_type == BAT_TYPE_FRAG) {
945 		sw_buf_size = sizeof(struct dpmaif_bat_page);
946 		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
947 		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
948 	} else {
949 		sw_buf_size = sizeof(struct dpmaif_bat_skb);
950 		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
951 		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
952 	}
953 
954 	bat_req->type = buf_type;
955 	bat_req->bat_wr_idx = 0;
956 	bat_req->bat_release_rd_idx = 0;
957 
958 	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
959 					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
960 					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
961 	if (!bat_req->bat_base)
962 		return -ENOMEM;
963 
964 	/* For AP SW to record skb information */
965 	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
966 					GFP_KERNEL);
967 	if (!bat_req->bat_skb)
968 		goto err_free_dma_mem;
969 
970 	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
971 	if (!bat_req->bat_bitmap)
972 		goto err_free_dma_mem;
973 
974 	spin_lock_init(&bat_req->mask_lock);
975 	atomic_set(&bat_req->refcnt, 0);
976 	return 0;
977 
978 err_free_dma_mem:
979 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
980 
981 	return -ENOMEM;
982 }
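/* Minimal alloc/free pairing (illustrative, error handling trimmed; note that
 * t7xx_dpmaif_bat_free() only tears the ring down once its refcnt drops to
 * zero):
 *
 *	ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
 *	if (ret)
 *		return ret;
 *	...
 *	t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
 */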
983 
984 void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
985 {
986 	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
987 		return;
988 
989 	bitmap_free(bat_req->bat_bitmap);
990 	bat_req->bat_bitmap = NULL;
991 
992 	if (bat_req->bat_skb) {
993 		unsigned int i;
994 
995 		for (i = 0; i < bat_req->bat_size_cnt; i++) {
996 			if (bat_req->type == BAT_TYPE_FRAG)
997 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
998 			else
999 				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1000 		}
1001 	}
1002 
1003 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
1004 }
1005 
1006 static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
1007 {
1008 	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
1009 	rxq->pit_rd_idx = 0;
1010 	rxq->pit_wr_idx = 0;
1011 	rxq->pit_release_rd_idx = 0;
1012 	rxq->expect_pit_seq = 0;
1013 	rxq->pit_remain_release_cnt = 0;
1014 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1015 
1016 	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
1017 					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1018 					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
1019 	if (!rxq->pit_base)
1020 		return -ENOMEM;
1021 
1022 	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
1023 	atomic_inc(&rxq->bat_req->refcnt);
1024 
1025 	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
1026 	atomic_inc(&rxq->bat_frag->refcnt);
1027 	return 0;
1028 }
1029 
1030 static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
1031 {
1032 	if (!rxq->dpmaif_ctrl)
1033 		return;
1034 
1035 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
1036 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
1037 
1038 	if (rxq->pit_base)
1039 		dma_free_coherent(rxq->dpmaif_ctrl->dev,
1040 				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1041 				  rxq->pit_base, rxq->pit_bus_addr);
1042 }
1043 
1044 int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
1045 {
1046 	int ret;
1047 
1048 	ret = t7xx_dpmaif_rx_alloc(queue);
1049 	if (ret < 0)
1050 		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
1051 
1052 	return ret;
1053 }
1054 
1055 void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
1056 {
1057 	t7xx_dpmaif_rx_buf_free(queue);
1058 }
1059 
1060 static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
1061 {
1062 	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
1063 	struct dpmaif_rx_queue *rxq;
1064 	int ret;
1065 
1066 	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
1067 	if (ret < 0 && ret != -EACCES)
1068 		return;
1069 
1070 	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
1071 
1072 	/* All RX queues share one BAT table, so use DPF_RX_QNO_DFT */
1073 	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
1074 	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
1075 		t7xx_dpmaif_bat_release_and_add(rxq);
1076 		t7xx_dpmaif_frag_bat_release_and_add(rxq);
1077 	}
1078 
1079 	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
1080 	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
1081 	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
1082 }
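/* This worker is scheduled from t7xx_dpmaifq_rx_notify_hw() above so that BAT
 * and fragment entries can be recycled outside of NAPI/softirq context.
 */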
1083 
1084 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
1085 {
1086 	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
1087 						      WQ_MEM_RECLAIM, 1);
1088 	if (!dpmaif_ctrl->bat_release_wq)
1089 		return -ENOMEM;
1090 
1091 	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
1092 	return 0;
1093 }
1094 
1095 void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
1096 {
1097 	flush_work(&dpmaif_ctrl->bat_release_work);
1098 
1099 	if (dpmaif_ctrl->bat_release_wq) {
1100 		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
1101 		dpmaif_ctrl->bat_release_wq = NULL;
1102 	}
1103 }
1104 
1105 /**
1106  * t7xx_dpmaif_rx_stop() - Suspend RX flow.
1107  * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
1108  *
1109  * Wait for all the RX work to finish executing and mark the RX queues as paused.
1110  */
1111 void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
1112 {
1113 	unsigned int i;
1114 
1115 	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
1116 		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
1117 		int timeout, value;
1118 
1119 		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
1120 						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
1121 		if (timeout)
1122 			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
1123 
1124 		/* Ensure RX processing has stopped before we set rxq->que_started to false */
1125 		smp_mb();
1126 		rxq->que_started = false;
1127 	}
1128 }
1129 
1130 static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
1131 {
1132 	int cnt, j = 0;
1133 
1134 	rxq->que_started = false;
1135 
1136 	do {
1137 		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
1138 						rxq->pit_wr_idx, DPMAIF_READ);
1139 
1140 		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
1141 			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
1142 			break;
1143 		}
1144 	} while (cnt);
1145 
1146 	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
1147 	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
1148 	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
1149 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1150 
1151 	rxq->pit_rd_idx = 0;
1152 	rxq->pit_wr_idx = 0;
1153 	rxq->pit_release_rd_idx = 0;
1154 	rxq->expect_pit_seq = 0;
1155 	rxq->pit_remain_release_cnt = 0;
1156 	rxq->bat_req->bat_release_rd_idx = 0;
1157 	rxq->bat_req->bat_wr_idx = 0;
1158 	rxq->bat_frag->bat_release_rd_idx = 0;
1159 	rxq->bat_frag->bat_wr_idx = 0;
1160 }
1161 
1162 void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
1163 {
1164 	int i;
1165 
1166 	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
1167 		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
1168 }
1169