xref: /linux/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c (revision 1cac38910ecb881b09f61f57545a771bbe57ba68)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021, MediaTek Inc.
4  * Copyright (c) 2021-2022, Intel Corporation.
5  *
6  * Authors:
7  *  Amir Hanania <amir.hanania@intel.com>
8  *  Haijun Liu <haijun.liu@mediatek.com>
9  *  Eliot Lee <eliot.lee@intel.com>
10  *  Moises Veleta <moises.veleta@intel.com>
11  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
12  *
13  * Contributors:
14  *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
15  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
16  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
17  */
18 
19 #include <linux/atomic.h>
20 #include <linux/bitfield.h>
21 #include <linux/bitops.h>
22 #include <linux/device.h>
23 #include <linux/dma-direction.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/gfp.h>
26 #include <linux/err.h>
27 #include <linux/iopoll.h>
28 #include <linux/jiffies.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/list.h>
32 #include <linux/minmax.h>
33 #include <linux/mm.h>
34 #include <linux/netdevice.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/sched.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
44 #include <net/gro.h>
45 
46 #include "t7xx_dpmaif.h"
47 #include "t7xx_hif_dpmaif.h"
48 #include "t7xx_hif_dpmaif_rx.h"
49 #include "t7xx_netdev.h"
50 #include "t7xx_pci.h"
51 
52 #define DPMAIF_BAT_COUNT		8192
53 #define DPMAIF_FRG_COUNT		4814
54 #define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)
55 
56 #define DPMAIF_BAT_CNT_THRESHOLD	30
57 #define DPMAIF_PIT_CNT_THRESHOLD	60
58 #define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
59 #define DPMAIF_NOTIFY_RELEASE_COUNT	128
60 #define DPMAIF_POLL_PIT_TIME_US		20
61 #define DPMAIF_POLL_PIT_MAX_TIME_US	2000
62 #define DPMAIF_WQ_TIME_LIMIT_MS		2
63 #define DPMAIF_CS_RESULT_PASS		0
64 
65 /* Packet type */
66 #define DES_PT_PD			0
67 #define DES_PT_MSG			1
68 /* Buffer type */
69 #define PKT_BUF_FRAG			1
70 
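/* Reconstruct the buffer ID (BID) carried by a normal (PD) PIT entry.
 * The ID is split across the descriptor: PD_PIT_H_BID in the footer holds the
 * high-order bits and PD_PIT_BUFFER_ID in the header holds the low 13 bits.
 * With illustrative values H_BID = 1 and BUFFER_ID = 5, the resulting
 * BID is (1 << 13) + 5 = 8197.
 */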
71 static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
72 {
73 	u32 value;
74 
75 	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
76 	value <<= 13;
77 	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
78 	return value;
79 }
80 
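/* Advance the software BAT write index by bat_cnt entries, wrapping at
 * bat_size_cnt, and reject the update if the new write index would overtake the
 * release read index (i.e. overwrite entries that have not been released yet).
 * Illustrative example with bat_size_cnt = 8192: wr_idx = 8190 and bat_cnt = 5
 * give new_wr_idx = 3, which is accepted only if it is still below
 * bat_release_rd_idx.
 */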
81 static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
82 					 const unsigned int q_num, const unsigned int bat_cnt)
83 {
84 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
85 	struct dpmaif_bat_request *bat_req = rxq->bat_req;
86 	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
87 
88 	if (!rxq->que_started) {
89 		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
90 		return -EINVAL;
91 	}
92 
93 	old_rl_idx = bat_req->bat_release_rd_idx;
94 	old_wr_idx = bat_req->bat_wr_idx;
95 	new_wr_idx = old_wr_idx + bat_cnt;
96 
97 	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
98 		goto err_flow;
99 
100 	if (new_wr_idx >= bat_req->bat_size_cnt) {
101 		new_wr_idx -= bat_req->bat_size_cnt;
102 		if (new_wr_idx >= old_rl_idx)
103 			goto err_flow;
104 	}
105 
106 	bat_req->bat_wr_idx = new_wr_idx;
107 	return 0;
108 
109 err_flow:
110 	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
111 	return -EINVAL;
112 }
113 
114 static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
115 					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
116 {
117 	dma_addr_t data_bus_addr;
118 	struct sk_buff *skb;
119 
120 	skb = __dev_alloc_skb(size, GFP_KERNEL);
121 	if (!skb)
122 		return false;
123 
124 	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
125 	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
126 		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
127 		dev_kfree_skb_any(skb);
128 		return false;
129 	}
130 
131 	cur_skb->skb = skb;
132 	cur_skb->data_bus_addr = data_bus_addr;
133 	cur_skb->data_len = size;
134 
135 	return true;
136 }
137 
138 static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
139 			       unsigned int index)
140 {
141 	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
142 
143 	if (bat_skb->skb) {
144 		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
145 		dev_kfree_skb(bat_skb->skb);
146 		bat_skb->skb = NULL;
147 	}
148 }
149 
150 /**
151  * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
152  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
153  * @bat_req: Pointer to BAT request structure.
154  * @q_num: Queue number.
155  * @buf_cnt: Number of buffers to allocate.
156  * @initial: Indicates if the ring is being populated for the first time.
157  *
158  * Allocate skbs and store the start address of each data buffer in the BAT ring.
159  * If this is not the initial call, notify the HW about the new entries.
160  *
161  * Return:
162  * * 0		- Success.
163  * * -ERROR	- Error code.
164  */
165 int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
166 			     const struct dpmaif_bat_request *bat_req,
167 			     const unsigned int q_num, const unsigned int buf_cnt,
168 			     const bool initial)
169 {
170 	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
171 	int ret;
172 
173 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
174 		return -EINVAL;
175 
176 	/* Check BAT buffer space */
177 	bat_max_cnt = bat_req->bat_size_cnt;
178 
179 	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
180 					    bat_req->bat_wr_idx, DPMAIF_WRITE);
181 	if (buf_cnt > bat_cnt)
182 		return -ENOMEM;
183 
184 	bat_start_idx = bat_req->bat_wr_idx;
185 
186 	for (i = 0; i < buf_cnt; i++) {
187 		unsigned int cur_bat_idx = bat_start_idx + i;
188 		struct dpmaif_bat_skb *cur_skb;
189 		struct dpmaif_bat *cur_bat;
190 
191 		if (cur_bat_idx >= bat_max_cnt)
192 			cur_bat_idx -= bat_max_cnt;
193 
194 		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
195 		if (!cur_skb->skb &&
196 		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
197 			break;
198 
199 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
200 		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
201 		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
202 	}
203 
204 	if (!i)
205 		return -ENOMEM;
206 
207 	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
208 	if (ret)
209 		goto err_unmap_skbs;
210 
211 	if (!initial) {
212 		unsigned int hw_wr_idx;
213 
214 		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
215 		if (ret)
216 			goto err_unmap_skbs;
217 
218 		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
219 							  DPF_RX_QNO_DFT);
220 		if (hw_wr_idx != bat_req->bat_wr_idx) {
221 			ret = -EFAULT;
222 			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
223 			goto err_unmap_skbs;
224 		}
225 	}
226 
227 	return 0;
228 
229 err_unmap_skbs:
230 	while (i--)
231 		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
232 
233 	return ret;
234 }
235 
236 static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
237 					  const unsigned int rel_entry_num)
238 {
239 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
240 	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
241 	int ret;
242 
243 	if (!rxq->que_started)
244 		return 0;
245 
246 	if (rel_entry_num >= rxq->pit_size_cnt) {
247 		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
248 		return -EINVAL;
249 	}
250 
251 	old_rel_idx = rxq->pit_release_rd_idx;
252 	new_rel_idx = old_rel_idx + rel_entry_num;
253 	hw_wr_idx = rxq->pit_wr_idx;
254 	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
255 		new_rel_idx -= rxq->pit_size_cnt;
256 
257 	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
258 	if (ret) {
259 		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
260 		return ret;
261 	}
262 
263 	rxq->pit_release_rd_idx = new_rel_idx;
264 	return 0;
265 }
266 
267 static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
268 {
269 	unsigned long flags;
270 
271 	spin_lock_irqsave(&bat_req->mask_lock, flags);
272 	set_bit(idx, bat_req->bat_bitmap);
273 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
274 }
275 
276 static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
277 				       const unsigned int cur_bid)
278 {
279 	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
280 	struct dpmaif_bat_page *bat_page;
281 
282 	if (cur_bid >= DPMAIF_FRG_COUNT)
283 		return -EINVAL;
284 
285 	bat_page = bat_frag->bat_skb + cur_bid;
286 	if (!bat_page->page)
287 		return -EINVAL;
288 
289 	return 0;
290 }
291 
292 static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
293 				unsigned int index)
294 {
295 	struct dpmaif_bat_page *bat_page = bat_page_base + index;
296 
297 	if (bat_page->page) {
298 		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
299 		put_page(bat_page->page);
300 		bat_page->page = NULL;
301 	}
302 }
303 
304 /**
305  * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
306  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
307  * @bat_req: Pointer to BAT request structure.
308  * @buf_cnt: Number of buffers to allocate.
309  * @initial: Indicates if the ring is being populated for the first time.
310  *
311  * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
312  * This function allocates a page fragment and stores the start address of the page
313  * into the Fragment BAT ring.
314  * If this is not the initial call, notify the HW about the new entries.
315  *
316  * Return:
317  * * 0		- Success.
318  * * -ERROR	- Error code.
319  */
320 int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
321 			      const unsigned int buf_cnt, const bool initial)
322 {
323 	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
324 	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
325 	int ret = 0, i;
326 
327 	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
328 		return -EINVAL;
329 
330 	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
331 					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
332 					      DPMAIF_WRITE);
333 	if (buf_cnt > buf_space) {
334 		dev_err(dpmaif_ctrl->dev,
335 			"Requested more buffers than the space available in RX frag ring\n");
336 		return -EINVAL;
337 	}
338 
339 	for (i = 0; i < buf_cnt; i++) {
340 		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
341 		struct dpmaif_bat *cur_bat;
342 		dma_addr_t data_base_addr;
343 
344 		if (!cur_page->page) {
345 			unsigned long offset;
346 			struct page *page;
347 			void *data;
348 
349 			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
350 			if (!data)
351 				break;
352 
353 			page = virt_to_head_page(data);
354 			offset = data - page_address(page);
355 
356 			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
357 						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
358 			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
359 				put_page(virt_to_head_page(data));
360 				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
361 				break;
362 			}
363 
364 			cur_page->page = page;
365 			cur_page->data_bus_addr = data_base_addr;
366 			cur_page->offset = offset;
367 			cur_page->data_len = bat_req->pkt_buf_sz;
368 		}
369 
370 		data_base_addr = cur_page->data_bus_addr;
371 		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
372 		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
373 		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
374 		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
375 	}
376 
377 	bat_req->bat_wr_idx = cur_bat_idx;
378 
379 	if (!initial)
380 		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
381 
382 	if (i < buf_cnt) {
383 		ret = -ENOMEM;
384 		if (initial) {
385 			while (i--)
386 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
387 		}
388 	}
389 
390 	return ret;
391 }
392 
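/* Attach the page referenced by a fragment PIT entry to the current skb as a
 * new page fragment. The offset into the page is the difference between the
 * DMA address reported in the PIT and the address the page was mapped at,
 * plus the page offset recorded at allocation time. The page reference is
 * handed over to the skb, so the fragment BAT bookkeeping entry is cleared.
 */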
393 static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
394 				       const struct dpmaif_pit *pkt_info,
395 				       struct sk_buff *skb)
396 {
397 	unsigned long long data_bus_addr, data_base_addr;
398 	struct skb_shared_info *shinfo = skb_shinfo(skb);
399 	struct device *dev = rxq->dpmaif_ctrl->dev;
400 	struct dpmaif_bat_page *page_info;
401 	unsigned int data_len;
402 	int data_offset;
403 
404 	page_info = rxq->bat_frag->bat_skb;
405 	page_info += t7xx_normal_pit_bid(pkt_info);
406 
407 	if (!page_info->page)
408 		return -EINVAL;
409 
410 	if (shinfo->nr_frags >= MAX_SKB_FRAGS)
411 		return -EINVAL;
412 
413 	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
414 
415 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
416 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
417 	data_base_addr = page_info->data_bus_addr;
418 	data_offset = data_bus_addr - data_base_addr;
419 	data_offset += page_info->offset;
420 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
421 	skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page,
422 			data_offset, data_len, page_info->data_len);
423 
424 	page_info->page = NULL;
425 	page_info->offset = 0;
426 	page_info->data_len = 0;
427 	return 0;
428 }
429 
430 static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
431 				const struct dpmaif_pit *pkt_info,
432 				const struct dpmaif_cur_rx_skb_info *skb_info)
433 {
434 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
435 	int ret;
436 
437 	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
438 	if (ret < 0)
439 		return ret;
440 
441 	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
442 	if (ret < 0) {
443 		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
444 		return ret;
445 	}
446 
447 	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
448 	return 0;
449 }
450 
451 static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
452 {
453 	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
454 
455 	bat_skb += cur_bid;
456 	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
457 		return -EINVAL;
458 
459 	return 0;
460 }
461 
462 static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
463 {
464 	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
465 }
466 
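/* Each PIT entry carries a sequence number. Poll for up to
 * DPMAIF_POLL_PIT_MAX_TIME_US until it matches the value this queue expects,
 * then advance the expected sequence, wrapping at DPMAIF_DL_PIT_SEQ_VALUE.
 * On timeout the entry is treated as not yet ready and the caller schedules
 * another polling pass.
 */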
467 static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
468 				     const struct dpmaif_pit *pit)
469 {
470 	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
471 
472 	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
473 				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
474 				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
475 		return -EFAULT;
476 
477 	rxq->expect_pit_seq++;
478 	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
479 		rxq->expect_pit_seq = 0;
480 
481 	return 0;
482 }
483 
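/* Count how many consecutive BAT entries, starting at bat_release_rd_idx, have
 * their bit set in bat_bitmap (i.e. have been consumed and may be released back
 * to the hardware). The search wraps around to the start of the bitmap when no
 * zero bit is found before the end.
 */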
484 static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
485 {
486 	unsigned int zero_index;
487 	unsigned long flags;
488 
489 	spin_lock_irqsave(&bat_req->mask_lock, flags);
490 
491 	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
492 					bat_req->bat_release_rd_idx);
493 
494 	if (zero_index < bat_req->bat_size_cnt) {
495 		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
496 		return zero_index - bat_req->bat_release_rd_idx;
497 	}
498 
499 	/* Limit the search to the entries before bat_release_rd_idx */
500 	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
501 	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
502 	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
503 }
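/* Release rel_entry_num normal or fragment BAT entries: validate the new
 * release read index against the hardware read index, clear the corresponding
 * bits in the bitmap and advance bat_release_rd_idx. Returns the number of
 * released entries, or a negative error code.
 */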
504 
505 static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
506 					 const unsigned int rel_entry_num,
507 					 const enum bat_type buf_type)
508 {
509 	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
510 	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
511 	struct dpmaif_bat_request *bat;
512 	unsigned long flags;
513 
514 	if (!rxq->que_started || !rel_entry_num)
515 		return -EINVAL;
516 
517 	if (buf_type == BAT_TYPE_FRAG) {
518 		bat = rxq->bat_frag;
519 		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
520 	} else {
521 		bat = rxq->bat_req;
522 		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
523 	}
524 
525 	if (rel_entry_num >= bat->bat_size_cnt)
526 		return -EINVAL;
527 
528 	old_rel_idx = bat->bat_release_rd_idx;
529 	new_rel_idx = old_rel_idx + rel_entry_num;
530 
531 	/* No need to release if the queue is empty */
532 	if (bat->bat_wr_idx == old_rel_idx)
533 		return 0;
534 
535 	if (hw_rd_idx >= old_rel_idx) {
536 		if (new_rel_idx > hw_rd_idx)
537 			return -EINVAL;
538 	}
539 
540 	if (new_rel_idx >= bat->bat_size_cnt) {
541 		new_rel_idx -= bat->bat_size_cnt;
542 		if (new_rel_idx > hw_rd_idx)
543 			return -EINVAL;
544 	}
545 
546 	spin_lock_irqsave(&bat->mask_lock, flags);
547 	for (i = 0; i < rel_entry_num; i++) {
548 		unsigned int index = bat->bat_release_rd_idx + i;
549 
550 		if (index >= bat->bat_size_cnt)
551 			index -= bat->bat_size_cnt;
552 
553 		clear_bit(index, bat->bat_bitmap);
554 	}
555 	spin_unlock_irqrestore(&bat->mask_lock, flags);
556 
557 	bat->bat_release_rd_idx = new_rel_idx;
558 	return rel_entry_num;
559 }
560 
561 static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
562 {
563 	int ret;
564 
565 	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
566 		return 0;
567 
568 	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
569 	if (ret)
570 		return ret;
571 
572 	rxq->pit_remain_release_cnt = 0;
573 	return 0;
574 }
575 
576 static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
577 {
578 	unsigned int bid_cnt;
579 	int ret;
580 
581 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
582 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
583 		return 0;
584 
585 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
586 	if (ret <= 0) {
587 		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
588 		return ret;
589 	}
590 
591 	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
592 	if (ret < 0)
593 		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
594 
595 	return ret;
596 }
597 
598 static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
599 {
600 	unsigned int bid_cnt;
601 	int ret;
602 
603 	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
604 	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
605 		return 0;
606 
607 	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
608 	if (ret <= 0) {
609 		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
610 		return ret;
611 	}
612 
613 	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
614 }
615 
616 static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
617 				      const struct dpmaif_pit *msg_pit,
618 				      struct dpmaif_cur_rx_skb_info *skb_info)
619 {
620 	int header = le32_to_cpu(msg_pit->header);
621 
622 	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
623 	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
624 	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
625 	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
626 }
627 
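/* Turn the skb attached to a normal BAT entry into the current RX skb. The
 * payload offset inside the preallocated buffer is the difference between the
 * DMA address reported in the PIT and the address the buffer was mapped at;
 * the payload length comes from PD_PIT_DATA_LEN. Ownership of the skb moves to
 * skb_info, so the BAT slot is cleared.
 */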
628 static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
629 				       const struct dpmaif_pit *pkt_info,
630 				       struct dpmaif_cur_rx_skb_info *skb_info)
631 {
632 	unsigned long long data_bus_addr, data_base_addr;
633 	struct device *dev = rxq->dpmaif_ctrl->dev;
634 	struct dpmaif_bat_skb *bat_skb;
635 	unsigned int data_len;
636 	struct sk_buff *skb;
637 	int data_offset;
638 
639 	bat_skb = rxq->bat_req->bat_skb;
640 	bat_skb += t7xx_normal_pit_bid(pkt_info);
641 	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
642 
643 	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
644 	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
645 	data_base_addr = bat_skb->data_bus_addr;
646 	data_offset = data_bus_addr - data_base_addr;
647 	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
648 	skb = bat_skb->skb;
649 	skb->len = 0;
650 	skb_reset_tail_pointer(skb);
651 	skb_reserve(skb, data_offset);
652 
653 	if (skb->tail + data_len > skb->end) {
654 		dev_err(dev, "No buffer space available\n");
655 		return -ENOBUFS;
656 	}
657 
658 	skb_put(skb, data_len);
659 	skb_info->cur_skb = skb;
660 	bat_skb->skb = NULL;
661 	return 0;
662 }
663 
664 static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
665 				  const struct dpmaif_pit *pkt_info,
666 				  struct dpmaif_cur_rx_skb_info *skb_info)
667 {
668 	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
669 	int ret;
670 
671 	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
672 	if (ret < 0)
673 		return ret;
674 
675 	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
676 	if (ret < 0) {
677 		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
678 		return ret;
679 	}
680 
681 	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
682 	return 0;
683 }
684 
685 static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
686 {
687 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
688 	int ret;
689 
690 	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
691 
692 	ret = t7xx_dpmaif_pit_release_and_add(rxq);
693 	if (ret < 0)
694 		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
695 
696 	return ret;
697 }
698 
699 static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
700 			       struct dpmaif_cur_rx_skb_info *skb_info)
701 {
702 	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
703 	struct sk_buff *skb = skb_info->cur_skb;
704 	struct t7xx_skb_cb *skb_cb;
705 	u8 netif_id;
706 
707 	skb_info->cur_skb = NULL;
708 
709 	if (skb_info->pit_dp) {
710 		dev_kfree_skb_any(skb);
711 		return;
712 	}
713 
714 	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
715 									CHECKSUM_NONE;
716 	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
717 	skb_cb = T7XX_SKB_CB(skb);
718 	skb_cb->netif_idx = netif_id;
719 	skb_cb->rx_pkt_type = skb_info->pkt_type;
720 	dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
721 }
722 
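/* Walk up to pit_cnt PIT entries starting at pit_rd_idx. A message PIT
 * (DES_PT_MSG) opens a new packet and carries its metadata; the PD PITs that
 * follow supply the payload, either from the normal BAT or, for PKT_BUF_FRAG,
 * from the fragment BAT. When a PD PIT has the continuation bit (PD_PIT_CONT)
 * cleared, the packet is complete and is handed to the upper layer. The
 * hardware is notified about released PIT entries every
 * DPMAIF_NOTIFY_RELEASE_COUNT descriptors and once more at the end.
 */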
723 static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
724 				const unsigned int budget, int *once_more)
725 {
726 	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
727 	struct device *dev = rxq->dpmaif_ctrl->dev;
728 	struct dpmaif_cur_rx_skb_info *skb_info;
729 	int ret = 0;
730 
731 	pit_len = rxq->pit_size_cnt;
732 	skb_info = &rxq->rx_data_info;
733 	cur_pit = rxq->pit_rd_idx;
734 
735 	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
736 		struct dpmaif_pit *pkt_info;
737 		u32 val;
738 
739 		if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
740 			break;
741 
742 		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
743 		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
744 			dev_err_ratelimited(dev, "RXQ%u PIT sequence check failed\n", rxq->index);
745 			*once_more = 1;
746 			return recv_skb_cnt;
747 		}
748 
749 		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
750 		if (val == DES_PT_MSG) {
751 			if (skb_info->msg_pit_received)
752 				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
753 
754 			skb_info->msg_pit_received = true;
755 			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
756 		} else { /* DES_PT_PD */
757 			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
758 			if (val != PKT_BUF_FRAG)
759 				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
760 			else if (!skb_info->cur_skb)
761 				ret = -EINVAL;
762 			else
763 				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
764 
765 			if (ret < 0) {
766 				skb_info->err_payload = 1;
767 				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
768 			}
769 
770 			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
771 			if (!val) {
772 				if (!skb_info->err_payload) {
773 					t7xx_dpmaif_rx_skb(rxq, skb_info);
774 				} else if (skb_info->cur_skb) {
775 					dev_kfree_skb_any(skb_info->cur_skb);
776 					skb_info->cur_skb = NULL;
777 				}
778 
779 				memset(skb_info, 0, sizeof(*skb_info));
780 				recv_skb_cnt++;
781 			}
782 		}
783 
784 		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
785 		rxq->pit_rd_idx = cur_pit;
786 		rxq->pit_remain_release_cnt++;
787 
788 		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
789 			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
790 			if (ret < 0)
791 				break;
792 		}
793 	}
794 
795 	if (!ret)
796 		ret = t7xx_dpmaifq_rx_notify_hw(rxq);
797 
798 	if (ret)
799 		return ret;
800 
801 	return recv_skb_cnt;
802 }
803 
804 static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
805 {
806 	unsigned int hw_wr_idx, pit_cnt;
807 
808 	if (!rxq->que_started)
809 		return 0;
810 
811 	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
812 	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
813 					    DPMAIF_READ);
814 	rxq->pit_wr_idx = hw_wr_idx;
815 	return pit_cnt;
816 }
817 
818 static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
819 					    const unsigned int q_num,
820 					    const unsigned int budget, int *once_more)
821 {
822 	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
823 	unsigned int cnt;
824 	int ret = 0;
825 
826 	cnt = t7xx_dpmaifq_poll_pit(rxq);
827 	if (!cnt)
828 		return ret;
829 
830 	ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
831 	if (ret < 0)
832 		dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
833 
834 	return ret;
835 }
836 
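/* NAPI poll callback. Before touching the hardware, the poll path must own the
 * device sleep lock; if the lock has not been granted yet (sleep_lock_acquire
 * is not complete), the poll completes with no work done and reschedules
 * itself, remembering via sleep_lock_pending that sleep was already disabled.
 * After RX collection, NAPI is completed only if the budget was not exhausted,
 * and the RX done interrupt is unmasked again.
 */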
837 int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
838 {
839 	struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
840 	struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
841 	int ret, once_more = 0, work_done = 0;
842 
843 	atomic_set(&rxq->rx_processing, 1);
844 	/* Ensure rx_processing is set to 1 before the RX flow actually begins */
845 	smp_mb();
846 
847 	if (!rxq->que_started) {
848 		atomic_set(&rxq->rx_processing, 0);
849 		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
850 		dev_err(rxq->dpmaif_ctrl->dev, "RXQ %d has not been started\n", rxq->index);
851 		return work_done;
852 	}
853 
854 	if (!rxq->sleep_lock_pending)
855 		t7xx_pci_disable_sleep(t7xx_dev);
856 
857 	ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
858 	if (!ret) {
859 		napi_complete_done(napi, work_done);
860 		rxq->sleep_lock_pending = true;
861 		napi_schedule(napi);
862 		return work_done;
863 	}
864 
865 	rxq->sleep_lock_pending = false;
866 	while (work_done < budget) {
867 		int each_budget = budget - work_done;
868 		int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
869 							      each_budget, &once_more);
870 		if (rx_cnt > 0)
871 			work_done += rx_cnt;
872 		else
873 			break;
874 	}
875 
876 	if (once_more) {
877 		napi_gro_flush(napi, false);
878 		work_done = budget;
879 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
880 	} else if (work_done < budget) {
881 		napi_complete_done(napi, work_done);
882 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
883 		t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
884 		t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
885 		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
886 		atomic_set(&rxq->rx_processing, 0);
887 	} else {
888 		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
889 	}
890 
891 	return work_done;
892 }
893 
894 void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
895 {
896 	struct dpmaif_rx_queue *rxq;
897 	struct dpmaif_ctrl *ctrl;
898 	int qno, ret;
899 
900 	qno = ffs(que_mask) - 1;
901 	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
902 		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno);
903 		return;
904 	}
905 
906 	rxq = &dpmaif_ctrl->rxq[qno];
907 	ctrl = rxq->dpmaif_ctrl;
908 	/* We need to make sure that the modem has been resumed before
909 	 * scheduling NAPI. This can't be done inside the polling function,
910 	 * as we could block while waiting for the device to resume, and
911 	 * blocking is not allowed in the softirq context the poll
912 	 * function runs in.
913 	 */
914 	ret = pm_runtime_resume_and_get(ctrl->dev);
915 	if (ret < 0 && ret != -EACCES) {
916 		dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
917 		return;
918 	}
919 	napi_schedule(&rxq->napi);
920 }
921 
922 static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
923 				  const struct dpmaif_bat_request *bat_req)
924 {
925 	if (bat_req->bat_base)
926 		dma_free_coherent(dpmaif_ctrl->dev,
927 				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
928 				  bat_req->bat_base, bat_req->bat_bus_addr);
929 }
930 
931 /**
932  * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
933  * @dpmaif_ctrl: Pointer to DPMAIF context structure.
934  * @bat_req: Pointer to BAT request structure.
935  * @buf_type: BAT ring type.
936  *
937  * This function allocates the BAT ring buffer shared with the HW device, and also
938  * allocates a buffer used to store information about the BAT skbs for later release.
939  *
940  * Return:
941  * * 0		- Success.
942  * * -ERROR	- Error code.
943  */
944 int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
945 			  const enum bat_type buf_type)
946 {
947 	int sw_buf_size;
948 
949 	if (buf_type == BAT_TYPE_FRAG) {
950 		sw_buf_size = sizeof(struct dpmaif_bat_page);
951 		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
952 		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
953 	} else {
954 		sw_buf_size = sizeof(struct dpmaif_bat_skb);
955 		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
956 		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
957 	}
958 
959 	bat_req->type = buf_type;
960 	bat_req->bat_wr_idx = 0;
961 	bat_req->bat_release_rd_idx = 0;
962 
963 	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
964 					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
965 					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
966 	if (!bat_req->bat_base)
967 		return -ENOMEM;
968 
969 	/* For AP SW to record skb information */
970 	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
971 					GFP_KERNEL);
972 	if (!bat_req->bat_skb)
973 		goto err_free_dma_mem;
974 
975 	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
976 	if (!bat_req->bat_bitmap)
977 		goto err_free_dma_mem;
978 
979 	spin_lock_init(&bat_req->mask_lock);
980 	atomic_set(&bat_req->refcnt, 0);
981 	return 0;
982 
983 err_free_dma_mem:
984 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
985 
986 	return -ENOMEM;
987 }
988 
989 void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
990 {
991 	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
992 		return;
993 
994 	bitmap_free(bat_req->bat_bitmap);
995 	bat_req->bat_bitmap = NULL;
996 
997 	if (bat_req->bat_skb) {
998 		unsigned int i;
999 
1000 		for (i = 0; i < bat_req->bat_size_cnt; i++) {
1001 			if (bat_req->type == BAT_TYPE_FRAG)
1002 				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1003 			else
1004 				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1005 		}
1006 	}
1007 
1008 	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
1009 }
1010 
1011 static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
1012 {
1013 	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
1014 	rxq->pit_rd_idx = 0;
1015 	rxq->pit_wr_idx = 0;
1016 	rxq->pit_release_rd_idx = 0;
1017 	rxq->expect_pit_seq = 0;
1018 	rxq->pit_remain_release_cnt = 0;
1019 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1020 
1021 	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
1022 					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1023 					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
1024 	if (!rxq->pit_base)
1025 		return -ENOMEM;
1026 
1027 	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
1028 	atomic_inc(&rxq->bat_req->refcnt);
1029 
1030 	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
1031 	atomic_inc(&rxq->bat_frag->refcnt);
1032 	return 0;
1033 }
1034 
1035 static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
1036 {
1037 	if (!rxq->dpmaif_ctrl)
1038 		return;
1039 
1040 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
1041 	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
1042 
1043 	if (rxq->pit_base)
1044 		dma_free_coherent(rxq->dpmaif_ctrl->dev,
1045 				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1046 				  rxq->pit_base, rxq->pit_bus_addr);
1047 }
1048 
1049 int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
1050 {
1051 	int ret;
1052 
1053 	ret = t7xx_dpmaif_rx_alloc(queue);
1054 	if (ret < 0)
1055 		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
1056 
1057 	return ret;
1058 }
1059 
1060 void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
1061 {
1062 	t7xx_dpmaif_rx_buf_free(queue);
1063 }
1064 
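/* Deferred BAT release work. It runs outside the NAPI path, so it may resume
 * the device and wait for the device sleep lock, and then replenishes both the
 * normal and the fragment BAT rings for the default queue (all RX queues share
 * the same BAT tables).
 */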
1065 static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
1066 {
1067 	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
1068 	struct dpmaif_rx_queue *rxq;
1069 	int ret;
1070 
1071 	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
1072 	if (ret < 0 && ret != -EACCES)
1073 		return;
1074 
1075 	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
1076 
1077 	/* All RX queues share one BAT table, so use DPF_RX_QNO_DFT */
1078 	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
1079 	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
1080 		t7xx_dpmaif_bat_release_and_add(rxq);
1081 		t7xx_dpmaif_frag_bat_release_and_add(rxq);
1082 	}
1083 
1084 	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
1085 	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
1086 }
1087 
1088 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
1089 {
1090 	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
1091 						      WQ_MEM_RECLAIM | WQ_PERCPU,
1092 						      1);
1093 	if (!dpmaif_ctrl->bat_release_wq)
1094 		return -ENOMEM;
1095 
1096 	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
1097 	return 0;
1098 }
1099 
1100 void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
1101 {
1102 	flush_work(&dpmaif_ctrl->bat_release_work);
1103 
1104 	if (dpmaif_ctrl->bat_release_wq) {
1105 		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
1106 		dpmaif_ctrl->bat_release_wq = NULL;
1107 	}
1108 }
1109 
1110 /**
1111  * t7xx_dpmaif_rx_stop() - Suspend RX flow.
1112  * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
1113  *
1114  * Wait for all RX work to finish executing and mark each RX queue as stopped.
1115  */
1116 void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
1117 {
1118 	unsigned int i;
1119 
1120 	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
1121 		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
1122 		int timeout, value;
1123 
1124 		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
1125 						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
1126 		if (timeout)
1127 			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
1128 
1129 		/* Ensure RX processing has stopped before we set rxq->que_started to false */
1130 		smp_mb();
1131 		rxq->que_started = false;
1132 	}
1133 }
1134 
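/* Stop a single RX queue: poll until no unread PIT entries remain (giving up
 * after DPMAIF_MAX_CHECK_COUNT attempts), then clear the PIT and BAT rings and
 * reset all software indexes.
 */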
1135 static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
1136 {
1137 	int cnt, j = 0;
1138 
1139 	rxq->que_started = false;
1140 
1141 	do {
1142 		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
1143 						rxq->pit_wr_idx, DPMAIF_READ);
1144 
1145 		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
1146 			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
1147 			break;
1148 		}
1149 	} while (cnt);
1150 
1151 	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
1152 	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
1153 	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
1154 	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1155 
1156 	rxq->pit_rd_idx = 0;
1157 	rxq->pit_wr_idx = 0;
1158 	rxq->pit_release_rd_idx = 0;
1159 	rxq->expect_pit_seq = 0;
1160 	rxq->pit_remain_release_cnt = 0;
1161 	rxq->bat_req->bat_release_rd_idx = 0;
1162 	rxq->bat_req->bat_wr_idx = 0;
1163 	rxq->bat_frag->bat_release_rd_idx = 0;
1164 	rxq->bat_frag->bat_wr_idx = 0;
1165 }
1166 
1167 void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
1168 {
1169 	int i;
1170 
1171 	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
1172 		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
1173 }
1174