// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/gro.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"

#define DPMAIF_BAT_COUNT		8192
#define DPMAIF_FRG_COUNT		4814
#define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)

#define DPMAIF_BAT_CNT_THRESHOLD	30
#define DPMAIF_PIT_CNT_THRESHOLD	60
#define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
#define DPMAIF_NOTIFY_RELEASE_COUNT	128
#define DPMAIF_POLL_PIT_TIME_US		20
#define DPMAIF_POLL_PIT_MAX_TIME_US	2000
#define DPMAIF_WQ_TIME_LIMIT_MS		2
#define DPMAIF_CS_RESULT_PASS		0

/* Packet type */
#define DES_PT_PD			0
#define DES_PT_MSG			1
/* Buffer type */
#define PKT_BUF_FRAG			1

static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
{
	u32 value;

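	/* The buffer ID is split across the PIT entry: the upper bits are
	 * carried in the packet descriptor footer (PD_PIT_H_BID) and the
	 * low 13 bits in the header (PD_PIT_BUFFER_ID).
	 */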
	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
	value <<= 13;
	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
	return value;
}

static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
					 const unsigned int q_num, const unsigned int bat_cnt)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	struct dpmaif_bat_request *bat_req = rxq->bat_req;
	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;

	if (!rxq->que_started) {
		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
		return -EINVAL;
	}

	old_rl_idx = bat_req->bat_release_rd_idx;
	old_wr_idx = bat_req->bat_wr_idx;
	new_wr_idx = old_wr_idx + bat_cnt;

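	/* The write index must never catch up with the release index,
	 * otherwise BAT entries that have not been released yet would be
	 * overwritten.
	 */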
	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
		goto err_flow;

	if (new_wr_idx >= bat_req->bat_size_cnt) {
		new_wr_idx -= bat_req->bat_size_cnt;
		if (new_wr_idx >= old_rl_idx)
			goto err_flow;
	}

	bat_req->bat_wr_idx = new_wr_idx;
	return 0;

err_flow:
	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
	return -EINVAL;
}

static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
{
	dma_addr_t data_bus_addr;
	struct sk_buff *skb;

	skb = __dev_alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return false;

	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
		dev_kfree_skb_any(skb);
		return false;
	}

	cur_skb->skb = skb;
	cur_skb->data_bus_addr = data_bus_addr;
	cur_skb->data_len = size;

	return true;
}

static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
			       unsigned int index)
{
	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;

	if (bat_skb->skb) {
		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
		dev_kfree_skb(bat_skb->skb);
		bat_skb->skb = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @q_num: Queue number.
 * @buf_cnt: Number of buffers to allocate.
 * @initial: Indicates if the ring is being populated for the first time.
 *
 * Allocate skb and store the start address of the data buffer into the BAT ring.
 * If this is not the initial call, notify the HW about the new entries.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
			     const struct dpmaif_bat_request *bat_req,
			     const unsigned int q_num, const unsigned int buf_cnt,
			     const bool initial)
{
	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
	int ret;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

	/* Check BAT buffer space */
	bat_max_cnt = bat_req->bat_size_cnt;

	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
					    bat_req->bat_wr_idx, DPMAIF_WRITE);
	if (buf_cnt > bat_cnt)
		return -ENOMEM;

	bat_start_idx = bat_req->bat_wr_idx;

	for (i = 0; i < buf_cnt; i++) {
		unsigned int cur_bat_idx = bat_start_idx + i;
		struct dpmaif_bat_skb *cur_skb;
		struct dpmaif_bat *cur_bat;

		if (cur_bat_idx >= bat_max_cnt)
			cur_bat_idx -= bat_max_cnt;

		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
		if (!cur_skb->skb &&
		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
			break;

		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
	}

	if (!i)
		return -ENOMEM;

	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
	if (ret)
		goto err_unmap_skbs;

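	/* Hand the new entries over to the HW and verify that its write
	 * index matches the SW one.
	 */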
	if (!initial) {
		unsigned int hw_wr_idx;

		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
		if (ret)
			goto err_unmap_skbs;

		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
							  DPF_RX_QNO_DFT);
		if (hw_wr_idx != bat_req->bat_wr_idx) {
			ret = -EFAULT;
			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
			goto err_unmap_skbs;
		}
	}

	return 0;

err_unmap_skbs:
	while (i--)
		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);

	return ret;
}

static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
					  const unsigned int rel_entry_num)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
	int ret;

	if (!rxq->que_started)
		return 0;

	if (rel_entry_num >= rxq->pit_size_cnt) {
		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
		return -EINVAL;
	}

	old_rel_idx = rxq->pit_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;
	hw_wr_idx = rxq->pit_wr_idx;
	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
		new_rel_idx -= rxq->pit_size_cnt;

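	/* Tell the HW how many PIT entries have been consumed so it can
	 * reuse them for new packets.
	 */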
	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
	if (ret) {
		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
		return ret;
	}

	rxq->pit_release_rd_idx = new_rel_idx;
	return 0;
}

static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);
	set_bit(idx, bat_req->bat_bitmap);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
}

static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
				       const unsigned int cur_bid)
{
	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
	struct dpmaif_bat_page *bat_page;

	if (cur_bid >= DPMAIF_FRG_COUNT)
		return -EINVAL;

	bat_page = bat_frag->bat_skb + cur_bid;
	if (!bat_page->page)
		return -EINVAL;

	return 0;
}

static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
				unsigned int index)
{
	struct dpmaif_bat_page *bat_page = bat_page_base + index;

	if (bat_page->page) {
		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
		put_page(bat_page->page);
		bat_page->page = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @buf_cnt: Number of buffers to allocate.
 * @initial: Indicates if the ring is being populated for the first time.
 *
 * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
 * This function allocates a page fragment and stores the start address of the page
 * into the Fragment BAT ring.
 * If this is not the initial call, notify the HW about the new entries.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			      const unsigned int buf_cnt, const bool initial)
{
	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
	int ret = 0, i;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
					      DPMAIF_WRITE);
	if (buf_cnt > buf_space) {
		dev_err(dpmaif_ctrl->dev,
			"Requested more buffers than the space available in RX frag ring\n");
		return -EINVAL;
	}

	for (i = 0; i < buf_cnt; i++) {
		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
		struct dpmaif_bat *cur_bat;
		dma_addr_t data_base_addr;

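		/* Reuse the slot if it already holds a mapped page fragment;
		 * otherwise allocate and map a new one.
		 */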
		if (!cur_page->page) {
			unsigned long offset;
			struct page *page;
			void *data;

			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
			if (!data)
				break;

			page = virt_to_head_page(data);
			offset = data - page_address(page);

			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
				put_page(virt_to_head_page(data));
				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
				break;
			}

			cur_page->page = page;
			cur_page->data_bus_addr = data_base_addr;
			cur_page->offset = offset;
			cur_page->data_len = bat_req->pkt_buf_sz;
		}

		data_base_addr = cur_page->data_bus_addr;
		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
	}

	bat_req->bat_wr_idx = cur_bat_idx;

	if (!initial)
		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);

	if (i < buf_cnt) {
		ret = -ENOMEM;
		if (initial) {
			while (--i > 0)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	return ret;
}

static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct sk_buff *skb)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_page *page_info;
	unsigned int data_len;
	int data_offset;

	page_info = rxq->bat_frag->bat_skb;
	page_info += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);

	if (!page_info->page)
		return -EINVAL;

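	/* The PIT carries the bus address of the received data; derive the
	 * offset within the page from the mapped base address.
	 */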
	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = page_info->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_offset += page_info->offset;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
			data_offset, data_len, page_info->data_len);

	page_info->page = NULL;
	page_info->offset = 0;
	page_info->data_len = 0;
	return 0;
}

static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
				const struct dpmaif_pit *pkt_info,
				const struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
	return 0;
}

static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
{
	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;

	bat_skb += cur_bid;
	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
		return -EINVAL;

	return 0;
}

static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
{
	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
}

static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
				     const struct dpmaif_pit *pit)
{
	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;

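	/* Poll until the PIT entry carries the expected sequence number,
	 * i.e. the HW has finished updating this entry.
	 */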
	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
		return -EFAULT;

	rxq->expect_pit_seq++;
	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
		rxq->expect_pit_seq = 0;

	return 0;
}

static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
{
	unsigned int zero_index;
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);

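	/* Count the contiguous run of set bits starting at the release index;
	 * these BAT entries have been consumed and can be released.
	 */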
	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
					bat_req->bat_release_rd_idx);

	if (zero_index < bat_req->bat_size_cnt) {
		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
		return zero_index - bat_req->bat_release_rd_idx;
	}

	/* Limit the search to bat_release_rd_idx */
	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
}

static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
					 const unsigned int rel_entry_num,
					 const enum bat_type buf_type)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
	struct dpmaif_bat_request *bat;
	unsigned long flags;

	if (!rxq->que_started || !rel_entry_num)
		return -EINVAL;

	if (buf_type == BAT_TYPE_FRAG) {
		bat = rxq->bat_frag;
		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
	} else {
		bat = rxq->bat_req;
		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
	}

	if (rel_entry_num >= bat->bat_size_cnt)
		return -EINVAL;

	old_rel_idx = bat->bat_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;

	/* No need to release if the queue is empty */
	if (bat->bat_wr_idx == old_rel_idx)
		return 0;

	if (hw_rd_idx >= old_rel_idx) {
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	}

	if (new_rel_idx >= bat->bat_size_cnt) {
		new_rel_idx -= bat->bat_size_cnt;
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	}

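	/* Clear the bitmap bits of the released entries so their slots can be
	 * refilled with new buffers.
	 */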
	spin_lock_irqsave(&bat->mask_lock, flags);
	for (i = 0; i < rel_entry_num; i++) {
		unsigned int index = bat->bat_release_rd_idx + i;

		if (index >= bat->bat_size_cnt)
			index -= bat->bat_size_cnt;

		clear_bit(index, bat->bat_bitmap);
	}
	spin_unlock_irqrestore(&bat->mask_lock, flags);

	bat->bat_release_rd_idx = new_rel_idx;
	return rel_entry_num;
}

static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
{
	int ret;

	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
	if (ret)
		return ret;

	rxq->pit_remain_release_cnt = 0;
	return 0;
}

static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
	if (ret < 0)
		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);

	return ret;
}

static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
		return ret;
	}

	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
}

static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
				      const struct dpmaif_pit *msg_pit,
				      struct dpmaif_cur_rx_skb_info *skb_info)
{
	int header = le32_to_cpu(msg_pit->header);

	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
}

static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_skb *bat_skb;
	unsigned int data_len;
	struct sk_buff *skb;
	int data_offset;

	bat_skb = rxq->bat_req->bat_skb;
	bat_skb += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);

	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = bat_skb->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb = bat_skb->skb;
	skb->len = 0;
	skb_reset_tail_pointer(skb);
	skb_reserve(skb, data_offset);

	if (skb->tail + data_len > skb->end) {
		dev_err(dev, "No buffer space available\n");
		return -ENOBUFS;
	}

	skb_put(skb, data_len);
	skb_info->cur_skb = skb;
	bat_skb->skb = NULL;
	return 0;
}

static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
				  const struct dpmaif_pit *pkt_info,
				  struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
	return 0;
}

static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
{
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	int ret;

	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);

	ret = t7xx_dpmaif_pit_release_and_add(rxq);
	if (ret < 0)
		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);

	return ret;
}

static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
			       struct dpmaif_cur_rx_skb_info *skb_info)
{
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	struct sk_buff *skb = skb_info->cur_skb;
	struct t7xx_skb_cb *skb_cb;
	u8 netif_id;

	skb_info->cur_skb = NULL;

	if (skb_info->pit_dp) {
		dev_kfree_skb_any(skb);
		return;
	}

	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
									CHECKSUM_NONE;
	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->netif_idx = netif_id;
	skb_cb->rx_pkt_type = skb_info->pkt_type;
	dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
}

static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
				const unsigned int budget, int *once_more)
{
	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_cur_rx_skb_info *skb_info;
	int ret = 0;

	pit_len = rxq->pit_size_cnt;
	skb_info = &rxq->rx_data_info;
	cur_pit = rxq->pit_rd_idx;

	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
		struct dpmaif_pit *pkt_info;
		u32 val;

		if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
			break;

		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
			dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
			*once_more = 1;
			return recv_skb_cnt;
		}

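		/* A message PIT carries per-packet metadata; the payload PIT
		 * entries that follow reference the actual data buffers.
		 */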
		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
		if (val == DES_PT_MSG) {
			if (skb_info->msg_pit_received)
				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);

			skb_info->msg_pit_received = true;
			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
		} else { /* DES_PT_PD */
			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
			if (val != PKT_BUF_FRAG)
				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
			else if (!skb_info->cur_skb)
				ret = -EINVAL;
			else
				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);

			if (ret < 0) {
				skb_info->err_payload = 1;
				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
			}

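			/* A cleared CONT bit marks the last PIT entry of the
			 * packet: push the skb up the stack, or drop it if a
			 * payload error was seen, and reset the parse state.
			 */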
			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
			if (!val) {
				if (!skb_info->err_payload) {
					t7xx_dpmaif_rx_skb(rxq, skb_info);
				} else if (skb_info->cur_skb) {
					dev_kfree_skb_any(skb_info->cur_skb);
					skb_info->cur_skb = NULL;
				}

				memset(skb_info, 0, sizeof(*skb_info));
				recv_skb_cnt++;
			}
		}

		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
		rxq->pit_rd_idx = cur_pit;
		rxq->pit_remain_release_cnt++;

		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
			if (ret < 0)
				break;
		}
	}

	if (!ret)
		ret = t7xx_dpmaifq_rx_notify_hw(rxq);

	if (ret)
		return ret;

	return recv_skb_cnt;
}

static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
{
	unsigned int hw_wr_idx, pit_cnt;

	if (!rxq->que_started)
		return 0;

	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
					    DPMAIF_READ);
	rxq->pit_wr_idx = hw_wr_idx;
	return pit_cnt;
}

static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
					    const unsigned int q_num,
					    const unsigned int budget, int *once_more)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	unsigned int cnt;
	int ret = 0;

	cnt = t7xx_dpmaifq_poll_pit(rxq);
	if (!cnt)
		return ret;

	ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
	if (ret < 0)
		dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);

	return ret;
}

int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
{
	struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
	struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
	int ret, once_more = 0, work_done = 0;

	atomic_set(&rxq->rx_processing, 1);
	/* Ensure rx_processing is set to 1 before the RX flow actually begins */
	smp_mb();

	if (!rxq->que_started) {
		atomic_set(&rxq->rx_processing, 0);
		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
		dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
		return work_done;
	}

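	/* Keep the device awake while the RX ring is drained; if the sleep
	 * lock is not held yet, finish this poll and reschedule NAPI so the
	 * acquisition can be retried later.
	 */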
	if (!rxq->sleep_lock_pending)
		t7xx_pci_disable_sleep(t7xx_dev);

	ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
	if (!ret) {
		napi_complete_done(napi, work_done);
		rxq->sleep_lock_pending = true;
		napi_schedule(napi);
		return work_done;
	}

	rxq->sleep_lock_pending = false;
	while (work_done < budget) {
		int each_budget = budget - work_done;
		int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
							      each_budget, &once_more);
		if (rx_cnt > 0)
			work_done += rx_cnt;
		else
			break;
	}

	if (once_more) {
		napi_gro_flush(napi, false);
		work_done = budget;
		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
	} else if (work_done < budget) {
		napi_complete_done(napi, work_done);
		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
		t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
		t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
		pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
		pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
		atomic_set(&rxq->rx_processing, 0);
	} else {
		t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
	}

	return work_done;
}

void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
	struct dpmaif_rx_queue *rxq;
	struct dpmaif_ctrl *ctrl;
	int qno, ret;

	qno = ffs(que_mask) - 1;
	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
		return;
	}

	rxq = &dpmaif_ctrl->rxq[qno];
	ctrl = rxq->dpmaif_ctrl;
	/* We need to make sure that the modem has been resumed before
	 * scheduling NAPI. This can't be done inside the polling function,
	 * as we could be blocked waiting for the device to resume, which
	 * can't be done from the softirq context the poll function runs in.
	 */
	ret = pm_runtime_resume_and_get(ctrl->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
		return;
	}
	napi_schedule(&rxq->napi);
}

static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
				  const struct dpmaif_bat_request *bat_req)
{
	if (bat_req->bat_base)
		dma_free_coherent(dpmaif_ctrl->dev,
				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
				  bat_req->bat_base, bat_req->bat_bus_addr);
}

/**
 * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @buf_type: BAT ring type.
 *
 * This function allocates the BAT ring buffer shared with the HW device, and also
 * allocates a buffer used to store information about the BAT skbs for later release.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			  const enum bat_type buf_type)
{
	int sw_buf_size;

	if (buf_type == BAT_TYPE_FRAG) {
		sw_buf_size = sizeof(struct dpmaif_bat_page);
		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
	} else {
		sw_buf_size = sizeof(struct dpmaif_bat_skb);
		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
	}

	bat_req->type = buf_type;
	bat_req->bat_wr_idx = 0;
	bat_req->bat_release_rd_idx = 0;

	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!bat_req->bat_base)
		return -ENOMEM;

	/* For AP SW to record skb information */
	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
					GFP_KERNEL);
	if (!bat_req->bat_skb)
		goto err_free_dma_mem;

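	/* One bit per BAT entry, used to track which buffers have been
	 * consumed and are ready to be released back to the ring.
	 */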
	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
	if (!bat_req->bat_bitmap)
		goto err_free_dma_mem;

	spin_lock_init(&bat_req->mask_lock);
	atomic_set(&bat_req->refcnt, 0);
	return 0;

err_free_dma_mem:
	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);

	return -ENOMEM;
}

void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
{
	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
		return;

	bitmap_free(bat_req->bat_bitmap);
	bat_req->bat_bitmap = NULL;

	if (bat_req->bat_skb) {
		unsigned int i;

		for (i = 0; i < bat_req->bat_size_cnt; i++) {
			if (bat_req->type == BAT_TYPE_FRAG)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
			else
				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
}

static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
{
	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!rxq->pit_base)
		return -ENOMEM;

	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
	atomic_inc(&rxq->bat_req->refcnt);

	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
	atomic_inc(&rxq->bat_frag->refcnt);
	return 0;
}

static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
{
	if (!rxq->dpmaif_ctrl)
		return;

	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

	if (rxq->pit_base)
		dma_free_coherent(rxq->dpmaif_ctrl->dev,
				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
				  rxq->pit_base, rxq->pit_bus_addr);
}

int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
{
	int ret;

	ret = t7xx_dpmaif_rx_alloc(queue);
	if (ret < 0)
		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);

	return ret;
}

void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
	t7xx_dpmaif_rx_buf_free(queue);
}

static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
{
	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
	struct dpmaif_rx_queue *rxq;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);

	/* All RX queues share one BAT table, so use DPF_RX_QNO_DFT */
	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		t7xx_dpmaif_bat_release_and_add(rxq);
		t7xx_dpmaif_frag_bat_release_and_add(rxq);
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
						      WQ_MEM_RECLAIM, 1);
	if (!dpmaif_ctrl->bat_release_wq)
		return -ENOMEM;

	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
	return 0;
}

void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	flush_work(&dpmaif_ctrl->bat_release_work);

	if (dpmaif_ctrl->bat_release_wq) {
		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
		dpmaif_ctrl->bat_release_wq = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_stop() - Suspend RX flow.
 * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
 *
 * Wait for all the RX work to finish executing and mark the RX queue as paused.
 */
void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	unsigned int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
		int timeout, value;

		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
		if (timeout)
			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");

		/* Ensure RX processing has stopped before we set rxq->que_started to false */
		smp_mb();
		rxq->que_started = false;
	}
}

static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
{
	int cnt, j = 0;

	rxq->que_started = false;

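	/* Wait for any in-flight RX processing to drain the remaining PIT
	 * entries before resetting the ring state.
	 */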
	do {
		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
						rxq->pit_wr_idx, DPMAIF_READ);

		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
			break;
		}
	} while (cnt);

	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	rxq->bat_req->bat_release_rd_idx = 0;
	rxq->bat_req->bat_wr_idx = 0;
	rxq->bat_frag->bat_release_rd_idx = 0;
	rxq->bat_frag->bat_wr_idx = 0;
}

void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
}