// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include <linux/dma-mapping.h>

#include "hinic3_hwdev.h"
#include "hinic3_wq.h"

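/*
 * A WQ is a power-of-2 sized ring of fixed-size entries (WQEBBs) spread over
 * one or more DMA-coherent queue pages. A WQ that needs more than one page
 * also gets a "WQ block": a single HINIC3_MIN_PAGE_SIZE page holding the
 * big-endian DMA address of every queue page, which is what bounds the page
 * count at WQ_MAX_NUM_PAGES.
 */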
#define WQ_MIN_DEPTH            64
#define WQ_MAX_DEPTH            65536
#define WQ_PAGE_ADDR_SIZE       sizeof(u64)
#define WQ_MAX_NUM_PAGES        (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)

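/* Set up the WQ block. A single-page WQ (0-level CLA) needs no separate
 * block: the queue page itself is used. Otherwise a dedicated page is
 * allocated and filled with the big-endian DMA address of each queue page.
 */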
static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
	struct hinic3_queue_pages *qpages = &wq->qpages;
	int i;

	if (hinic3_wq_is_0_level_cla(wq)) {
		wq->wq_block_paddr = qpages->pages[0].align_paddr;
		wq->wq_block_vaddr = qpages->pages[0].align_vaddr;

		return 0;
	}

	if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
		dev_err(hwdev->dev, "wq num_pages exceed limit: %lu\n",
			WQ_MAX_NUM_PAGES);
		return -EFAULT;
	}

	wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
						HINIC3_MIN_PAGE_SIZE,
						&wq->wq_block_paddr,
						GFP_KERNEL);
	if (!wq->wq_block_vaddr)
		return -ENOMEM;

	for (i = 0; i < qpages->num_pages; i++)
		wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);

	return 0;
}

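/* Allocate the queue pages and the WQ block describing them; if WQ block
 * setup fails the queue pages are freed again so no partial allocation is
 * left behind.
 */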
static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
	int err;

	err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
	if (err)
		return err;

	err = wq_init_wq_block(hwdev, wq);
	if (err) {
		hinic3_queue_pages_free(hwdev, &wq->qpages);
		return err;
	}

	return 0;
}

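/* Free the WQ block (only if it was allocated separately, i.e. the WQ is not
 * 0-level CLA) and then the queue pages.
 */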
static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
	if (!hinic3_wq_is_0_level_cla(wq))
		dma_free_coherent(hwdev->dev,
				  HINIC3_MIN_PAGE_SIZE,
				  wq->wq_block_vaddr,
				  wq->wq_block_paddr);

	hinic3_queue_pages_free(hwdev, &wq->qpages);
}

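/**
 * hinic3_wq_create() - Allocate and initialize a work queue.
 * @hwdev: Device the WQ belongs to.
 * @wq: WQ structure to initialize.
 * @q_depth: Number of WQEBBs; must be a power of 2 within
 *           [WQ_MIN_DEPTH, WQ_MAX_DEPTH].
 * @wqebb_size: Size of a single WQEBB in bytes; must be a power of 2.
 *
 * Return: 0 on success, negative error code on failure.
 */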
int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
		     u32 q_depth, u16 wqebb_size)
{
	u32 wq_page_size;

	if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
	    !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
		dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
			q_depth, wqebb_size);
		return -EINVAL;
	}

	wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);

	memset(wq, 0, sizeof(*wq));
	wq->q_depth = q_depth;
	wq->idx_mask = q_depth - 1;

	hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);

	return wq_alloc_pages(hwdev, wq);
}

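/* Free all pages allocated by hinic3_wq_create(). */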
void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
{
	wq_free_pages(hwdev, wq);
}

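/* Reset the producer/consumer indices and zero the queue pages so the WQ can
 * be reused without reallocating it.
 */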
void hinic3_wq_reset(struct hinic3_wq *wq)
{
	struct hinic3_queue_pages *qpages = &wq->qpages;
	u16 pg_idx;

	wq->cons_idx = 0;
	wq->prod_idx = 0;

	for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
		memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
}

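/* Reserve @num_wqebbs consecutive WQEBBs starting at the current producer
 * index and advance the producer index; no free-space check is performed
 * here. Since the ring is built from multiple pages, the reservation may be
 * split: @first_part_wqebbs points at the @first_part_wqebbs_num entries
 * that fit in the current page, and @second_part_wqebbs points at the rest
 * on the next page (NULL if the whole reservation is contiguous).
 */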
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
				u16 num_wqebbs, u16 *prod_idx,
				struct hinic3_sq_bufdesc **first_part_wqebbs,
				struct hinic3_sq_bufdesc **second_part_wqebbs,
				u16 *first_part_wqebbs_num)
{
	u32 idx, remaining;

	idx = wq->prod_idx & wq->idx_mask;
	wq->prod_idx += num_wqebbs;
	*prod_idx = idx;
	*first_part_wqebbs = get_q_element(&wq->qpages, idx, &remaining);
	if (likely(remaining >= num_wqebbs)) {
		*first_part_wqebbs_num = num_wqebbs;
		*second_part_wqebbs = NULL;
	} else {
		*first_part_wqebbs_num = remaining;
		idx += remaining;
		*second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
	}
}

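/* A WQ whose entries fit in a single queue page uses 0-level CLA and does
 * not need a separate WQ block.
 */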
bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
{
	return wq->qpages.num_pages == 1;
}
139