// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.

#include "hinic3_cmdq.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hw_intf.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_nic_cfg.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"

#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT   1
#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1
#define HINIC3_DEFAULT_DROP_THD_ON           0xFFFF
#define HINIC3_DEFAULT_DROP_THD_OFF          0

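/* Each queue is assigned a private 64-byte slot in the CI (consumer index)
 * table; the table size is rounded up to the minimum page size.
 */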
#define HINIC3_CI_Q_ADDR_SIZE  64

#define HINIC3_CI_TABLE_SIZE(num_qps) \
        (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE))

#define HINIC3_CI_VADDR(base_addr, q_id) \
        ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)

#define HINIC3_CI_PADDR(base_paddr, q_id) \
        ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)

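/* WQ prefetch tuning values that get written into the pref_cache words of
 * the SQ/RQ contexts below.
 */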
#define SQ_WQ_PREFETCH_MAX        1
#define SQ_WQ_PREFETCH_MIN        1
#define SQ_WQ_PREFETCH_THRESHOLD  16

#define RQ_WQ_PREFETCH_MAX        4
#define RQ_WQ_PREFETCH_MIN        1
#define RQ_WQ_PREFETCH_THRESHOLD  256

/* A 2048-byte cmdq buffer holds an 8-byte context header followed by
 * 64-byte queue contexts: (2048 - 8) / 64 = 31.
 */
#define HINIC3_Q_CTXT_MAX  31

enum hinic3_qp_ctxt_type {
        HINIC3_QP_CTXT_TYPE_SQ = 0,
        HINIC3_QP_CTXT_TYPE_RQ = 1,
};

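/* Header prepended to every batch of queue contexts sent over the cmdq. */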
struct hinic3_qp_ctxt_hdr {
        __le16 num_queues;
        __le16 queue_type;
        __le16 start_qid;
        __le16 rsvd;
};

struct hinic3_sq_ctxt {
        __le32 ci_pi;
        __le32 drop_mode_sp;
        __le32 wq_pfn_hi_owner;
        __le32 wq_pfn_lo;

        __le32 rsvd0;
        __le32 pkt_drop_thd;
        __le32 global_sq_id;
        __le32 vlan_ceq_attr;

        __le32 pref_cache;
        __le32 pref_ci_owner;
        __le32 pref_wq_pfn_hi_ci;
        __le32 pref_wq_pfn_lo;

        __le32 rsvd8;
        __le32 rsvd9;
        __le32 wq_block_pfn_hi;
        __le32 wq_block_pfn_lo;
};

struct hinic3_rq_ctxt {
        __le32 ci_pi;
        __le32 ceq_attr;
        __le32 wq_pfn_hi_type_owner;
        __le32 wq_pfn_lo;

        __le32 rsvd[3];
        __le32 cqe_sge_len;

        __le32 pref_cache;
        __le32 pref_ci_owner;
        __le32 pref_wq_pfn_hi_ci;
        __le32 pref_wq_pfn_lo;

        __le32 pi_paddr_hi;
        __le32 pi_paddr_lo;
        __le32 wq_block_pfn_hi;
        __le32 wq_block_pfn_lo;
};

struct hinic3_sq_ctxt_block {
        struct hinic3_qp_ctxt_hdr cmdq_hdr;
        struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX];
};

struct hinic3_rq_ctxt_block {
        struct hinic3_qp_ctxt_hdr cmdq_hdr;
        struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
};

struct hinic3_clean_queue_ctxt {
        struct hinic3_qp_ctxt_hdr cmdq_hdr;
        __le32 rsvd;
};

#define SQ_CTXT_SIZE(num_sqs) \
        (sizeof(struct hinic3_qp_ctxt_hdr) + \
         (num_sqs) * sizeof(struct hinic3_sq_ctxt))

#define RQ_CTXT_SIZE(num_rqs) \
        (sizeof(struct hinic3_qp_ctxt_hdr) + \
         (num_rqs) * sizeof(struct hinic3_rq_ctxt))

#define SQ_CTXT_PREF_CI_HI_SHIFT  12
#define SQ_CTXT_PREF_CI_HI(val)   ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)

#define SQ_CTXT_PI_IDX_MASK  GENMASK(15, 0)
#define SQ_CTXT_CI_IDX_MASK  GENMASK(31, 16)
#define SQ_CTXT_CI_PI_SET(val, member) \
        FIELD_PREP(SQ_CTXT_##member##_MASK, val)

#define SQ_CTXT_MODE_SP_FLAG_MASK   BIT(0)
#define SQ_CTXT_MODE_PKT_DROP_MASK  BIT(1)
#define SQ_CTXT_MODE_SET(val, member) \
        FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)

#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK  GENMASK(19, 0)
#define SQ_CTXT_WQ_PAGE_OWNER_MASK   BIT(23)
#define SQ_CTXT_WQ_PAGE_SET(val, member) \
        FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)

#define SQ_CTXT_PKT_DROP_THD_ON_MASK   GENMASK(15, 0)
#define SQ_CTXT_PKT_DROP_THD_OFF_MASK  GENMASK(31, 16)
#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \
        FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)

#define SQ_CTXT_GLOBAL_SQ_ID_MASK  GENMASK(12, 0)
#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \
        FIELD_PREP(SQ_CTXT_##member##_MASK, val)

#define SQ_CTXT_VLAN_INSERT_MODE_MASK  GENMASK(20, 19)
#define SQ_CTXT_VLAN_CEQ_EN_MASK       BIT(23)
#define SQ_CTXT_VLAN_CEQ_SET(val, member) \
        FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)

#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK  GENMASK(13, 0)
#define SQ_CTXT_PREF_CACHE_MAX_MASK        GENMASK(24, 14)
#define SQ_CTXT_PREF_CACHE_MIN_MASK        GENMASK(31, 25)

#define SQ_CTXT_PREF_CI_HI_MASK  GENMASK(3, 0)
#define SQ_CTXT_PREF_OWNER_MASK  BIT(4)

#define SQ_CTXT_PREF_WQ_PFN_HI_MASK  GENMASK(19, 0)
#define SQ_CTXT_PREF_CI_LOW_MASK     GENMASK(31, 20)
#define SQ_CTXT_PREF_SET(val, member) \
        FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)

#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK  GENMASK(22, 0)
#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
        FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)

/* The SQ macro is reused here because the RQ hardware format is identical. */
#define RQ_CTXT_PREF_CI_HI(val)  SQ_CTXT_PREF_CI_HI(val)

#define RQ_CTXT_PI_IDX_MASK  GENMASK(15, 0)
#define RQ_CTXT_CI_IDX_MASK  GENMASK(31, 16)
#define RQ_CTXT_CI_PI_SET(val, member) \
        FIELD_PREP(RQ_CTXT_##member##_MASK, val)

#define RQ_CTXT_CEQ_ATTR_INTR_MASK  GENMASK(30, 21)
#define RQ_CTXT_CEQ_ATTR_EN_MASK    BIT(31)
#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
        FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)

#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK    GENMASK(19, 0)
#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK  GENMASK(29, 28)
#define RQ_CTXT_WQ_PAGE_OWNER_MASK     BIT(31)
#define RQ_CTXT_WQ_PAGE_SET(val, member) \
        FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)

#define RQ_CTXT_CQE_LEN_MASK  GENMASK(29, 28)
#define RQ_CTXT_CQE_LEN_SET(val, member) \
        FIELD_PREP(RQ_CTXT_##member##_MASK, val)

#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK  GENMASK(13, 0)
#define RQ_CTXT_PREF_CACHE_MAX_MASK        GENMASK(24, 14)
#define RQ_CTXT_PREF_CACHE_MIN_MASK        GENMASK(31, 25)

#define RQ_CTXT_PREF_CI_HI_MASK  GENMASK(3, 0)
#define RQ_CTXT_PREF_OWNER_MASK  BIT(4)

#define RQ_CTXT_PREF_WQ_PFN_HI_MASK  GENMASK(19, 0)
#define RQ_CTXT_PREF_CI_LOW_MASK     GENMASK(31, 20)
#define RQ_CTXT_PREF_SET(val, member) \
        FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)

#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK  GENMASK(22, 0)
#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
        FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)

#define WQ_PAGE_PFN_SHIFT   12
#define WQ_BLOCK_PFN_SHIFT  9
#define WQ_PAGE_PFN(page_addr)   ((page_addr) >> WQ_PAGE_PFN_SHIFT)
#define WQ_BLOCK_PFN(page_addr)  ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
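/* WQ pages are referenced by 4 KiB page frame numbers (addr >> 12), while a
 * WQ block address is expressed in 512-byte units (addr >> 9).
 */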

int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_nic_io *nic_io;
        int err;

        nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL);
        if (!nic_io)
                return -ENOMEM;

        nic_dev->nic_io = nic_io;

        err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1);
        if (err) {
                dev_err(hwdev->dev, "Failed to set function svc used state\n");
                goto err_free_nicio;
        }

        err = hinic3_init_function_table(nic_dev);
        if (err) {
                dev_err(hwdev->dev, "Failed to init function table\n");
                goto err_clear_func_svc_used_state;
        }

        nic_io->rx_buf_len = nic_dev->rx_buf_len;

        err = hinic3_get_nic_feature_from_hw(nic_dev);
        if (err) {
                dev_err(hwdev->dev, "Failed to get nic features\n");
                goto err_clear_func_svc_used_state;
        }

        nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK;
        nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE;
        dev_dbg(hwdev->dev, "nic features: 0x%llx\n", nic_io->feature_cap);

        return 0;

err_clear_func_svc_used_state:
        hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0);
err_free_nicio:
        nic_dev->nic_io = NULL;
        kfree(nic_io);

        return err;
}

void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;

        hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0);
        nic_dev->nic_io = NULL;
        kfree(nic_io);
}

int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        void __iomem *db_base;
        int err;

        nic_io->max_qps = hinic3_func_max_qnum(hwdev);

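        /* SQs and RQs use separate doorbell pages. */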
        err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
        if (err) {
                dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n");
                return err;
        }
        nic_io->sqs_db_addr = db_base;

        err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
        if (err) {
                hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
                dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n");
                return err;
        }
        nic_io->rqs_db_addr = db_base;

        nic_io->ci_vaddr_base =
                dma_alloc_coherent(hwdev->dev,
                                   HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
                                   &nic_io->ci_dma_base,
                                   GFP_KERNEL);
        if (!nic_io->ci_vaddr_base) {
                hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
                hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
                return -ENOMEM;
        }

        return 0;
}

void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;

        dma_free_coherent(hwdev->dev,
                          HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
                          nic_io->ci_vaddr_base, nic_io->ci_dma_base);

        hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
        hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
}

static int hinic3_create_sq(struct hinic3_hwdev *hwdev,
                            struct hinic3_io_queue *sq,
                            u16 q_id, u32 sq_depth, u16 sq_msix_idx)
{
        int err;

        /* sq is in use; hardware requires the initial owner bit to be 1 */
        sq->owner = 1;

        sq->q_id = q_id;
        sq->msix_entry_idx = sq_msix_idx;

        err = hinic3_wq_create(hwdev, &sq->wq, sq_depth,
                               BIT(HINIC3_SQ_WQEBB_SHIFT));
        if (err) {
                dev_err(hwdev->dev, "Failed to create tx queue %u wq\n",
                        q_id);
                return err;
        }

        return 0;
}

static int hinic3_create_rq(struct hinic3_hwdev *hwdev,
                            struct hinic3_io_queue *rq,
                            u16 q_id, u32 rq_depth, u16 rq_msix_idx)
{
        int err;

        rq->q_id = q_id;
        rq->msix_entry_idx = rq_msix_idx;

        err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
                               BIT(HINIC3_RQ_WQEBB_SHIFT +
                                   HINIC3_NORMAL_RQ_WQE));
        if (err) {
                dev_err(hwdev->dev, "Failed to create rx queue %u wq\n",
                        q_id);
                return err;
        }

        return 0;
}

static int hinic3_create_qp(struct hinic3_hwdev *hwdev,
                            struct hinic3_io_queue *sq,
                            struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
                            u32 rq_depth, u16 qp_msix_idx)
{
        int err;

        err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx);
        if (err) {
                dev_err(hwdev->dev, "Failed to create sq, qid: %u\n",
                        q_id);
                return err;
        }

        err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
        if (err) {
                dev_err(hwdev->dev, "Failed to create rq, qid: %u\n",
                        q_id);
                goto err_destroy_sq_wq;
        }

        return 0;

err_destroy_sq_wq:
        hinic3_wq_destroy(hwdev, &sq->wq);

        return err;
}

static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev,
                              struct hinic3_io_queue *sq,
                              struct hinic3_io_queue *rq)
{
        hinic3_wq_destroy(hwdev, &sq->wq);
        hinic3_wq_destroy(hwdev, &rq->wq);
}

int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
                     struct hinic3_dyna_qp_params *qp_params)
{
        struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries;
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_io_queue *sqs;
        struct hinic3_io_queue *rqs;
        u16 q_id;
        int err;

        if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps)
                return -EINVAL;

        sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL);
        if (!sqs) {
                err = -ENOMEM;
                goto err_out;
        }

        rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL);
        if (!rqs) {
                err = -ENOMEM;
                goto err_free_sqs;
        }

        for (q_id = 0; q_id < qp_params->num_qps; q_id++) {
                err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,
                                       qp_params->sq_depth, qp_params->rq_depth,
                                       qps_msix_entries[q_id].entry);
                if (err) {
                        dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n",
                                q_id, err);
                        goto err_destroy_qp;
                }
        }

        qp_params->sqs = sqs;
        qp_params->rqs = rqs;

        return 0;

err_destroy_qp:
        while (q_id > 0) {
                q_id--;
                hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);
        }
        kfree(rqs);
err_free_sqs:
        kfree(sqs);
err_out:
        return err;
}

void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
                     struct hinic3_dyna_qp_params *qp_params)
{
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        u16 q_id;

        for (q_id = 0; q_id < qp_params->num_qps; q_id++)
                hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id],
                                  &qp_params->rqs[q_id]);

        kfree(qp_params->sqs);
        kfree(qp_params->rqs);
}

void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
                     struct hinic3_dyna_qp_params *qp_params)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_io_queue *sqs = qp_params->sqs;
        struct hinic3_io_queue *rqs = qp_params->rqs;
        u16 q_id;

        nic_io->num_qps = qp_params->num_qps;
        nic_io->sq = qp_params->sqs;
        nic_io->rq = qp_params->rqs;
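        /* Point each SQ at its private 64-byte slot in the coherent CI
         * table; hardware DMA-updates the consumer index there.
         */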
        for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
                sqs[q_id].cons_idx_addr =
                        (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id);
                /* clear ci value */
                WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0);

                sqs[q_id].db_addr = nic_io->sqs_db_addr;
                rqs[q_id].db_addr = nic_io->rqs_db_addr;
        }
}

void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
                       struct hinic3_dyna_qp_params *qp_params)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;

        qp_params->sqs = nic_io->sq;
        qp_params->rqs = nic_io->rq;
        qp_params->num_qps = nic_io->num_qps;
}

static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
                                          enum hinic3_qp_ctxt_type ctxt_type,
                                          u16 num_queues, u16 q_id)
{
        qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
        qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
        qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
        qp_ctxt_hdr->rsvd = 0;
}

static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
                                   struct hinic3_sq_ctxt *sq_ctxt)
{
        u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
        u32 wq_block_pfn_hi, wq_block_pfn_lo;
        u32 wq_page_pfn_hi, wq_page_pfn_lo;
        u16 pi_start, ci_start;

        ci_start = hinic3_get_sq_local_ci(sq);
        pi_start = hinic3_get_sq_local_pi(sq);

        wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);

        wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
        wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
        wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

        wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
        wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
        wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

        sq_ctxt->ci_pi =
                cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
                            SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));

        sq_ctxt->drop_mode_sp =
                cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
                            SQ_CTXT_MODE_SET(0, PKT_DROP));

        sq_ctxt->wq_pfn_hi_owner =
                cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
                            SQ_CTXT_WQ_PAGE_SET(1, OWNER));

        sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);

        sq_ctxt->pkt_drop_thd =
                cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
                            SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));

        sq_ctxt->global_sq_id =
                cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
                                                        GLOBAL_SQ_ID));

        /* enable insert c-vlan by default */
        sq_ctxt->vlan_ceq_attr =
                cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
                            SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));

        sq_ctxt->rsvd0 = 0;

        sq_ctxt->pref_cache =
                cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
                            SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
                            SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));

        sq_ctxt->pref_ci_owner =
                cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
                            SQ_CTXT_PREF_SET(1, OWNER));

        sq_ctxt->pref_wq_pfn_hi_ci =
                cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
                            SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));

        sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);

        sq_ctxt->wq_block_pfn_hi =
                cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));

        sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
}

static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
                                               u32 *wq_page_pfn_hi,
                                               u32 *wq_page_pfn_lo,
                                               u32 *wq_block_pfn_hi,
                                               u32 *wq_block_pfn_lo)
{
        u64 wq_page_addr, wq_page_pfn, wq_block_pfn;

        wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);

        wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
        *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
        *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

        wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
        *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
        *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
}

static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
                                   struct hinic3_rq_ctxt *rq_ctxt)
{
        u32 wq_block_pfn_hi, wq_block_pfn_lo;
        u32 wq_page_pfn_hi, wq_page_pfn_lo;
        u16 pi_start, ci_start;

        ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
        pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;

        hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
                                           &wq_block_pfn_hi, &wq_block_pfn_lo);

        rq_ctxt->ci_pi =
                cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
                            RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));

        rq_ctxt->ceq_attr =
                cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
                            RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));

        rq_ctxt->wq_pfn_hi_type_owner =
                cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
                            RQ_CTXT_WQ_PAGE_SET(1, OWNER));

        /* use 16-byte WQE */
        rq_ctxt->wq_pfn_hi_type_owner |=
                cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
        rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));

        rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);

        rq_ctxt->pref_cache =
                cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
                            RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
                            RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));

        rq_ctxt->pref_ci_owner =
                cpu_to_le32(RQ_CTXT_PREF_SET(RQ_CTXT_PREF_CI_HI(ci_start),
                                             CI_HI) |
                            RQ_CTXT_PREF_SET(1, OWNER));

        rq_ctxt->pref_wq_pfn_hi_ci =
                cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
                            RQ_CTXT_PREF_SET(ci_start, CI_LOW));

        rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);

        rq_ctxt->wq_block_pfn_hi =
                cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));

        rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
}

static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_sq_ctxt_block *sq_ctxt_block;
        u16 q_id, curr_id, max_ctxts, i;
        struct hinic3_sq_ctxt *sq_ctxt;
        struct hinic3_cmd_buf *cmd_buf;
        struct hinic3_io_queue *sq;
        __le64 out_param;
        int err = 0;

        cmd_buf = hinic3_alloc_cmd_buf(hwdev);
        if (!cmd_buf) {
                dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
                return -ENOMEM;
        }

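        /* Push the contexts in batches: each cmdq buffer carries one header
         * plus at most HINIC3_Q_CTXT_MAX queue contexts.
         */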
        q_id = 0;
        while (q_id < nic_io->num_qps) {
                sq_ctxt_block = cmd_buf->buf;
                sq_ctxt = sq_ctxt_block->sq_ctxt;

                max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
                            HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);

                hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
                                              HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
                                              q_id);

                for (i = 0; i < max_ctxts; i++) {
                        curr_id = q_id + i;
                        sq = &nic_io->sq[curr_id];
                        hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
                }

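                /* byte-swap each 32-bit word of the context image for the
                 * cmdq microcode
                 */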
                hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));

                cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
                err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
                                              L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
                                              cmd_buf, &out_param);
                if (err || out_param) {
                        dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
                                err, out_param);
                        err = -EFAULT;
                        break;
                }

                q_id += max_ctxts;
        }

        hinic3_free_cmd_buf(hwdev, cmd_buf);

        return err;
}

static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_rq_ctxt_block *rq_ctxt_block;
        u16 q_id, curr_id, max_ctxts, i;
        struct hinic3_rq_ctxt *rq_ctxt;
        struct hinic3_cmd_buf *cmd_buf;
        struct hinic3_io_queue *rq;
        __le64 out_param;
        int err = 0;

        cmd_buf = hinic3_alloc_cmd_buf(hwdev);
        if (!cmd_buf) {
                dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
                return -ENOMEM;
        }

        q_id = 0;
        while (q_id < nic_io->num_qps) {
                rq_ctxt_block = cmd_buf->buf;
                rq_ctxt = rq_ctxt_block->rq_ctxt;

                max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
                            HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);

                hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
                                              HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
                                              q_id);

                for (i = 0; i < max_ctxts; i++) {
                        curr_id = q_id + i;
                        rq = &nic_io->rq[curr_id];
                        hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
                }

                hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));

                cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));

                err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
                                              L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
                                              cmd_buf, &out_param);
                if (err || out_param) {
                        dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
                                err, out_param);
                        err = -EFAULT;
                        break;
                }

                q_id += max_ctxts;
        }

        hinic3_free_cmd_buf(hwdev, cmd_buf);

        return err;
}

static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
{
        int err;

        err = init_sq_ctxts(nic_dev);
        if (err)
                return err;

        err = init_rq_ctxts(nic_dev);
        if (err)
                return err;

        return 0;
}

static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
                                    enum hinic3_qp_ctxt_type ctxt_type)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_clean_queue_ctxt *ctxt_block;
        struct hinic3_cmd_buf *cmd_buf;
        __le64 out_param;
        int err;

        cmd_buf = hinic3_alloc_cmd_buf(hwdev);
        if (!cmd_buf) {
                dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
                return -ENOMEM;
        }

        ctxt_block = cmd_buf->buf;
        ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
        ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
        ctxt_block->cmdq_hdr.start_qid = 0;
        ctxt_block->cmdq_hdr.rsvd = 0;
        ctxt_block->rsvd = 0;

        hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));

        cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));

        err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
                                      L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
                                      cmd_buf, &out_param);
        if (err || out_param) {
                dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d, out_param: 0x%llx\n",
                        err, out_param);

                err = -EFAULT;
        }

        hinic3_free_cmd_buf(hwdev, cmd_buf);

        return err;
}

static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
{
        /* clean LRO/TSO context space */
        return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
               clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
}

/* Init the QP ctxts, set the SQ CI attributes and arm all SQs. */
int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
{
        struct hinic3_nic_io *nic_io = nic_dev->nic_io;
        struct hinic3_hwdev *hwdev = nic_dev->hwdev;
        struct hinic3_sq_attr sq_attr;
        u32 rq_depth;
        u16 q_id;
        int err;

        err = init_qp_ctxts(nic_dev);
        if (err) {
                dev_err(hwdev->dev, "Failed to init QP ctxts\n");
                return err;
        }

        /* clean LRO/TSO context space */
        err = clean_qp_offload_ctxt(nic_dev);
        if (err) {
                dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
                return err;
        }

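        /* The root context takes the RQ depth scaled by HINIC3_NORMAL_RQ_WQE,
         * i.e. expressed in units of the basic WQEBB.
         */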
        rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;

        err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
                                   nic_io->rx_buf_len);
        if (err) {
                dev_err(hwdev->dev, "Failed to set root context\n");
                return err;
        }

        for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
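                /* the CI DMA address is programmed in 4-byte units */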
                sq_attr.ci_dma_base =
                        HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
                sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
                sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
                sq_attr.intr_en = 1;
                sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
                sq_attr.l2nic_sqn = q_id;
                sq_attr.dma_attr_off = 0;
                err = hinic3_set_ci_table(hwdev, &sq_attr);
                if (err) {
                        dev_err(hwdev->dev, "Failed to set ci table\n");
                        goto err_clean_root_ctxt;
                }
        }

        return 0;

err_clean_root_ctxt:
        hinic3_clean_root_ctxt(hwdev);

        return err;
}

void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
{
        hinic3_clean_root_ctxt(nic_dev->hwdev);
}