xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include "hinic3_cmdq.h"
5 #include "hinic3_csr.h"
6 #include "hinic3_eqs.h"
7 #include "hinic3_hw_comm.h"
8 #include "hinic3_hwdev.h"
9 #include "hinic3_hwif.h"
10 #include "hinic3_mbox.h"
11 #include "hinic3_mgmt.h"
12 
13 #define HINIC3_PCIE_SNOOP        0
14 #define HINIC3_PCIE_TPH_DISABLE  0
15 
16 #define HINIC3_DMA_ATTR_INDIR_IDX_MASK          GENMASK(9, 0)
17 #define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member)  \
18 	FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
19 
20 #define HINIC3_DMA_ATTR_ENTRY_ST_MASK           GENMASK(7, 0)
21 #define HINIC3_DMA_ATTR_ENTRY_AT_MASK           GENMASK(9, 8)
22 #define HINIC3_DMA_ATTR_ENTRY_PH_MASK           GENMASK(11, 10)
23 #define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK  BIT(12)
24 #define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK       BIT(13)
25 #define HINIC3_DMA_ATTR_ENTRY_SET(val, member)  \
26 	FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
27 
28 #define HINIC3_PCIE_ST_DISABLE       0
29 #define HINIC3_PCIE_AT_DISABLE       0
30 #define HINIC3_PCIE_PH_DISABLE       0
31 #define HINIC3_PCIE_MSIX_ATTR_ENTRY  0
32 
33 #define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT      0
34 #define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG  0xFF
35 #define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG   7
36 
37 #define HINIC3_HWDEV_WQ_NAME    "hinic3_hardware"
38 #define HINIC3_WQ_MAX_REQ       10
39 
/* Bits in hwdev->func_state tracking which management channels are live.
 * Set once a channel is fully initialized, cleared (under channel_lock)
 * on teardown; hinic3_set_api_stop() tests them before touching the
 * mailbox or cmdqs.
 */
enum hinic3_hwdev_init_state {
	HINIC3_HWDEV_MBOX_INITED = 2,
	HINIC3_HWDEV_CMDQ_INITED = 3,
};
44 
45 static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
46 {
47 	struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
48 	u16 num_aeqs, resp_num_irq, i;
49 	int err;
50 
51 	num_aeqs = hwdev->hwif->attr.num_aeqs;
52 	if (num_aeqs > HINIC3_MAX_AEQS) {
53 		dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
54 			 HINIC3_MAX_AEQS);
55 		num_aeqs = HINIC3_MAX_AEQS;
56 	}
57 	err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
58 				&resp_num_irq);
59 	if (err) {
60 		dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
61 			num_aeqs);
62 		return err;
63 	}
64 
65 	if (resp_num_irq < num_aeqs) {
66 		dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
67 			 resp_num_irq);
68 		num_aeqs = resp_num_irq;
69 	}
70 
71 	err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
72 	if (err) {
73 		dev_err(hwdev->dev, "Failed to init aeqs\n");
74 		goto err_free_irqs;
75 	}
76 
77 	return 0;
78 
79 err_free_irqs:
80 	for (i = 0; i < num_aeqs; i++)
81 		hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
82 
83 	return err;
84 }
85 
86 static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
87 {
88 	struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
89 	u16 num_ceqs, resp_num_irq, i;
90 	int err;
91 
92 	num_ceqs = hwdev->hwif->attr.num_ceqs;
93 	if (num_ceqs > HINIC3_MAX_CEQS) {
94 		dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
95 			 HINIC3_MAX_CEQS);
96 		num_ceqs = HINIC3_MAX_CEQS;
97 	}
98 
99 	err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
100 				&resp_num_irq);
101 	if (err) {
102 		dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
103 			num_ceqs);
104 		return err;
105 	}
106 
107 	if (resp_num_irq < num_ceqs) {
108 		dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
109 			 resp_num_irq);
110 		num_ceqs = resp_num_irq;
111 	}
112 
113 	err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
114 	if (err) {
115 		dev_err(hwdev->dev,
116 			"Failed to init ceqs, err:%d\n", err);
117 		goto err_free_irqs;
118 	}
119 
120 	return 0;
121 
122 err_free_irqs:
123 	for (i = 0; i < num_ceqs; i++)
124 		hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
125 
126 	return err;
127 }
128 
/* Create the mailbox and hook its async-event handlers, then advertise
 * the mailbox channel as usable via func_state.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
{
	int err;

	err = hinic3_init_mbox(hwdev);
	if (err)
		return err;

	/* Mailbox traffic from peer functions and messages from firmware
	 * arrive as AEQ events; register both handlers before setting the
	 * INITED bit.
	 */
	hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
			       hinic3_mbox_func_aeqe_handler);
	hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
			       hinic3_mgmt_msg_aeqe_handler);

	set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);

	return 0;
}
146 
/* Tear down the mailbox channel. The INITED bit is cleared under
 * channel_lock so hinic3_set_api_stop() cannot race with the teardown;
 * the AEQ callbacks are unregistered before the mailbox itself is
 * freed so no handler can run against freed state.
 */
static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
{
	spin_lock_bh(&hwdev->channel_lock);
	clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
	spin_unlock_bh(&hwdev->channel_lock);
	hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
	hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
	hinic3_free_mbox(hwdev);
}
156 
157 static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
158 {
159 	struct hinic3_aeqs *aeqs = hwdev->aeqs;
160 	struct hinic3_interrupt_info info = {};
161 	struct hinic3_eq *eq;
162 	u16 q_id;
163 	int err;
164 
165 	info.interrupt_coalesc_set = 1;
166 	info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
167 	info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
168 	info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
169 
170 	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
171 		eq = &aeqs->aeq[q_id];
172 		info.msix_index = eq->msix_entry_idx;
173 		err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
174 		if (err) {
175 			dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
176 				q_id);
177 			return err;
178 		}
179 	}
180 
181 	return 0;
182 }
183 
184 static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
185 {
186 	struct hinic3_ceqs *ceqs = hwdev->ceqs;
187 	struct hinic3_interrupt_info info = {};
188 	struct hinic3_eq *eq;
189 	u16 q_id;
190 	int err;
191 
192 	info.interrupt_coalesc_set = 1;
193 	info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
194 	info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
195 	info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
196 
197 	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
198 		eq = &ceqs->ceq[q_id];
199 		info.msix_index = eq->msix_entry_idx;
200 		err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
201 		if (err) {
202 			dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
203 				q_id);
204 			return err;
205 		}
206 	}
207 
208 	return 0;
209 }
210 
/* Bring up the basic management channel: AEQs first (mailbox events
 * arrive over an AEQ), then the mailbox, then the AEQ MSI-X coalescing
 * attributes (configured through the now-working channel).
 *
 * Return: 0 on success, negative error code on failure.
 */
static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
{
	int err;

	err = hinic3_comm_aeqs_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init async event queues\n");
		return err;
	}

	err = hinic3_comm_mbox_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init mailbox\n");
		goto err_free_comm_aeqs;
	}

	err = init_aeqs_msix_attr(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
		goto err_free_comm_mbox;
	}

	return 0;

	/* Unwind in reverse order of construction. */
err_free_comm_mbox:
	hinic3_comm_mbox_free(hwdev);
err_free_comm_aeqs:
	hinic3_aeqs_free(hwdev);

	return err;
}
242 
/* Reverse of init_basic_mgmt_channel(): mailbox before AEQs, since the
 * mailbox depends on AEQ event delivery.
 */
static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
{
	hinic3_comm_mbox_free(hwdev);
	hinic3_aeqs_free(hwdev);
}
248 
249 static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
250 {
251 	u32 addr, val, dst_attr;
252 
253 	/* Indirect access, set entry_idx first */
254 	addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
255 	val = hinic3_hwif_read_reg(hwdev->hwif, addr);
256 	val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
257 	val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
258 	hinic3_hwif_write_reg(hwdev->hwif, addr, val);
259 
260 	addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
261 	val = hinic3_hwif_read_reg(hwdev->hwif, addr);
262 
263 	dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
264 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
265 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
266 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
267 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
268 	if (val == dst_attr)
269 		return 0;
270 
271 	return hinic3_set_dma_attr_tbl(hwdev,
272 				       HINIC3_PCIE_MSIX_ATTR_ENTRY,
273 				       HINIC3_PCIE_ST_DISABLE,
274 				       HINIC3_PCIE_AT_DISABLE,
275 				       HINIC3_PCIE_PH_DISABLE,
276 				       HINIC3_PCIE_SNOOP,
277 				       HINIC3_PCIE_TPH_DISABLE);
278 }
279 
280 static int init_basic_attributes(struct hinic3_hwdev *hwdev)
281 {
282 	struct comm_global_attr glb_attr;
283 	int err;
284 
285 	err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
286 				COMM_FUNC_RESET_FLAG);
287 	if (err)
288 		return err;
289 
290 	err = hinic3_get_comm_features(hwdev, hwdev->features,
291 				       COMM_MAX_FEATURE_QWORD);
292 	if (err)
293 		return err;
294 
295 	dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
296 
297 	err = hinic3_get_global_attr(hwdev, &glb_attr);
298 	if (err)
299 		return err;
300 
301 	err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
302 	if (err)
303 		return err;
304 
305 	err = dma_attr_table_init(hwdev);
306 	if (err)
307 		return err;
308 
309 	hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
310 	dev_dbg(hwdev->dev,
311 		"global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
312 		glb_attr.max_host_num, glb_attr.max_pf_num,
313 		glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
314 		glb_attr.cmdq_num);
315 
316 	return 0;
317 }
318 
319 static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
320 {
321 	int err;
322 
323 	err = hinic3_cmdqs_init(hwdev);
324 	if (err) {
325 		dev_err(hwdev->dev, "Failed to init cmd queues\n");
326 		return err;
327 	}
328 
329 	hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
330 
331 	err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
332 	if (err) {
333 		dev_err(hwdev->dev, "Failed to set cmdq depth\n");
334 		goto err_free_cmdqs;
335 	}
336 
337 	set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
338 
339 	return 0;
340 
341 err_free_cmdqs:
342 	hinic3_cmdqs_free(hwdev);
343 
344 	return err;
345 }
346 
/* Tear down the cmdq channel. The INITED bit is cleared under
 * channel_lock so hinic3_set_api_stop() cannot race with the teardown;
 * the CEQ callback is unregistered before the cmdqs are freed so no
 * completion handler can run against freed state.
 */
static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
{
	spin_lock_bh(&hwdev->channel_lock);
	clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
	spin_unlock_bh(&hwdev->channel_lock);

	hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
	hinic3_cmdqs_free(hwdev);
}
356 
/* Bring up the cmdq channel: CEQs first (they carry cmdq completions),
 * then CEQ MSI-X attributes, the WQ page size, and finally the cmdqs
 * themselves.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
{
	int err;

	err = hinic3_comm_ceqs_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init completion event queues\n");
		return err;
	}

	err = init_ceqs_msix_attr(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
		goto err_free_ceqs;
	}

	/* Tell hw the work-queue page size the driver will use. */
	hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
	err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
				      hwdev->wq_page_size);
	if (err) {
		dev_err(hwdev->dev, "Failed to set wq page size\n");
		goto err_free_ceqs;
	}

	err = hinic3_comm_cmdqs_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init cmd queues\n");
		goto err_reset_wq_page_size;
	}

	return 0;

err_reset_wq_page_size:
	/* Best effort: restore the minimum page size on failure. */
	hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
				HINIC3_MIN_PAGE_SIZE);
err_free_ceqs:
	hinic3_ceqs_free(hwdev);

	return err;
}
397 
/* Reverse of init_cmdqs_channel(): cmdqs before CEQs, since cmdq
 * completions are delivered over a CEQ.
 */
static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
{
	hinic3_comm_cmdqs_free(hwdev);
	hinic3_ceqs_free(hwdev);
}
403 
/* Initialize all communication channels with the device: the basic
 * management channel (AEQs + mailbox), the basic attributes queried
 * over it, and the cmdq channel.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
{
	int err;

	err = init_basic_mgmt_channel(hwdev);
	if (err)
		return err;

	err = init_basic_attributes(hwdev);
	if (err)
		goto err_free_basic_mgmt_ch;

	err = init_cmdqs_channel(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init cmdq channel\n");
		goto err_clear_func_svc_used_state;
	}

	return 0;

	/* Unwind in reverse order of construction. */
err_clear_func_svc_used_state:
	hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
err_free_basic_mgmt_ch:
	free_base_mgmt_channel(hwdev);

	return err;
}
431 
/* Reverse of hinic3_init_comm_ch(): cmdq channel, COMM service used
 * state, then the basic management channel.
 */
static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
{
	hinic3_free_cmdqs_channel(hwdev);
	hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
	free_base_mgmt_channel(hwdev);
}
438 
/* Allocator for unique per-hwdev ids (hwdev->dev_id). */
static DEFINE_IDA(hinic3_adev_ida);

/* Return: a new non-negative id, or a negative errno on failure
 * (ida_alloc() semantics).
 */
static int hinic3_adev_idx_alloc(void)
{
	return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
}

static void hinic3_adev_idx_free(int id)
{
	ida_free(&hinic3_adev_ida, id);
}
450 
451 int hinic3_init_hwdev(struct pci_dev *pdev)
452 {
453 	struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
454 	struct hinic3_hwdev *hwdev;
455 	int err;
456 
457 	hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
458 	if (!hwdev)
459 		return -ENOMEM;
460 
461 	pci_adapter->hwdev = hwdev;
462 	hwdev->adapter = pci_adapter;
463 	hwdev->pdev = pci_adapter->pdev;
464 	hwdev->dev = &pci_adapter->pdev->dev;
465 	hwdev->func_state = 0;
466 	hwdev->dev_id = hinic3_adev_idx_alloc();
467 	spin_lock_init(&hwdev->channel_lock);
468 
469 	err = hinic3_init_hwif(hwdev);
470 	if (err) {
471 		dev_err(hwdev->dev, "Failed to init hwif\n");
472 		goto err_free_hwdev;
473 	}
474 
475 	hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
476 				       HINIC3_WQ_MAX_REQ);
477 	if (!hwdev->workq) {
478 		dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
479 		err = -ENOMEM;
480 		goto err_free_hwif;
481 	}
482 
483 	err = hinic3_init_cfg_mgmt(hwdev);
484 	if (err) {
485 		dev_err(hwdev->dev, "Failed to init config mgmt\n");
486 		goto err_destroy_workqueue;
487 	}
488 
489 	err = hinic3_init_comm_ch(hwdev);
490 	if (err) {
491 		dev_err(hwdev->dev, "Failed to init communication channel\n");
492 		goto err_free_cfg_mgmt;
493 	}
494 
495 	err = hinic3_init_capability(hwdev);
496 	if (err) {
497 		dev_err(hwdev->dev, "Failed to init capability\n");
498 		goto err_uninit_comm_ch;
499 	}
500 
501 	err = hinic3_set_comm_features(hwdev, hwdev->features,
502 				       COMM_MAX_FEATURE_QWORD);
503 	if (err) {
504 		dev_err(hwdev->dev, "Failed to set comm features\n");
505 		goto err_uninit_comm_ch;
506 	}
507 
508 	return 0;
509 
510 err_uninit_comm_ch:
511 	hinic3_uninit_comm_ch(hwdev);
512 err_free_cfg_mgmt:
513 	hinic3_free_cfg_mgmt(hwdev);
514 err_destroy_workqueue:
515 	destroy_workqueue(hwdev->workq);
516 err_free_hwif:
517 	hinic3_free_hwif(hwdev);
518 err_free_hwdev:
519 	pci_adapter->hwdev = NULL;
520 	hinic3_adev_idx_free(hwdev->dev_id);
521 	kfree(hwdev);
522 
523 	return err;
524 }
525 
/* Tear down everything created by hinic3_init_hwdev(), in reverse
 * order. Comm features are reset to all-zero and pending rx/tx traffic
 * is flushed before the channels go away.
 */
void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
{
	u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};

	hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
	hinic3_func_rx_tx_flush(hwdev);
	hinic3_uninit_comm_ch(hwdev);
	hinic3_free_cfg_mgmt(hwdev);
	destroy_workqueue(hwdev->workq);
	hinic3_free_hwif(hwdev);
	hinic3_adev_idx_free(hwdev->dev_id);
	kfree(hwdev);
}
539 
/* Abort in-flight management-channel requests: force the current
 * mailbox event into the TIMEOUT state and flush any synchronous cmdq
 * command. channel_lock serializes this against the teardown paths
 * that clear the INITED bits, so we never touch a half-freed channel.
 */
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
	struct hinic3_mbox *mbox;

	spin_lock_bh(&hwdev->channel_lock);
	if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
		mbox = hwdev->mbox;
		spin_lock(&mbox->mbox_lock);
		/* Only an event still in progress is timed out. */
		if (mbox->event_flag == MBOX_EVENT_START)
			mbox->event_flag = MBOX_EVENT_TIMEOUT;
		spin_unlock(&mbox->mbox_lock);
	}

	if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
		hinic3_cmdq_flush_sync_cmd(hwdev);

	spin_unlock_bh(&hwdev->channel_lock);
}
558