xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include "hinic3_cmdq.h"
5 #include "hinic3_csr.h"
6 #include "hinic3_eqs.h"
7 #include "hinic3_hw_comm.h"
8 #include "hinic3_hwdev.h"
9 #include "hinic3_hwif.h"
10 #include "hinic3_mbox.h"
11 #include "hinic3_mgmt.h"
12 
13 #define HINIC3_PCIE_SNOOP        0
14 #define HINIC3_PCIE_TPH_DISABLE  0
15 
16 #define HINIC3_SYNFW_TIME_PERIOD  (60 * 60 * 1000)
17 
18 #define HINIC3_DMA_ATTR_INDIR_IDX_MASK          GENMASK(9, 0)
19 #define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member)  \
20 	FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
21 
22 #define HINIC3_DMA_ATTR_ENTRY_ST_MASK           GENMASK(7, 0)
23 #define HINIC3_DMA_ATTR_ENTRY_AT_MASK           GENMASK(9, 8)
24 #define HINIC3_DMA_ATTR_ENTRY_PH_MASK           GENMASK(11, 10)
25 #define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK  BIT(12)
26 #define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK       BIT(13)
27 #define HINIC3_DMA_ATTR_ENTRY_SET(val, member)  \
28 	FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
29 
30 #define HINIC3_PCIE_ST_DISABLE       0
31 #define HINIC3_PCIE_AT_DISABLE       0
32 #define HINIC3_PCIE_PH_DISABLE       0
33 #define HINIC3_PCIE_MSIX_ATTR_ENTRY  0
34 
35 #define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT      0
36 #define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG  0xFF
37 #define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG   7
38 
39 #define HINIC3_HWDEV_WQ_NAME    "hinic3_hardware"
40 #define HINIC3_WQ_MAX_REQ       10
41 
/* Bring-up stages recorded as bit positions in hwdev->func_state.
 * hinic3_set_api_stop() tests these bits (under channel_lock) to decide
 * which communication channels still need to be flushed on teardown.
 */
enum hinic3_hwdev_init_state {
	HINIC3_HWDEV_MGMT_INITED = 1,
	HINIC3_HWDEV_MBOX_INITED = 2,
	HINIC3_HWDEV_CMDQ_INITED = 3,
};
47 
hinic3_comm_aeqs_init(struct hinic3_hwdev * hwdev)48 static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
49 {
50 	struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
51 	u16 num_aeqs, resp_num_irq, i;
52 	int err;
53 
54 	num_aeqs = hwdev->hwif->attr.num_aeqs;
55 	if (num_aeqs > HINIC3_MAX_AEQS) {
56 		dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
57 			 HINIC3_MAX_AEQS);
58 		num_aeqs = HINIC3_MAX_AEQS;
59 	}
60 	err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
61 				&resp_num_irq);
62 	if (err) {
63 		dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
64 			num_aeqs);
65 		return err;
66 	}
67 
68 	if (resp_num_irq < num_aeqs) {
69 		dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
70 			 resp_num_irq);
71 		num_aeqs = resp_num_irq;
72 	}
73 
74 	err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
75 	if (err) {
76 		dev_err(hwdev->dev, "Failed to init aeqs\n");
77 		goto err_free_irqs;
78 	}
79 
80 	return 0;
81 
82 err_free_irqs:
83 	for (i = 0; i < num_aeqs; i++)
84 		hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
85 
86 	return err;
87 }
88 
hinic3_comm_ceqs_init(struct hinic3_hwdev * hwdev)89 static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
90 {
91 	struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
92 	u16 num_ceqs, resp_num_irq, i;
93 	int err;
94 
95 	num_ceqs = hwdev->hwif->attr.num_ceqs;
96 	if (num_ceqs > HINIC3_MAX_CEQS) {
97 		dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
98 			 HINIC3_MAX_CEQS);
99 		num_ceqs = HINIC3_MAX_CEQS;
100 	}
101 
102 	err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
103 				&resp_num_irq);
104 	if (err) {
105 		dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
106 			num_ceqs);
107 		return err;
108 	}
109 
110 	if (resp_num_irq < num_ceqs) {
111 		dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
112 			 resp_num_irq);
113 		num_ceqs = resp_num_irq;
114 	}
115 
116 	err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
117 	if (err) {
118 		dev_err(hwdev->dev,
119 			"Failed to init ceqs, err:%d\n", err);
120 		goto err_free_irqs;
121 	}
122 
123 	return 0;
124 
125 err_free_irqs:
126 	for (i = 0; i < num_ceqs; i++)
127 		hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
128 
129 	return err;
130 }
131 
hinic3_comm_mbox_init(struct hinic3_hwdev * hwdev)132 static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
133 {
134 	int err;
135 
136 	err = hinic3_init_mbox(hwdev);
137 	if (err)
138 		return err;
139 
140 	hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
141 			       hinic3_mbox_func_aeqe_handler);
142 	hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
143 			       hinic3_mgmt_msg_aeqe_handler);
144 
145 	set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
146 
147 	return 0;
148 }
149 
/* Tear down the mailbox channel.  The MBOX_INITED bit is cleared under
 * channel_lock first so that hinic3_set_api_stop(), which tests the bit
 * under the same lock, cannot touch a mailbox that is being freed; the
 * AEQ handlers are detached before the mailbox state itself is released.
 */
static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
{
	spin_lock_bh(&hwdev->channel_lock);
	clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
	spin_unlock_bh(&hwdev->channel_lock);
	hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
	hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
	hinic3_free_mbox(hwdev);
}
159 
init_aeqs_msix_attr(struct hinic3_hwdev * hwdev)160 static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
161 {
162 	struct hinic3_aeqs *aeqs = hwdev->aeqs;
163 	struct hinic3_interrupt_info info = {};
164 	struct hinic3_eq *eq;
165 	u16 q_id;
166 	int err;
167 
168 	info.interrupt_coalesc_set = 1;
169 	info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
170 	info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
171 	info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
172 
173 	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
174 		eq = &aeqs->aeq[q_id];
175 		info.msix_index = eq->msix_entry_idx;
176 		err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
177 		if (err) {
178 			dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
179 				q_id);
180 			return err;
181 		}
182 	}
183 
184 	return 0;
185 }
186 
init_ceqs_msix_attr(struct hinic3_hwdev * hwdev)187 static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
188 {
189 	struct hinic3_ceqs *ceqs = hwdev->ceqs;
190 	struct hinic3_interrupt_info info = {};
191 	struct hinic3_eq *eq;
192 	u16 q_id;
193 	int err;
194 
195 	info.interrupt_coalesc_set = 1;
196 	info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
197 	info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
198 	info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
199 
200 	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
201 		eq = &ceqs->ceq[q_id];
202 		info.msix_index = eq->msix_entry_idx;
203 		err = hinic3_set_interrupt_cfg(hwdev, info);
204 		if (err) {
205 			dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
206 				q_id);
207 			return err;
208 		}
209 	}
210 
211 	return 0;
212 }
213 
hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev * hwdev)214 static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev)
215 {
216 	int err;
217 
218 	if (HINIC3_IS_VF(hwdev))
219 		return 0;
220 
221 	err = hinic3_pf_to_mgmt_init(hwdev);
222 	if (err)
223 		return err;
224 
225 	set_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
226 
227 	return 0;
228 }
229 
/* Tear down the PF-to-management channel (no-op on VFs, which never
 * created one).  MGMT_INITED is cleared under channel_lock so that
 * hinic3_set_api_stop() cannot race with the free.
 *
 * NOTE(review): HINIC3_MSG_FROM_FW is also unregistered later by
 * hinic3_comm_mbox_free(); the duplicate unregister here appears
 * intentional but confirm hinic3_aeq_unregister_cb() is idempotent.
 */
static void hinic3_comm_pf_to_mgmt_free(struct hinic3_hwdev *hwdev)
{
	if (HINIC3_IS_VF(hwdev))
		return;

	spin_lock_bh(&hwdev->channel_lock);
	clear_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state);
	spin_unlock_bh(&hwdev->channel_lock);

	hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);

	hinic3_pf_to_mgmt_free(hwdev);
}
243 
init_basic_mgmt_channel(struct hinic3_hwdev * hwdev)244 static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
245 {
246 	int err;
247 
248 	err = hinic3_comm_aeqs_init(hwdev);
249 	if (err) {
250 		dev_err(hwdev->dev, "Failed to init async event queues\n");
251 		return err;
252 	}
253 
254 	err = hinic3_comm_mbox_init(hwdev);
255 	if (err) {
256 		dev_err(hwdev->dev, "Failed to init mailbox\n");
257 		goto err_free_comm_aeqs;
258 	}
259 
260 	err = init_aeqs_msix_attr(hwdev);
261 	if (err) {
262 		dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
263 		goto err_free_comm_mbox;
264 	}
265 
266 	return 0;
267 
268 err_free_comm_mbox:
269 	hinic3_comm_mbox_free(hwdev);
270 err_free_comm_aeqs:
271 	hinic3_aeqs_free(hwdev);
272 
273 	return err;
274 }
275 
/* Reverse of init_basic_mgmt_channel()'s success path: mailbox first,
 * then the AEQs it was layered on.
 */
static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
{
	hinic3_comm_mbox_free(hwdev);
	hinic3_aeqs_free(hwdev);
}
281 
dma_attr_table_init(struct hinic3_hwdev * hwdev)282 static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
283 {
284 	u32 addr, val, dst_attr;
285 
286 	/* Indirect access, set entry_idx first */
287 	addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
288 	val = hinic3_hwif_read_reg(hwdev->hwif, addr);
289 	val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
290 	val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
291 	hinic3_hwif_write_reg(hwdev->hwif, addr, val);
292 
293 	addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
294 	val = hinic3_hwif_read_reg(hwdev->hwif, addr);
295 
296 	dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
297 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
298 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
299 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
300 		   HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
301 	if (val == dst_attr)
302 		return 0;
303 
304 	return hinic3_set_dma_attr_tbl(hwdev,
305 				       HINIC3_PCIE_MSIX_ATTR_ENTRY,
306 				       HINIC3_PCIE_ST_DISABLE,
307 				       HINIC3_PCIE_AT_DISABLE,
308 				       HINIC3_PCIE_PH_DISABLE,
309 				       HINIC3_PCIE_SNOOP,
310 				       HINIC3_PCIE_TPH_DISABLE);
311 }
312 
init_basic_attributes(struct hinic3_hwdev * hwdev)313 static int init_basic_attributes(struct hinic3_hwdev *hwdev)
314 {
315 	struct comm_global_attr glb_attr;
316 	int err;
317 
318 	err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
319 				COMM_FUNC_RESET_FLAG);
320 	if (err)
321 		return err;
322 
323 	err = hinic3_get_comm_features(hwdev, hwdev->features,
324 				       COMM_MAX_FEATURE_QWORD);
325 	if (err)
326 		return err;
327 
328 	dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
329 
330 	err = hinic3_get_global_attr(hwdev, &glb_attr);
331 	if (err)
332 		return err;
333 
334 	err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
335 	if (err)
336 		return err;
337 
338 	err = dma_attr_table_init(hwdev);
339 	if (err)
340 		return err;
341 
342 	hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
343 	dev_dbg(hwdev->dev,
344 		"global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
345 		glb_attr.max_host_num, glb_attr.max_pf_num,
346 		glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
347 		glb_attr.cmdq_num);
348 
349 	return 0;
350 }
351 
hinic3_comm_cmdqs_init(struct hinic3_hwdev * hwdev)352 static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
353 {
354 	int err;
355 
356 	err = hinic3_cmdqs_init(hwdev);
357 	if (err) {
358 		dev_err(hwdev->dev, "Failed to init cmd queues\n");
359 		return err;
360 	}
361 
362 	hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
363 
364 	err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
365 	if (err) {
366 		dev_err(hwdev->dev, "Failed to set cmdq depth\n");
367 		goto err_free_cmdqs;
368 	}
369 
370 	set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
371 
372 	return 0;
373 
374 err_free_cmdqs:
375 	hinic3_cmdqs_free(hwdev);
376 
377 	return err;
378 }
379 
/* Tear down the command queues.  CMDQ_INITED is cleared under
 * channel_lock first so hinic3_set_api_stop() cannot flush a cmdq that
 * is being freed; the CEQ handler is detached before the queues go away.
 */
static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
{
	spin_lock_bh(&hwdev->channel_lock);
	clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
	spin_unlock_bh(&hwdev->channel_lock);

	hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
	hinic3_cmdqs_free(hwdev);
}
389 
/* Bring up the command-queue channel: CEQs, their MSI-X attributes, an
 * enlarged work-queue page size, and finally the command queues
 * themselves.  On failure after the page size was changed, it is
 * restored to HINIC3_MIN_PAGE_SIZE before the CEQs are released.
 */
static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
{
	int err;

	err = hinic3_comm_ceqs_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init completion event queues\n");
		return err;
	}

	err = init_ceqs_msix_attr(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
		goto err_free_ceqs;
	}

	/* Cmdq WQs use pages larger than the minimum */
	hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
	err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
				      hwdev->wq_page_size);
	if (err) {
		dev_err(hwdev->dev, "Failed to set wq page size\n");
		goto err_free_ceqs;
	}

	err = hinic3_comm_cmdqs_init(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init cmd queues\n");
		goto err_reset_wq_page_size;
	}

	return 0;

err_reset_wq_page_size:
	hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
				HINIC3_MIN_PAGE_SIZE);
err_free_ceqs:
	hinic3_ceqs_free(hwdev);

	return err;
}
430 
/* Reverse of init_cmdqs_channel()'s success path: cmdqs first, then the
 * CEQs they depended on.
 */
static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
{
	hinic3_comm_cmdqs_free(hwdev);
	hinic3_ceqs_free(hwdev);
}
436 
/* Establish all driver<->firmware communication channels in dependency
 * order: basic mgmt (AEQs + mailbox), PF-to-mgmt, basic attributes,
 * cmdq channel; finally mark the PF active.  Each failure unwinds every
 * earlier stage via the goto chain.
 */
static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
{
	int err;

	err = init_basic_mgmt_channel(hwdev);
	if (err)
		return err;

	err = hinic3_comm_pf_to_mgmt_init(hwdev);
	if (err)
		goto err_free_basic_mgmt_ch;

	err = init_basic_attributes(hwdev);
	if (err)
		goto err_free_comm_pf_to_mgmt;

	err = init_cmdqs_channel(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init cmdq channel\n");
		goto err_clear_func_svc_used_state;
	}

	hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG);

	return 0;

err_clear_func_svc_used_state:
	/* init_basic_attributes() set the COMM service used-state to 1 */
	hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
err_free_comm_pf_to_mgmt:
	hinic3_comm_pf_to_mgmt_free(hwdev);
err_free_basic_mgmt_ch:
	free_base_mgmt_channel(hwdev);

	return err;
}
472 
/* Reverse of hinic3_init_comm_ch(): drop the PF back to INIT status,
 * then dismantle the channels in the opposite order they were built.
 */
static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
{
	hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_INIT);
	hinic3_free_cmdqs_channel(hwdev);
	hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
	hinic3_comm_pf_to_mgmt_free(hwdev);
	free_base_mgmt_channel(hwdev);
}
481 
hinic3_auto_sync_time_work(struct work_struct * work)482 static void hinic3_auto_sync_time_work(struct work_struct *work)
483 {
484 	struct delayed_work *delay = to_delayed_work(work);
485 	struct hinic3_hwdev *hwdev;
486 
487 	hwdev = container_of(delay, struct hinic3_hwdev, sync_time_task);
488 
489 	hinic3_sync_time_to_fw(hwdev);
490 
491 	queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
492 			   msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
493 }
494 
hinic3_init_ppf_work(struct hinic3_hwdev * hwdev)495 static void hinic3_init_ppf_work(struct hinic3_hwdev *hwdev)
496 {
497 	if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
498 		return;
499 
500 	INIT_DELAYED_WORK(&hwdev->sync_time_task, hinic3_auto_sync_time_work);
501 	queue_delayed_work(hwdev->workq, &hwdev->sync_time_task,
502 			   msecs_to_jiffies(HINIC3_SYNFW_TIME_PERIOD));
503 }
504 
hinic3_free_ppf_work(struct hinic3_hwdev * hwdev)505 static void hinic3_free_ppf_work(struct hinic3_hwdev *hwdev)
506 {
507 	if (hinic3_ppf_idx(hwdev) != hinic3_global_func_id(hwdev))
508 		return;
509 
510 	disable_delayed_work_sync(&hwdev->sync_time_task);
511 }
512 
513 static DEFINE_IDA(hinic3_adev_ida);
514 
/* Allocate a unique auxiliary-device id for a new hwdev instance.
 * NOTE(review): ida_alloc() returns a negative errno on exhaustion and
 * hinic3_init_hwdev() stores the result unchecked — confirm intended.
 */
static int hinic3_adev_idx_alloc(void)
{
	return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
}
519 
/* Return a device id obtained from hinic3_adev_idx_alloc() to the pool. */
static void hinic3_adev_idx_free(int id)
{
	ida_free(&hinic3_adev_ida, id);
}
524 
/* Allocate and fully bring up the hardware device for @pdev: hwif,
 * workqueue, config mgmt, communication channels, capability discovery,
 * PPF time-sync work and feature negotiation with firmware.  On any
 * failure every completed stage is unwound in reverse and the adapter's
 * hwdev pointer is reset to NULL.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hinic3_init_hwdev(struct pci_dev *pdev)
{
	struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
	struct hinic3_hwdev *hwdev;
	int err;

	hwdev = kzalloc_obj(*hwdev);
	if (!hwdev)
		return -ENOMEM;

	pci_adapter->hwdev = hwdev;
	hwdev->adapter = pci_adapter;
	hwdev->pdev = pci_adapter->pdev;
	hwdev->dev = &pci_adapter->pdev->dev;
	hwdev->func_state = 0;
	/* NOTE(review): may be a negative errno on IDA exhaustion; stored
	 * unchecked — confirm exhaustion cannot happen or is benign.
	 */
	hwdev->dev_id = hinic3_adev_idx_alloc();
	spin_lock_init(&hwdev->channel_lock);

	err = hinic3_init_hwif(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init hwif\n");
		goto err_free_hwdev;
	}

	hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM | WQ_PERCPU,
				       HINIC3_WQ_MAX_REQ);
	if (!hwdev->workq) {
		dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
		err = -ENOMEM;
		goto err_free_hwif;
	}

	err = hinic3_init_cfg_mgmt(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init config mgmt\n");
		goto err_destroy_workqueue;
	}

	err = hinic3_init_comm_ch(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init communication channel\n");
		goto err_free_cfg_mgmt;
	}

	err = hinic3_init_capability(hwdev);
	if (err) {
		dev_err(hwdev->dev, "Failed to init capability\n");
		goto err_uninit_comm_ch;
	}

	hinic3_init_ppf_work(hwdev);

	/* Tell firmware which optional features this driver will use */
	err = hinic3_set_comm_features(hwdev, hwdev->features,
				       COMM_MAX_FEATURE_QWORD);
	if (err) {
		dev_err(hwdev->dev, "Failed to set comm features\n");
		goto err_free_ppf_work;
	}

	return 0;

err_free_ppf_work:
	hinic3_free_ppf_work(hwdev);
err_uninit_comm_ch:
	hinic3_uninit_comm_ch(hwdev);
err_free_cfg_mgmt:
	hinic3_free_cfg_mgmt(hwdev);
err_destroy_workqueue:
	destroy_workqueue(hwdev->workq);
err_free_hwif:
	hinic3_free_hwif(hwdev);
err_free_hwdev:
	pci_adapter->hwdev = NULL;
	hinic3_adev_idx_free(hwdev->dev_id);
	kfree(hwdev);

	return err;
}
603 
/* Release a hwdev created by hinic3_init_hwdev(): clear the negotiated
 * feature set, stop the PPF time-sync work, flush in-flight RX/TX, then
 * tear down the remaining stages in reverse init order.
 */
void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
{
	/* All-zero bitmap = driver no longer uses any optional features */
	u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};

	hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
	hinic3_free_ppf_work(hwdev);
	hinic3_func_rx_tx_flush(hwdev);
	hinic3_uninit_comm_ch(hwdev);
	hinic3_free_cfg_mgmt(hwdev);
	destroy_workqueue(hwdev->workq);
	hinic3_free_hwif(hwdev);
	hinic3_adev_idx_free(hwdev->dev_id);
	kfree(hwdev);
}
618 
/* Abort in-flight synchronous traffic on every initialized channel so a
 * non-responsive firmware cannot block teardown: pending waiters are
 * completed and their events marked timed out.  channel_lock is held
 * throughout so the *_INITED bits cannot be cleared underneath us by the
 * free paths, which clear them under the same lock.
 */
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
	struct hinic3_recv_msg *recv_resp_msg;
	struct hinic3_mbox *mbox;

	spin_lock_bh(&hwdev->channel_lock);
	/* PF-to-mgmt channel: wake the waiter of an in-progress sync send */
	if (HINIC3_IS_PF(hwdev) &&
	    test_bit(HINIC3_HWDEV_MGMT_INITED, &hwdev->func_state)) {
		recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt;
		spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
		if (hwdev->pf_to_mgmt->event_flag == COMM_SEND_EVENT_START) {
			complete(&recv_resp_msg->recv_done);
			hwdev->pf_to_mgmt->event_flag = COMM_SEND_EVENT_TIMEOUT;
		}
		spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock);
	}

	/* Mailbox: mark a started exchange as timed out */
	if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
		mbox = hwdev->mbox;
		spin_lock(&mbox->mbox_lock);
		if (mbox->event_flag == MBOX_EVENT_START)
			mbox->event_flag = MBOX_EVENT_TIMEOUT;
		spin_unlock(&mbox->mbox_lock);
	}

	/* Command queues: fail any outstanding synchronous command */
	if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
		hinic3_cmdq_flush_sync_cmd(hwdev);

	spin_unlock_bh(&hwdev->channel_lock);
}
649