xref: /linux/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c (revision 4201c9260a8d3c4ef238e51692a7e9b4e1e29efe)
1 /*
2  * Huawei HiNIC PCI Express Linux driver
3  * Copyright(c) 2017 Huawei Technologies Co., Ltd
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/pci.h>
19 #include <linux/device.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/semaphore.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/io.h>
25 #include <linux/err.h>
26 
27 #include "hinic_hw_if.h"
28 #include "hinic_hw_eqs.h"
29 #include "hinic_hw_wqe.h"
30 #include "hinic_hw_wq.h"
31 #include "hinic_hw_cmdq.h"
32 #include "hinic_hw_qp_ctxt.h"
33 #include "hinic_hw_qp.h"
34 #include "hinic_hw_io.h"
35 
36 #define CI_Q_ADDR_SIZE                  sizeof(u32)
37 
38 #define CI_ADDR(base_addr, q_id)        ((base_addr) + \
39 					 (q_id) * CI_Q_ADDR_SIZE)
40 
41 #define CI_TABLE_SIZE(num_qps)          ((num_qps) * CI_Q_ADDR_SIZE)
42 
43 #define DB_IDX(db, db_base)             \
44 	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
45 
46 enum io_cmd {
47 	IO_CMD_MODIFY_QUEUE_CTXT = 0,
48 	IO_CMD_CLEAN_QUEUE_CTXT,
49 };
50 
51 static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
52 {
53 	int i;
54 
55 	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
56 		free_db_area->db_idx[i] = i;
57 
58 	free_db_area->alloc_pos = 0;
59 	free_db_area->return_pos = HINIC_DB_MAX_AREAS;
60 
61 	free_db_area->num_free = HINIC_DB_MAX_AREAS;
62 
63 	sema_init(&free_db_area->idx_lock, 1);
64 }
65 
66 static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
67 {
68 	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
69 	int pos, idx;
70 
71 	down(&free_db_area->idx_lock);
72 
73 	free_db_area->num_free--;
74 
75 	if (free_db_area->num_free < 0) {
76 		free_db_area->num_free++;
77 		up(&free_db_area->idx_lock);
78 		return ERR_PTR(-ENOMEM);
79 	}
80 
81 	pos = free_db_area->alloc_pos++;
82 	pos &= HINIC_DB_MAX_AREAS - 1;
83 
84 	idx = free_db_area->db_idx[pos];
85 
86 	free_db_area->db_idx[pos] = -1;
87 
88 	up(&free_db_area->idx_lock);
89 
90 	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
91 }
92 
93 static void return_db_area(struct hinic_func_to_io *func_to_io,
94 			   void __iomem *db_base)
95 {
96 	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
97 	int pos, idx = DB_IDX(db_base, func_to_io->db_base);
98 
99 	down(&free_db_area->idx_lock);
100 
101 	pos = free_db_area->return_pos++;
102 	pos &= HINIC_DB_MAX_AREAS - 1;
103 
104 	free_db_area->db_idx[pos] = idx;
105 
106 	free_db_area->num_free++;
107 
108 	up(&free_db_area->idx_lock);
109 }
110 
111 static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
112 			  u16 num_sqs)
113 {
114 	struct hinic_hwif *hwif = func_to_io->hwif;
115 	struct hinic_sq_ctxt_block *sq_ctxt_block;
116 	struct pci_dev *pdev = hwif->pdev;
117 	struct hinic_cmdq_buf cmdq_buf;
118 	struct hinic_sq_ctxt *sq_ctxt;
119 	struct hinic_qp *qp;
120 	u64 out_param;
121 	int err, i;
122 
123 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
124 	if (err) {
125 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
126 		return err;
127 	}
128 
129 	sq_ctxt_block = cmdq_buf.buf;
130 	sq_ctxt = sq_ctxt_block->sq_ctxt;
131 
132 	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
133 				num_sqs, func_to_io->max_qps);
134 	for (i = 0; i < num_sqs; i++) {
135 		qp = &func_to_io->qps[i];
136 
137 		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
138 				      base_qpn + qp->q_id);
139 	}
140 
141 	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
142 
143 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
144 				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
145 				     &out_param);
146 	if ((err) || (out_param != 0)) {
147 		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
148 		err = -EFAULT;
149 	}
150 
151 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
152 	return err;
153 }
154 
155 static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
156 			  u16 num_rqs)
157 {
158 	struct hinic_hwif *hwif = func_to_io->hwif;
159 	struct hinic_rq_ctxt_block *rq_ctxt_block;
160 	struct pci_dev *pdev = hwif->pdev;
161 	struct hinic_cmdq_buf cmdq_buf;
162 	struct hinic_rq_ctxt *rq_ctxt;
163 	struct hinic_qp *qp;
164 	u64 out_param;
165 	int err, i;
166 
167 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
168 	if (err) {
169 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
170 		return err;
171 	}
172 
173 	rq_ctxt_block = cmdq_buf.buf;
174 	rq_ctxt = rq_ctxt_block->rq_ctxt;
175 
176 	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
177 				num_rqs, func_to_io->max_qps);
178 	for (i = 0; i < num_rqs; i++) {
179 		qp = &func_to_io->qps[i];
180 
181 		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
182 				      base_qpn + qp->q_id);
183 	}
184 
185 	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
186 
187 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
188 				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
189 				     &out_param);
190 	if ((err) || (out_param != 0)) {
191 		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
192 		err = -EFAULT;
193 	}
194 
195 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
196 	return err;
197 }
198 
199 /**
200  * write_qp_ctxts - write the qp ctxt to HW
201  * @func_to_io: func to io channel that holds the IO components
202  * @base_qpn: first qp number
203  * @num_qps: number of qps to write
204  *
205  * Return 0 - Success, negative - Failure
206  **/
207 static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
208 			  u16 num_qps)
209 {
210 	return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
211 		write_rq_ctxts(func_to_io, base_qpn, num_qps));
212 }
213 
214 static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io *func_to_io,
215 					  enum hinic_qp_ctxt_type ctxt_type)
216 {
217 	struct hinic_hwif *hwif = func_to_io->hwif;
218 	struct hinic_clean_queue_ctxt *ctxt_block;
219 	struct pci_dev *pdev = hwif->pdev;
220 	struct hinic_cmdq_buf cmdq_buf;
221 	u64 out_param = 0;
222 	int err;
223 
224 	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
225 	if (err) {
226 		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
227 		return err;
228 	}
229 
230 	ctxt_block = cmdq_buf.buf;
231 	ctxt_block->cmdq_hdr.num_queues = func_to_io->max_qps;
232 	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
233 	ctxt_block->cmdq_hdr.addr_offset = 0;
234 
235 	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
236 	ctxt_block->ctxt_size = 0x3;
237 
238 	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
239 
240 	cmdq_buf.size = sizeof(*ctxt_block);
241 
242 	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
243 				     IO_CMD_CLEAN_QUEUE_CTXT,
244 				     &cmdq_buf, &out_param);
245 
246 	if (err || out_param) {
247 		dev_err(&pdev->dev, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
248 			err, out_param);
249 
250 		err = -EFAULT;
251 	}
252 
253 	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
254 
255 	return err;
256 }
257 
258 static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io *func_to_io)
259 {
260 	/* clean LRO/TSO context space */
261 	return (hinic_clean_queue_offload_ctxt(func_to_io,
262 					       HINIC_QP_CTXT_TYPE_SQ) ||
263 		hinic_clean_queue_offload_ctxt(func_to_io,
264 					       HINIC_QP_CTXT_TYPE_RQ));
265 }
266 
/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Allocates the SQ and RQ work queues, reserves a doorbell page for the
 * SQ and initializes both queues; on failure the already-acquired
 * resources are released in reverse order of acquisition.
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
		   struct hinic_qp *qp, int q_id,
		   struct msix_entry *sq_msix_entry,
		   struct msix_entry *rq_msix_entry)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void __iomem *db_base;
	int err;

	qp->q_id = q_id;

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
				HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
		return err;
	}

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
				HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
		goto err_rq_alloc;
	}

	/* each SQ gets its own doorbell page from the shared free pool */
	db_base = get_db_area(func_to_io);
	if (IS_ERR(db_base)) {
		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
		err = PTR_ERR(db_base);
		goto err_get_db;
	}

	func_to_io->sq_db[q_id] = db_base;

	/* CI_ADDR selects this qp's slot in the consumer-index table */
	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
			    sq_msix_entry,
			    CI_ADDR(func_to_io->ci_addr_base, q_id),
			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
	if (err) {
		dev_err(&pdev->dev, "Failed to init SQ\n");
		goto err_sq_init;
	}

	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
			    rq_msix_entry);
	if (err) {
		dev_err(&pdev->dev, "Failed to init RQ\n");
		goto err_rq_init;
	}

	return 0;

	/* unwind in reverse order of acquisition */
err_rq_init:
	hinic_clean_sq(&qp->sq);

err_sq_init:
	return_db_area(func_to_io, db_base);

err_get_db:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
	return err;
}
345 
346 /**
347  * destroy_qp - Clean the resources of a Queue Pair
348  * @func_to_io: func to io channel that holds the IO components
349  * @qp: pointer to the qp to clean
350  **/
351 static void destroy_qp(struct hinic_func_to_io *func_to_io,
352 		       struct hinic_qp *qp)
353 {
354 	int q_id = qp->q_id;
355 
356 	hinic_clean_rq(&qp->rq);
357 	hinic_clean_sq(&qp->sq);
358 
359 	return_db_area(func_to_io, func_to_io->sq_db[q_id]);
360 
361 	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
362 	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
363 }
364 
/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Allocates the per-qp bookkeeping arrays and the shared consumer-index
 * DMA table, initializes each qp, then pushes the qp contexts to HW and
 * cleans the offload (LRO/TSO) context space.
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
			u16 base_qpn, int num_qps,
			struct msix_entry *sq_msix_entries,
			struct msix_entry *rq_msix_entries)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t qps_size, wq_size, db_size;
	void *ci_addr_base;
	int i, j, err;

	qps_size = num_qps * sizeof(*func_to_io->qps);
	func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
	if (!func_to_io->qps)
		return -ENOMEM;

	wq_size = num_qps * sizeof(*func_to_io->sq_wq);
	func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->sq_wq) {
		err = -ENOMEM;
		goto err_sq_wq;
	}

	wq_size = num_qps * sizeof(*func_to_io->rq_wq);
	func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->rq_wq) {
		err = -ENOMEM;
		goto err_rq_wq;
	}

	db_size = num_qps * sizeof(*func_to_io->sq_db);
	func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
	if (!func_to_io->sq_db) {
		err = -ENOMEM;
		goto err_sq_db;
	}

	/* consumer-index table shared with HW: one u32 slot per qp */
	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
					  &func_to_io->ci_dma_base,
					  GFP_KERNEL);
	if (!ci_addr_base) {
		dev_err(&pdev->dev, "Failed to allocate CI area\n");
		err = -ENOMEM;
		goto err_ci_base;
	}

	func_to_io->ci_addr_base = ci_addr_base;

	for (i = 0; i < num_qps; i++) {
		err = init_qp(func_to_io, &func_to_io->qps[i], i,
			      &sq_msix_entries[i], &rq_msix_entries[i]);
		if (err) {
			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
			goto err_init_qp;
		}
	}

	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
	if (err) {
		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
		goto err_write_qp_ctxts;
	}

	err = hinic_clean_qp_offload_ctxt(func_to_io);
	if (err) {
		dev_err(&pdev->dev, "Failed to clean QP contexts space\n");
		goto err_write_qp_ctxts;
	}

	return 0;

err_write_qp_ctxts:
err_init_qp:
	/* i is the number of fully initialized QPs; destroy only those */
	for (j = 0; j < i; j++)
		destroy_qp(func_to_io, &func_to_io->qps[j]);

	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
	devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
	devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
	devm_kfree(&pdev->dev, func_to_io->qps);
	return err;
}
467 
468 /**
469  * hinic_io_destroy_qps - Destroy the IO Queue Pairs
470  * @func_to_io: func to io channel that holds the IO components
471  * @num_qps: number queue pairs to destroy
472  **/
473 void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
474 {
475 	struct hinic_hwif *hwif = func_to_io->hwif;
476 	struct pci_dev *pdev = hwif->pdev;
477 	size_t ci_table_size;
478 	int i;
479 
480 	ci_table_size = CI_TABLE_SIZE(num_qps);
481 
482 	for (i = 0; i < num_qps; i++)
483 		destroy_qp(func_to_io, &func_to_io->qps[i]);
484 
485 	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
486 			  func_to_io->ci_dma_base);
487 
488 	devm_kfree(&pdev->dev, func_to_io->sq_db);
489 
490 	devm_kfree(&pdev->dev, func_to_io->rq_wq);
491 	devm_kfree(&pdev->dev, func_to_io->sq_wq);
492 
493 	devm_kfree(&pdev->dev, func_to_io->qps);
494 }
495 
/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Brings up the CEQs, the work-queue pool, the doorbell BAR mapping and
 * the command queues; on failure everything already initialized is torn
 * down in reverse order.
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
		  struct msix_entry *ceq_msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq, type;
	void __iomem *db_area;
	int err;

	func_to_io->hwif = hwif;
	func_to_io->qps = NULL;
	func_to_io->max_qps = max_qps;

	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      ceq_msix_entries);
	if (err) {
		dev_err(&pdev->dev, "Failed to init CEQs\n");
		return err;
	}

	/* 2 * max_qps: one work queue per SQ plus one per RQ */
	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
		goto err_wqs_alloc;
	}

	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
	if (!func_to_io->db_base) {
		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
		err = -ENOMEM;
		goto err_db_ioremap;
	}

	init_db_area_idx(&func_to_io->free_db_area);

	/* reserve one doorbell page per cmdq type */
	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
		db_area = get_db_area(func_to_io);
		if (IS_ERR(db_area)) {
			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
			err = PTR_ERR(db_area);
			goto err_db_area;
		}

		func_to_io->cmdq_db_area[cmdq] = db_area;
	}

	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
			       func_to_io->cmdq_db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
		goto err_init_cmdqs;
	}

	return 0;

err_init_cmdqs:
err_db_area:
	/* cmdq is the first type that did NOT get a db area; return the rest */
	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

	iounmap(func_to_io->db_base);

err_db_ioremap:
	hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
	hinic_ceqs_free(&func_to_io->ceqs);
	return err;
}
576 
577 /**
578  * hinic_io_free - Free the IO components
579  * @func_to_io: func to io channel that holds the IO components
580  **/
581 void hinic_io_free(struct hinic_func_to_io *func_to_io)
582 {
583 	enum hinic_cmdq_type cmdq;
584 
585 	hinic_free_cmdqs(&func_to_io->cmdqs);
586 
587 	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
588 		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
589 
590 	iounmap(func_to_io->db_base);
591 	hinic_wqs_free(&func_to_io->wqs);
592 	hinic_ceqs_free(&func_to_io->ceqs);
593 }
594