xref: /linux/drivers/net/ethernet/microsoft/mana/hw_channel.c (revision 0e2b2a76278153d1ac312b0691cb65dabb9aef3e)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>

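/* Reserve a free in-flight message slot and return its index in *msg_id.
 * The semaphore is sized to the number of slots, so once it is acquired a
 * zero bit is guaranteed to exist in the bitmap.
 */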
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

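/* Release a message slot reserved by mana_hwc_get_msg_index(). */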
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

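/* Match a received response to the waiting caller by its message ID, copy
 * the response into the caller's output buffer, and wake up the caller.
 */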
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}

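/* (Re)post a receive WQE so the HW can place the next response message into
 * this work request's buffer.
 */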
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

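/* EQ callback used while the channel is being established: the HW reports
 * queue IDs, limits, the PD and the memory key through HWC init events, and
 * signals the end of initialization with GDMA_EQE_HWC_INIT_DONE.
 */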
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

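/* RX completion handler: locate the work request that owns the completed
 * receive buffer from its SGE address, sanity-check the message ID carried
 * in the response, hand the response to the waiting caller, and repost the
 * buffer.
 */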
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Don't use 'resp' beyond this point, because the buffer is posted
	 * back to the HW by mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

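/* TX completions carry no payload for the HWC; only sanity-check the queue. */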
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

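/* The next three helpers are thin wrappers that fill in a gdma_queue_spec
 * and create the HWC work, completion and event queues.
 */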
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

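/* CQ callback shared by the HWC SQ and RQ: drain the completion queue,
 * dispatch each CQE to the TX or RX handler, then re-arm the CQ.
 */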
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

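/* Create the HWC completion queue together with its parent event queue and
 * the buffer used to poll completions.
 */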
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

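/* Allocate one DMA-able region and carve it into q_depth fixed-size message
 * buffers, one per work request.
 */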
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

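/* Fill in the HWC TX out-of-band data (destination queues and source queue
 * IDs) and post the request WQE on the HWC send queue.
 */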
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

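/* Prime the RQ with receive WQEs, allocate the per-message caller contexts,
 * and test the EQ.
 */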
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

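/* Pass the queue addresses to the hardware through the shared memory channel
 * and wait (up to 60s) for the INIT_DONE event; the queue IDs and limits
 * reported by the hardware were recorded by mana_hwc_init_event_handler().
 */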
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

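/* Set up the HW channel: create its queues, establish the channel with the
 * hardware, and run a basic test of the new channel.
 */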
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

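/* Send a request over the HW channel and wait (up to 30s) for the response,
 * which is copied into 'resp'. Returns 0 on success or a negative error code.
 */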
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}