xref: /linux/drivers/net/ethernet/microsoft/mana/hw_channel.c (revision 9dbbc3b9d09d6deba9f3b9e1d5b355032ed46a75)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include "gdma.h"
5 #include "hw_channel.h"
6 
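/* Reserve a free in-flight message slot. The semaphore is initialized to the
 * number of allowed in-flight messages, so callers block here once all slots
 * are taken; the bitmap then hands out a specific free msg_id under the lock.
 * The slot is released again by mana_hwc_put_msg_index().
 */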
7 static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
8 {
9 	struct gdma_resource *r = &hwc->inflight_msg_res;
10 	unsigned long flags;
11 	u32 index;
12 
13 	down(&hwc->sema);
14 
15 	spin_lock_irqsave(&r->lock, flags);
16 
17 	index = find_first_zero_bit(hwc->inflight_msg_res.map,
18 				    hwc->inflight_msg_res.size);
19 
20 	bitmap_set(hwc->inflight_msg_res.map, index, 1);
21 
22 	spin_unlock_irqrestore(&r->lock, flags);
23 
24 	*msg_id = index;
25 
26 	return 0;
27 }
28 
29 static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
30 {
31 	struct gdma_resource *r = &hwc->inflight_msg_res;
32 	unsigned long flags;
33 
34 	spin_lock_irqsave(&r->lock, flags);
35 	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
36 	spin_unlock_irqrestore(&r->lock, flags);
37 
38 	up(&hwc->sema);
39 }
40 
41 static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
42 				    const struct gdma_resp_hdr *resp_msg,
43 				    u32 resp_len)
44 {
45 	if (resp_len < sizeof(*resp_msg))
46 		return -EPROTO;
47 
48 	if (resp_len > caller_ctx->output_buflen)
49 		return -EPROTO;
50 
51 	return 0;
52 }
53 
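/* Match a received response to the caller context indexed by its HWC message
 * ID, validate the response length, copy the message into the caller's output
 * buffer and wake the waiter in mana_hwc_send_request().
 */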
54 static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
55 				 const struct gdma_resp_hdr *resp_msg)
56 {
57 	struct hwc_caller_ctx *ctx;
58 	int err;
59 
60 	if (!test_bit(resp_msg->response.hwc_msg_id,
61 		      hwc->inflight_msg_res.map)) {
62 		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
63 			resp_msg->response.hwc_msg_id);
64 		return;
65 	}
66 
67 	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
68 	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
69 	if (err)
70 		goto out;
71 
72 	ctx->status_code = resp_msg->status;
73 
74 	memcpy(ctx->output_buf, resp_msg, resp_len);
75 out:
76 	ctx->error = err;
77 	complete(&ctx->comp_event);
78 }
79 
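/* Post a receive WQE with a single SGE pointing at this work request's slot
 * in the RQ DMA buffer, then ring the queue doorbell.
 */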
80 static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
81 				struct hwc_work_request *req)
82 {
83 	struct device *dev = hwc_rxq->hwc->dev;
84 	struct gdma_sge *sge;
85 	int err;
86 
87 	sge = &req->sge;
88 	sge->address = (u64)req->buf_sge_addr;
89 	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
90 	sge->size = req->buf_len;
91 
92 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
93 	req->wqe_req.sgl = sge;
94 	req->wqe_req.num_sge = 1;
95 	req->wqe_req.client_data_unit = 0;
96 
97 	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
98 	if (err)
99 		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
100 	return err;
101 }
102 
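/* EQ callback used while the channel is being brought up: the hardware
 * reports the EQ/CQ/WQ IDs, queue-depth and message-size limits, PD ID,
 * doorbell and memory key through these init EQEs, and signals
 * hwc_init_eqe_comp once initialization is done.
 */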
103 static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
104 					struct gdma_event *event)
105 {
106 	struct hw_channel_context *hwc = ctx;
107 	struct gdma_dev *gd = hwc->gdma_dev;
108 	union hwc_init_type_data type_data;
109 	union hwc_init_eq_id_db eq_db;
110 	u32 type, val;
111 
112 	switch (event->type) {
113 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
114 		eq_db.as_uint32 = event->details[0];
115 		hwc->cq->gdma_eq->id = eq_db.eq_id;
116 		gd->doorbell = eq_db.doorbell;
117 		break;
118 
119 	case GDMA_EQE_HWC_INIT_DATA:
120 		type_data.as_uint32 = event->details[0];
121 		type = type_data.type;
122 		val = type_data.value;
123 
124 		switch (type) {
125 		case HWC_INIT_DATA_CQID:
126 			hwc->cq->gdma_cq->id = val;
127 			break;
128 
129 		case HWC_INIT_DATA_RQID:
130 			hwc->rxq->gdma_wq->id = val;
131 			break;
132 
133 		case HWC_INIT_DATA_SQID:
134 			hwc->txq->gdma_wq->id = val;
135 			break;
136 
137 		case HWC_INIT_DATA_QUEUE_DEPTH:
138 			hwc->hwc_init_q_depth_max = (u16)val;
139 			break;
140 
141 		case HWC_INIT_DATA_MAX_REQUEST:
142 			hwc->hwc_init_max_req_msg_size = val;
143 			break;
144 
145 		case HWC_INIT_DATA_MAX_RESPONSE:
146 			hwc->hwc_init_max_resp_msg_size = val;
147 			break;
148 
149 		case HWC_INIT_DATA_MAX_NUM_CQS:
150 			gd->gdma_context->max_num_cqs = val;
151 			break;
152 
153 		case HWC_INIT_DATA_PDID:
154 			hwc->gdma_dev->pdid = val;
155 			break;
156 
157 		case HWC_INIT_DATA_GPA_MKEY:
158 			hwc->rxq->msg_buf->gpa_mkey = val;
159 			hwc->txq->msg_buf->gpa_mkey = val;
160 			break;
161 		}
162 
163 		break;
164 
165 	case GDMA_EQE_HWC_INIT_DONE:
166 		complete(&hwc->hwc_init_eqe_comp);
167 		break;
168 
169 	default:
170 		/* Ignore unknown events, which should never happen. */
171 		break;
172 	}
173 }
174 
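/* CQ callback for RQ completions: walk back from the completed WQE's SGE
 * address to the owning work request, dispatch the response it holds, then
 * repost the buffer to the RQ.
 */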
175 static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
176 				      const struct hwc_rx_oob *rx_oob)
177 {
178 	struct hw_channel_context *hwc = ctx;
179 	struct hwc_wq *hwc_rxq = hwc->rxq;
180 	struct hwc_work_request *rx_req;
181 	struct gdma_resp_hdr *resp;
182 	struct gdma_wqe *dma_oob;
183 	struct gdma_queue *rq;
184 	struct gdma_sge *sge;
185 	u64 rq_base_addr;
186 	u64 rx_req_idx;
187 	u8 *wqe;
188 
189 	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
190 		return;
191 
192 	rq = hwc_rxq->gdma_wq;
193 	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
194 	dma_oob = (struct gdma_wqe *)wqe;
195 
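	/* The SGL follows the fixed 8-byte WQE header and the inline OOB. */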
196 	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
197 
198 	/* Find the RX work request that owns this buffer, for its virtual address and for reposting. */
199 	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
200 	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
201 
202 	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
203 	resp = (struct gdma_resp_hdr *)rx_req->buf_va;
204 
205 	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
206 		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
207 			resp->response.hwc_msg_id);
208 		return;
209 	}
210 
211 	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
212 
213 	/* Don't use 'resp' beyond this point: the buffer is posted back to
214 	 * the HW by mana_hwc_post_rx_wqe() below.
215 	 */
216 	resp = NULL;
217 
218 	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
219 }
220 
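/* CQ callback for SQ completions: nothing to do beyond a sanity check, since
 * the interesting work happens when the response arrives on the RQ.
 */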
221 static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
222 				      const struct hwc_rx_oob *rx_oob)
223 {
224 	struct hw_channel_context *hwc = ctx;
225 	struct hwc_wq *hwc_txq = hwc->txq;
226 
227 	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
228 }
229 
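/* The three helpers below are thin wrappers that fill in a gdma_queue_spec
 * and create the HWC SQ/RQ, CQ and EQ via mana_gd_create_hwc_queue().
 */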
230 static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
231 				   enum gdma_queue_type type, u64 queue_size,
232 				   struct gdma_queue **queue)
233 {
234 	struct gdma_queue_spec spec = {};
235 
236 	if (type != GDMA_SQ && type != GDMA_RQ)
237 		return -EINVAL;
238 
239 	spec.type = type;
240 	spec.monitor_avl_buf = false;
241 	spec.queue_size = queue_size;
242 
243 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
244 }
245 
246 static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
247 				   u64 queue_size,
248 				   void *ctx, gdma_cq_callback *cb,
249 				   struct gdma_queue *parent_eq,
250 				   struct gdma_queue **queue)
251 {
252 	struct gdma_queue_spec spec = {};
253 
254 	spec.type = GDMA_CQ;
255 	spec.monitor_avl_buf = false;
256 	spec.queue_size = queue_size;
257 	spec.cq.context = ctx;
258 	spec.cq.callback = cb;
259 	spec.cq.parent_eq = parent_eq;
260 
261 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
262 }
263 
264 static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
265 				   u64 queue_size,
266 				   void *ctx, gdma_eq_callback *cb,
267 				   struct gdma_queue **queue)
268 {
269 	struct gdma_queue_spec spec = {};
270 
271 	spec.type = GDMA_EQ;
272 	spec.monitor_avl_buf = false;
273 	spec.queue_size = queue_size;
274 	spec.eq.context = ctx;
275 	spec.eq.callback = cb;
276 	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
277 
278 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
279 }
280 
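/* Completion handler for the HWC CQ: poll up to queue_depth completions,
 * route each one to the TX or RX event handler depending on whether it came
 * from the SQ or the RQ, then re-arm the CQ.
 */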
281 static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
282 {
283 	struct hwc_rx_oob comp_data = {};
284 	struct gdma_comp *completions;
285 	struct hwc_cq *hwc_cq = ctx;
286 	int comp_read, i;
287 
288 	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);
289 
290 	completions = hwc_cq->comp_buf;
291 	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
292 	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
293 
294 	for (i = 0; i < comp_read; ++i) {
295 		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
296 
297 		if (completions[i].is_sq)
298 			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
299 						completions[i].wq_num,
300 						&comp_data);
301 		else
302 			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
303 						completions[i].wq_num,
304 						&comp_data);
305 	}
306 
307 	mana_gd_arm_cq(q_self);
308 }
309 
310 static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
311 {
312 	if (!hwc_cq)
313 		return;
314 
315 	kfree(hwc_cq->comp_buf);
316 
317 	if (hwc_cq->gdma_cq)
318 		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
319 
320 	if (hwc_cq->gdma_eq)
321 		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
322 
323 	kfree(hwc_cq);
324 }
325 
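/* Create the HWC completion path: an EQ (whose callback handles the channel's
 * init events) plus a CQ that serves both the SQ and the RQ, along with the
 * completion buffer used when polling that CQ.
 */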
326 static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
327 			      gdma_eq_callback *callback, void *ctx,
328 			      hwc_rx_event_handler_t *rx_ev_hdlr,
329 			      void *rx_ev_ctx,
330 			      hwc_tx_event_handler_t *tx_ev_hdlr,
331 			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
332 {
333 	struct gdma_queue *eq, *cq;
334 	struct gdma_comp *comp_buf;
335 	struct hwc_cq *hwc_cq;
336 	u32 eq_size, cq_size;
337 	int err;
338 
339 	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
340 	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
341 		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
342 
343 	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
344 	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
345 		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
346 
347 	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
348 	if (!hwc_cq)
349 		return -ENOMEM;
350 
351 	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
352 	if (err) {
353 		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
354 		goto out;
355 	}
356 	hwc_cq->gdma_eq = eq;
357 
358 	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
359 				      eq, &cq);
360 	if (err) {
361 		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
362 		goto out;
363 	}
364 	hwc_cq->gdma_cq = cq;
365 
366 	comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
367 	if (!comp_buf) {
368 		err = -ENOMEM;
369 		goto out;
370 	}
371 
372 	hwc_cq->hwc = hwc;
373 	hwc_cq->comp_buf = comp_buf;
374 	hwc_cq->queue_depth = q_depth;
375 	hwc_cq->rx_event_handler = rx_ev_hdlr;
376 	hwc_cq->rx_event_ctx = rx_ev_ctx;
377 	hwc_cq->tx_event_handler = tx_ev_hdlr;
378 	hwc_cq->tx_event_ctx = tx_ev_ctx;
379 
380 	*hwc_cq_ptr = hwc_cq;
381 	return 0;
382 out:
383 	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
384 	return err;
385 }
386 
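/* Allocate one contiguous DMA region of q_depth * max_msg_size bytes and
 * carve it into per-message work-request slots, recording each slot's virtual
 * address, DMA address and length.
 */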
387 static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
388 				  u32 max_msg_size,
389 				  struct hwc_dma_buf **dma_buf_ptr)
390 {
391 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
392 	struct hwc_work_request *hwc_wr;
393 	struct hwc_dma_buf *dma_buf;
394 	struct gdma_mem_info *gmi;
395 	void *virt_addr;
396 	u32 buf_size;
397 	u8 *base_pa;
398 	int err;
399 	u16 i;
400 
401 	dma_buf = kzalloc(sizeof(*dma_buf) +
402 			  q_depth * sizeof(struct hwc_work_request),
403 			  GFP_KERNEL);
404 	if (!dma_buf)
405 		return -ENOMEM;
406 
407 	dma_buf->num_reqs = q_depth;
408 
409 	buf_size = PAGE_ALIGN(q_depth * max_msg_size);
410 
411 	gmi = &dma_buf->mem_info;
412 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
413 	if (err) {
414 		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
415 		goto out;
416 	}
417 
418 	virt_addr = dma_buf->mem_info.virt_addr;
419 	base_pa = (u8 *)dma_buf->mem_info.dma_handle;
420 
421 	for (i = 0; i < q_depth; i++) {
422 		hwc_wr = &dma_buf->reqs[i];
423 
424 		hwc_wr->buf_va = virt_addr + i * max_msg_size;
425 		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
426 
427 		hwc_wr->buf_len = max_msg_size;
428 	}
429 
430 	*dma_buf_ptr = dma_buf;
431 	return 0;
432 out:
433 	kfree(dma_buf);
434 	return err;
435 }
436 
437 static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
438 				     struct hwc_dma_buf *dma_buf)
439 {
440 	if (!dma_buf)
441 		return;
442 
443 	mana_gd_free_memory(&dma_buf->mem_info);
444 
445 	kfree(dma_buf);
446 }
447 
448 static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
449 				struct hwc_wq *hwc_wq)
450 {
451 	if (!hwc_wq)
452 		return;
453 
454 	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
455 
456 	if (hwc_wq->gdma_wq)
457 		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
458 				      hwc_wq->gdma_wq);
459 
460 	kfree(hwc_wq);
461 }
462 
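/* Create an HWC work queue (SQ or RQ) sized for q_depth entries and attach
 * the DMA message buffer that backs its requests or responses.
 */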
463 static int mana_hwc_create_wq(struct hw_channel_context *hwc,
464 			      enum gdma_queue_type q_type, u16 q_depth,
465 			      u32 max_msg_size, struct hwc_cq *hwc_cq,
466 			      struct hwc_wq **hwc_wq_ptr)
467 {
468 	struct gdma_queue *queue;
469 	struct hwc_wq *hwc_wq;
470 	u32 queue_size;
471 	int err;
472 
473 	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);
474 
475 	if (q_type == GDMA_RQ)
476 		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
477 	else
478 		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
479 
480 	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
481 		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
482 
483 	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
484 	if (!hwc_wq)
485 		return -ENOMEM;
486 
487 	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
488 	if (err)
489 		goto out;
490 
491 	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
492 				     &hwc_wq->msg_buf);
493 	if (err)
494 		goto out;
495 
496 	hwc_wq->hwc = hwc;
497 	hwc_wq->gdma_wq = queue;
498 	hwc_wq->queue_depth = q_depth;
499 	hwc_wq->hwc_cq = hwc_cq;
500 
501 	*hwc_wq_ptr = hwc_wq;
502 	return 0;
503 out:
504 	if (err)
505 		mana_hwc_destroy_wq(hwc, hwc_wq);
506 	return err;
507 }
508 
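/* Build the inline TX OOB (virtual SQ/RQ/CQ IDs and destination) and a single
 * SGE for the request message, then post the WQE on the HWC SQ and ring the
 * doorbell.
 */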
509 static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
510 				struct hwc_work_request *req,
511 				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
512 				bool dest_pf)
513 {
514 	struct device *dev = hwc_txq->hwc->dev;
515 	struct hwc_tx_oob *tx_oob;
516 	struct gdma_sge *sge;
517 	int err;
518 
519 	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
520 		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
521 			req->msg_size, req->buf_len);
522 		return -EINVAL;
523 	}
524 
525 	tx_oob = &req->tx_oob;
526 
527 	tx_oob->vrq_id = dest_virt_rq_id;
528 	tx_oob->dest_vfid = 0;
529 	tx_oob->vrcq_id = dest_virt_rcq_id;
530 	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
531 	tx_oob->loopback = false;
532 	tx_oob->lso_override = false;
533 	tx_oob->dest_pf = dest_pf;
534 	tx_oob->vsq_id = hwc_txq->gdma_wq->id;
535 
536 	sge = &req->sge;
537 	sge->address = (u64)req->buf_sge_addr;
538 	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
539 	sge->size = req->msg_size;
540 
541 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
542 	req->wqe_req.sgl = sge;
543 	req->wqe_req.num_sge = 1;
544 	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
545 	req->wqe_req.inline_oob_data = tx_oob;
546 	req->wqe_req.client_data_unit = 0;
547 
548 	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
549 	if (err)
550 		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
551 	return err;
552 }
553 
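/* Size the semaphore and the msg-ID bitmap to the number of messages that may
 * be in flight at once.
 */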
554 static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
555 				      u16 num_msg)
556 {
557 	int err;
558 
559 	sema_init(&hwc->sema, num_msg);
560 
561 	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
562 	if (err)
563 		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
564 	return err;
565 }
566 
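/* Prime the RQ with one WQE per buffer slot, allocate the per-message caller
 * contexts, and verify the EQ delivers events via mana_gd_test_eq() before
 * the channel is used.
 */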
567 static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
568 				 u32 max_req_msg_size, u32 max_resp_msg_size)
569 {
570 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
571 	struct hwc_wq *hwc_rxq = hwc->rxq;
572 	struct hwc_work_request *req;
573 	struct hwc_caller_ctx *ctx;
574 	int err;
575 	int i;
576 
577 	/* Post all WQEs on the RQ */
578 	for (i = 0; i < q_depth; i++) {
579 		req = &hwc_rxq->msg_buf->reqs[i];
580 		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
581 		if (err)
582 			return err;
583 	}
584 
585 	ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
586 	if (!ctx)
587 		return -ENOMEM;
588 
589 	for (i = 0; i < q_depth; ++i)
590 		init_completion(&ctx[i].comp_event);
591 
592 	hwc->caller_ctx = ctx;
593 
594 	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
595 }
596 
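/* Hand the EQ/CQ/RQ/SQ base addresses and the EQ's MSI-X index to the
 * hardware over the shared-memory channel, then wait (up to 60s) for the
 * GDMA_EQE_HWC_INIT_DONE event; the queue IDs and channel limits arrive
 * through mana_hwc_init_event_handler() along the way. Finally register the
 * HWC CQ in gc->cq_table.
 */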
597 static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
598 				      u32 *max_req_msg_size,
599 				      u32 *max_resp_msg_size)
600 {
601 	struct hw_channel_context *hwc = gc->hwc.driver_data;
602 	struct gdma_queue *rq = hwc->rxq->gdma_wq;
603 	struct gdma_queue *sq = hwc->txq->gdma_wq;
604 	struct gdma_queue *eq = hwc->cq->gdma_eq;
605 	struct gdma_queue *cq = hwc->cq->gdma_cq;
606 	int err;
607 
608 	init_completion(&hwc->hwc_init_eqe_comp);
609 
610 	err = mana_smc_setup_hwc(&gc->shm_channel, false,
611 				 eq->mem_info.dma_handle,
612 				 cq->mem_info.dma_handle,
613 				 rq->mem_info.dma_handle,
614 				 sq->mem_info.dma_handle,
615 				 eq->eq.msix_index);
616 	if (err)
617 		return err;
618 
619 	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
620 		return -ETIMEDOUT;
621 
622 	*q_depth = hwc->hwc_init_q_depth_max;
623 	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
624 	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
625 
626 	if (WARN_ON(cq->id >= gc->max_num_cqs))
627 		return -EPROTO;
628 
629 	gc->cq_table = vzalloc(gc->max_num_cqs * sizeof(struct gdma_queue *));
630 	if (!gc->cq_table)
631 		return -ENOMEM;
632 
633 	gc->cq_table[cq->id] = cq;
634 
635 	return 0;
636 }
637 
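/* Create the full set of HWC queues: the shared CQ/EQ pair plus the RQ and
 * SQ, sized for q_depth in-flight messages with the given maximum request and
 * response sizes.
 */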
638 static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
639 				u32 max_req_msg_size, u32 max_resp_msg_size)
640 {
641 	struct hwc_wq *hwc_rxq = NULL;
642 	struct hwc_wq *hwc_txq = NULL;
643 	struct hwc_cq *hwc_cq = NULL;
644 	int err;
645 
646 	err = mana_hwc_init_inflight_msg(hwc, q_depth);
647 	if (err)
648 		return err;
649 
650 	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
651 	 * queue depth and RQ queue depth.
652 	 */
653 	err = mana_hwc_create_cq(hwc, q_depth * 2,
654 				 mana_hwc_init_event_handler, hwc,
655 				 mana_hwc_rx_event_handler, hwc,
656 				 mana_hwc_tx_event_handler, hwc, &hwc_cq);
657 	if (err) {
658 		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
659 		goto out;
660 	}
661 	hwc->cq = hwc_cq;
662 
663 	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
664 				 hwc_cq, &hwc_rxq);
665 	if (err) {
666 		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
667 		goto out;
668 	}
669 	hwc->rxq = hwc_rxq;
670 
671 	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
672 				 hwc_cq, &hwc_txq);
673 	if (err) {
674 		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
675 		goto out;
676 	}
677 	hwc->txq = hwc_txq;
678 
679 	hwc->num_inflight_msg = q_depth;
680 	hwc->max_req_msg_size = max_req_msg_size;
681 
682 	return 0;
683 out:
684 	if (hwc_txq)
685 		mana_hwc_destroy_wq(hwc, hwc_txq);
686 
687 	if (hwc_rxq)
688 		mana_hwc_destroy_wq(hwc, hwc_rxq);
689 
690 	if (hwc_cq)
691 		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
692 
693 	mana_gd_free_res_map(&hwc->inflight_msg_res);
694 	return err;
695 }
696 
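/* Bring up the HW channel for this GDMA context: allocate the channel
 * context and queues using the bootstrap sizes, establish the channel with
 * the hardware, and run a quick self-test.
 */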
697 int mana_hwc_create_channel(struct gdma_context *gc)
698 {
699 	u32 max_req_msg_size, max_resp_msg_size;
700 	struct gdma_dev *gd = &gc->hwc;
701 	struct hw_channel_context *hwc;
702 	u16 q_depth_max;
703 	int err;
704 
705 	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
706 	if (!hwc)
707 		return -ENOMEM;
708 
709 	gd->gdma_context = gc;
710 	gd->driver_data = hwc;
711 	hwc->gdma_dev = gd;
712 	hwc->dev = gc->dev;
713 
714 	/* HWC's instance number is always 0. */
715 	gd->dev_id.as_uint32 = 0;
716 	gd->dev_id.type = GDMA_DEVICE_HWC;
717 
718 	gd->pdid = INVALID_PDID;
719 	gd->doorbell = INVALID_DOORBELL;
720 
721 	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
722 				   HW_CHANNEL_MAX_REQUEST_SIZE,
723 				   HW_CHANNEL_MAX_RESPONSE_SIZE);
724 	if (err) {
725 		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
726 		goto out;
727 	}
728 
729 	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
730 					 &max_resp_msg_size);
731 	if (err) {
732 		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
733 		goto out;
734 	}
735 
736 	err = mana_hwc_test_channel(gc->hwc.driver_data,
737 				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
738 				    max_req_msg_size, max_resp_msg_size);
739 	if (err) {
740 		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
741 		goto out;
742 	}
743 
744 	return 0;
745 out:
746 	kfree(hwc);
747 	return err;
748 }
749 
750 void mana_hwc_destroy_channel(struct gdma_context *gc)
751 {
752 	struct hw_channel_context *hwc = gc->hwc.driver_data;
753 	struct hwc_caller_ctx *ctx;
754 
755 	mana_smc_teardown_hwc(&gc->shm_channel, false);
756 
757 	ctx = hwc->caller_ctx;
758 	kfree(ctx);
759 	hwc->caller_ctx = NULL;
760 
761 	mana_hwc_destroy_wq(hwc, hwc->txq);
762 	hwc->txq = NULL;
763 
764 	mana_hwc_destroy_wq(hwc, hwc->rxq);
765 	hwc->rxq = NULL;
766 
767 	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
768 	hwc->cq = NULL;
769 
770 	mana_gd_free_res_map(&hwc->inflight_msg_res);
771 
772 	hwc->num_inflight_msg = 0;
773 
774 	if (hwc->gdma_dev->pdid != INVALID_PDID) {
775 		hwc->gdma_dev->doorbell = INVALID_DOORBELL;
776 		hwc->gdma_dev->pdid = INVALID_PDID;
777 	}
778 
779 	kfree(hwc);
780 	gc->hwc.driver_data = NULL;
781 	gc->hwc.gdma_context = NULL;
782 }
783 
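/* Synchronously send a request over the HW channel and wait (up to 30s) for
 * the response to be copied into 'resp'.
 *
 * Illustrative sketch of a caller (not from this file; the request/response
 * structs and message code shown are only assumptions for the example):
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_general_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 */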
784 int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
785 			  const void *req, u32 resp_len, void *resp)
786 {
787 	struct hwc_work_request *tx_wr;
788 	struct hwc_wq *txq = hwc->txq;
789 	struct gdma_req_hdr *req_msg;
790 	struct hwc_caller_ctx *ctx;
791 	u16 msg_id;
792 	int err;
793 
794 	mana_hwc_get_msg_index(hwc, &msg_id);
795 
796 	tx_wr = &txq->msg_buf->reqs[msg_id];
797 
798 	if (req_len > tx_wr->buf_len) {
799 		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
800 			tx_wr->buf_len);
801 		err = -EINVAL;
802 		goto out;
803 	}
804 
805 	ctx = hwc->caller_ctx + msg_id;
806 	ctx->output_buf = resp;
807 	ctx->output_buflen = resp_len;
808 
809 	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
810 	if (req)
811 		memcpy(req_msg, req, req_len);
812 
813 	req_msg->req.hwc_msg_id = msg_id;
814 
815 	tx_wr->msg_size = req_len;
816 
817 	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
818 	if (err) {
819 		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
820 		goto out;
821 	}
822 
823 	if (!wait_for_completion_timeout(&ctx->comp_event, 30 * HZ)) {
824 		dev_err(hwc->dev, "HWC: Request timed out!\n");
825 		err = -ETIMEDOUT;
826 		goto out;
827 	}
828 
829 	if (ctx->error) {
830 		err = ctx->error;
831 		goto out;
832 	}
833 
834 	if (ctx->status_code) {
835 		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
836 			ctx->status_code);
837 		err = -EPROTO;
838 		goto out;
839 	}
840 out:
841 	mana_hwc_put_msg_index(hwc, msg_id);
842 	return err;
843 }
844