xref: /freebsd/sys/dev/mana/hw_channel.c (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Microsoft Corp.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/types.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/bus.h>
42 #include <machine/bus.h>
43 
44 #include "mana.h"
45 #include "hw_channel.h"
46 
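/*
 * Reserve a free in-flight message slot: block on the semaphore until a
 * slot is available, claim the first clear bit in the bitmap and return
 * its index as the message ID.
 */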
47 static int
48 mana_hwc_get_msg_index(struct hw_channel_context *hwc, uint16_t *msg_id)
49 {
50 	struct gdma_resource *r = &hwc->inflight_msg_res;
51 	uint32_t index;
52 
53 	sema_wait(&hwc->sema);
54 
55 	mtx_lock_spin(&r->lock_spin);
56 
57 	index = find_first_zero_bit(hwc->inflight_msg_res.map,
58 	    hwc->inflight_msg_res.size);
59 
60 	bitmap_set(hwc->inflight_msg_res.map, index, 1);
61 
62 	mtx_unlock_spin(&r->lock_spin);
63 
64 	*msg_id = index;
65 
66 	return 0;
67 }
68 
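/*
 * Release an in-flight message slot and wake up one sender waiting on
 * the semaphore.
 */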
69 static void
70 mana_hwc_put_msg_index(struct hw_channel_context *hwc, uint16_t msg_id)
71 {
72 	struct gdma_resource *r = &hwc->inflight_msg_res;
73 
74 	mtx_lock_spin(&r->lock_spin);
75 	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
76 	mtx_unlock_spin(&r->lock_spin);
77 
78 	sema_post(&hwc->sema);
79 }
80 
81 static int
82 mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
83     const struct gdma_resp_hdr *resp_msg,
84     uint32_t resp_len)
85 {
86 	if (resp_len < sizeof(*resp_msg))
87 		return EPROTO;
88 
89 	if (resp_len > caller_ctx->output_buflen)
90 		return EPROTO;
91 
92 	return 0;
93 }
94 
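/*
 * Match a received response to its caller context, validate its length,
 * copy it into the caller's output buffer and signal the completion the
 * sender is waiting on.
 */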
95 static void
96 mana_hwc_handle_resp(struct hw_channel_context *hwc, uint32_t resp_len,
97     const struct gdma_resp_hdr *resp_msg)
98 {
99 	struct hwc_caller_ctx *ctx;
100 	int err;
101 
102 	if (!test_bit(resp_msg->response.hwc_msg_id,
103 	    hwc->inflight_msg_res.map)) {
104 		device_printf(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
105 		    resp_msg->response.hwc_msg_id);
106 		return;
107 	}
108 
109 	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
110 	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
111 	if (err)
112 		goto out;
113 
114 	ctx->status_code = resp_msg->status;
115 
116 	memcpy(ctx->output_buf, resp_msg, resp_len);
117 out:
118 	ctx->error = err;
119 	complete(&ctx->comp_event);
120 }
121 
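/*
 * Post a single-SGE receive WQE describing one message buffer and ring
 * the RQ doorbell.
 */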
122 static int
123 mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
124     struct hwc_work_request *req)
125 {
126 	device_t dev = hwc_rxq->hwc->dev;
127 	struct gdma_sge *sge;
128 	int err;
129 
130 	sge = &req->sge;
131 	sge->address = (uint64_t)req->buf_sge_addr;
132 	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
133 	sge->size = req->buf_len;
134 
135 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
136 	req->wqe_req.sgl = sge;
137 	req->wqe_req.num_sge = 1;
138 	req->wqe_req.client_data_unit = 0;
139 
140 	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
141 	if (err)
142 		device_printf(dev,
143 		    "Failed to post WQE on HWC RQ: %d\n", err);
144 	return err;
145 }
146 
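/*
 * EQ callback used during channel bring-up: records the queue IDs,
 * doorbell, queue depth, message size limits, PD ID and GPA memory key
 * reported by the SoC, and signals completion on GDMA_EQE_HWC_INIT_DONE.
 */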
147 static void
148 mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
149     struct gdma_event *event)
150 {
151 	struct hw_channel_context *hwc = ctx;
152 	struct gdma_dev *gd = hwc->gdma_dev;
153 	union hwc_init_type_data type_data;
154 	union hwc_init_eq_id_db eq_db;
155 	uint32_t type, val;
156 
157 	switch (event->type) {
158 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
159 		eq_db.as_uint32 = event->details[0];
160 		hwc->cq->gdma_eq->id = eq_db.eq_id;
161 		gd->doorbell = eq_db.doorbell;
162 		break;
163 
164 	case GDMA_EQE_HWC_INIT_DATA:
165 		type_data.as_uint32 = event->details[0];
166 		type = type_data.type;
167 		val = type_data.value;
168 
169 		switch (type) {
170 		case HWC_INIT_DATA_CQID:
171 			hwc->cq->gdma_cq->id = val;
172 			break;
173 
174 		case HWC_INIT_DATA_RQID:
175 			hwc->rxq->gdma_wq->id = val;
176 			break;
177 
178 		case HWC_INIT_DATA_SQID:
179 			hwc->txq->gdma_wq->id = val;
180 			break;
181 
182 		case HWC_INIT_DATA_QUEUE_DEPTH:
183 			hwc->hwc_init_q_depth_max = (uint16_t)val;
184 			break;
185 
186 		case HWC_INIT_DATA_MAX_REQUEST:
187 			hwc->hwc_init_max_req_msg_size = val;
188 			break;
189 
190 		case HWC_INIT_DATA_MAX_RESPONSE:
191 			hwc->hwc_init_max_resp_msg_size = val;
192 			break;
193 
194 		case HWC_INIT_DATA_MAX_NUM_CQS:
195 			gd->gdma_context->max_num_cqs = val;
196 			break;
197 
198 		case HWC_INIT_DATA_PDID:
199 			hwc->gdma_dev->pdid = val;
200 			break;
201 
202 		case HWC_INIT_DATA_GPA_MKEY:
203 			hwc->rxq->msg_buf->gpa_mkey = val;
204 			hwc->txq->msg_buf->gpa_mkey = val;
205 			break;
206 		}
207 
208 		break;
209 
210 	case GDMA_EQE_HWC_INIT_DONE:
211 		complete(&hwc->hwc_init_eqe_comp);
212 		break;
213 
214 	default:
215 		/* Ignore unknown events, which should never happen. */
216 		break;
217 	}
218 }
219 
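/*
 * RX completion handler: locate the receive WQE and the work request
 * that owns its buffer, hand the response to mana_hwc_handle_resp(),
 * then repost the buffer to the RQ.
 */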
220 static void
221 mana_hwc_rx_event_handler(void *ctx, uint32_t gdma_rxq_id,
222     const struct hwc_rx_oob *rx_oob)
223 {
224 	struct hw_channel_context *hwc = ctx;
225 	struct hwc_wq *hwc_rxq = hwc->rxq;
226 	struct hwc_work_request *rx_req;
227 	struct gdma_resp_hdr *resp;
228 	struct gdma_wqe *dma_oob;
229 	struct gdma_queue *rq;
230 	struct gdma_sge *sge;
231 	uint64_t rq_base_addr;
232 	uint64_t rx_req_idx;
233 	uint8_t *wqe;
234 
235 	if (hwc_rxq->gdma_wq->id != gdma_rxq_id) {
236 		mana_warn(NULL, "unmatched rx queue %u != %u\n",
237 		    hwc_rxq->gdma_wq->id, gdma_rxq_id);
238 		return;
239 	}
240 
242 	rq = hwc_rxq->gdma_wq;
243 	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
244 	dma_oob = (struct gdma_wqe *)wqe;
245 
246 	bus_dmamap_sync(rq->mem_info.dma_tag, rq->mem_info.dma_map,
247 	    BUS_DMASYNC_POSTREAD);
248 
249 	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
250 
251 	/* Find the RX work request owning this buffer (for its virtual address and for reposting). */
252 	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
253 	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
254 
255 	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
256 	    hwc_rxq->msg_buf->mem_info.dma_map,
257 	    BUS_DMASYNC_POSTREAD);
258 
259 	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
260 	resp = (struct gdma_resp_hdr *)rx_req->buf_va;
261 
262 	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
263 		device_printf(hwc->dev, "HWC RX: wrong msg_id=%u\n",
264 		    resp->response.hwc_msg_id);
265 		return;
266 	}
267 
268 	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
269 
270 	/* Stop using 'resp'; the buffer is handed back to the HW by
271 	 * mana_hwc_post_rx_wqe() below.
272 	 */
273 	resp = NULL;
274 
275 	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
276 	    hwc_rxq->msg_buf->mem_info.dma_map,
277 	    BUS_DMASYNC_PREREAD);
278 
279 	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
280 }
281 
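/*
 * TX completion handler: sanity-check the queue ID and sync the SQ
 * memory after the hardware has processed the send WQE.
 */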
282 static void
283 mana_hwc_tx_event_handler(void *ctx, uint32_t gdma_txq_id,
284     const struct hwc_rx_oob *rx_oob)
285 {
286 	struct hw_channel_context *hwc = ctx;
287 	struct hwc_wq *hwc_txq = hwc->txq;
288 
289 	if (!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id) {
290 		mana_warn(NULL, "unmatched tx queue %u != %u\n",
291 		    hwc_txq ? hwc_txq->gdma_wq->id : 0, gdma_txq_id);
		return;
292 	}
293 
294 	bus_dmamap_sync(hwc_txq->gdma_wq->mem_info.dma_tag,
295 	    hwc_txq->gdma_wq->mem_info.dma_map,
296 	    BUS_DMASYNC_POSTWRITE);
297 }
298 
299 static int
300 mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
301     enum gdma_queue_type type, uint64_t queue_size,
302     struct gdma_queue **queue)
303 {
304 	struct gdma_queue_spec spec = {};
305 
306 	if (type != GDMA_SQ && type != GDMA_RQ)
307 		return EINVAL;
308 
309 	spec.type = type;
310 	spec.monitor_avl_buf = false;
311 	spec.queue_size = queue_size;
312 
313 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
314 }
315 
316 static int
317 mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
318     uint64_t queue_size,
319     void *ctx, gdma_cq_callback *cb,
320     struct gdma_queue *parent_eq,
321     struct gdma_queue **queue)
322 {
323 	struct gdma_queue_spec spec = {};
324 
325 	spec.type = GDMA_CQ;
326 	spec.monitor_avl_buf = false;
327 	spec.queue_size = queue_size;
328 	spec.cq.context = ctx;
329 	spec.cq.callback = cb;
330 	spec.cq.parent_eq = parent_eq;
331 
332 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
333 }
334 
335 static int
336 mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
337     uint64_t queue_size,
338     void *ctx, gdma_eq_callback *cb,
339     struct gdma_queue **queue)
340 {
341 	struct gdma_queue_spec spec = {};
342 
343 	spec.type = GDMA_EQ;
344 	spec.monitor_avl_buf = false;
345 	spec.queue_size = queue_size;
346 	spec.eq.context = ctx;
347 	spec.eq.callback = cb;
348 	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
349 
350 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
351 }
352 
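/*
 * CQ callback: drain the completion queue, dispatch each completion to
 * the TX or RX event handler, and re-arm the CQ.
 */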
353 static void
354 mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
355 {
356 	struct hwc_rx_oob comp_data = {};
357 	struct gdma_comp *completions;
358 	struct hwc_cq *hwc_cq = ctx;
359 	int comp_read, i;
360 
361 	completions = hwc_cq->comp_buf;
362 	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
363 
364 	for (i = 0; i < comp_read; ++i) {
365 		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
366 
367 		if (completions[i].is_sq)
368 			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
369 			    completions[i].wq_num,
370 			    &comp_data);
371 		else
372 			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
373 			    completions[i].wq_num,
374 			    &comp_data);
375 	}
376 
377 	bus_dmamap_sync(q_self->mem_info.dma_tag, q_self->mem_info.dma_map,
378 	    BUS_DMASYNC_POSTREAD);
379 
380 	mana_gd_ring_cq(q_self, SET_ARM_BIT);
381 }
382 
383 static void
384 mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
385 {
386 	if (!hwc_cq)
387 		return;
388 
389 	if (hwc_cq->comp_buf)
390 		free(hwc_cq->comp_buf, M_DEVBUF);
391 
392 	if (hwc_cq->gdma_cq)
393 		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
394 
395 	if (hwc_cq->gdma_eq)
396 		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
397 
398 	free(hwc_cq, M_DEVBUF);
399 }
400 
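/*
 * Create the HWC completion path: an EQ, a CQ attached to it and a
 * completion buffer sized for q_depth entries, wired up to the given
 * RX/TX event handlers.
 */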
401 static int
402 mana_hwc_create_cq(struct hw_channel_context *hwc,
403     uint16_t q_depth,
404     gdma_eq_callback *callback, void *ctx,
405     hwc_rx_event_handler_t *rx_ev_hdlr, void *rx_ev_ctx,
406     hwc_tx_event_handler_t *tx_ev_hdlr, void *tx_ev_ctx,
407     struct hwc_cq **hwc_cq_ptr)
408 {
409 	struct gdma_queue *eq, *cq;
410 	struct gdma_comp *comp_buf;
411 	struct hwc_cq *hwc_cq;
412 	uint32_t eq_size, cq_size;
413 	int err;
414 
415 	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
416 	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
417 		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
418 
419 	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
420 	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
421 		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
422 
423 	hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
424 	if (!hwc_cq)
425 		return ENOMEM;
426 
427 	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
428 	if (err) {
429 		device_printf(hwc->dev,
430 		    "Failed to create HWC EQ for RQ: %d\n", err);
431 		goto out;
432 	}
433 	hwc_cq->gdma_eq = eq;
434 
435 	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq,
436 	    mana_hwc_comp_event, eq, &cq);
437 	if (err) {
438 		device_printf(hwc->dev,
439 		    "Failed to create HWC CQ for RQ: %d\n", err);
440 		goto out;
441 	}
442 	hwc_cq->gdma_cq = cq;
443 
444 	comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
445 	    M_DEVBUF, M_WAITOK | M_ZERO);
446 	if (!comp_buf) {
447 		err = ENOMEM;
448 		goto out;
449 	}
450 
451 	hwc_cq->hwc = hwc;
452 	hwc_cq->comp_buf = comp_buf;
453 	hwc_cq->queue_depth = q_depth;
454 	hwc_cq->rx_event_handler = rx_ev_hdlr;
455 	hwc_cq->rx_event_ctx = rx_ev_ctx;
456 	hwc_cq->tx_event_handler = tx_ev_hdlr;
457 	hwc_cq->tx_event_ctx = tx_ev_ctx;
458 
459 	*hwc_cq_ptr = hwc_cq;
460 	return 0;
461 out:
462 	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
463 	return err;
464 }
465 
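/*
 * Allocate one DMA region holding q_depth message buffers of
 * max_msg_size bytes each, and set up a work request per buffer.
 */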
466 static int
467 mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
468     uint32_t max_msg_size,
469     struct hwc_dma_buf **dma_buf_ptr)
470 {
471 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
472 	struct hwc_work_request *hwc_wr;
473 	struct hwc_dma_buf *dma_buf;
474 	struct gdma_mem_info *gmi;
475 	uint32_t buf_size;
476 	uint8_t *base_pa;
477 	void *virt_addr;
478 	uint16_t i;
479 	int err;
480 
481 	dma_buf = malloc(sizeof(*dma_buf) +
482 	    q_depth * sizeof(struct hwc_work_request),
483 	    M_DEVBUF, M_WAITOK | M_ZERO);
484 	if (!dma_buf)
485 		return ENOMEM;
486 
487 	dma_buf->num_reqs = q_depth;
488 
489 	buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);
490 
491 	gmi = &dma_buf->mem_info;
492 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
493 	if (err) {
494 		device_printf(hwc->dev,
495 		    "Failed to allocate DMA buffer: %d\n", err);
496 		goto out;
497 	}
498 
499 	virt_addr = dma_buf->mem_info.virt_addr;
500 	base_pa = (uint8_t *)dma_buf->mem_info.dma_handle;
501 
502 	for (i = 0; i < q_depth; i++) {
503 		hwc_wr = &dma_buf->reqs[i];
504 
505 		hwc_wr->buf_va = (char *)virt_addr + i * max_msg_size;
506 		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
507 
508 		hwc_wr->buf_len = max_msg_size;
509 	}
510 
511 	*dma_buf_ptr = dma_buf;
512 	return 0;
513 out:
514 	free(dma_buf, M_DEVBUF);
515 	return err;
516 }
517 
518 static void
519 mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
520     struct hwc_dma_buf *dma_buf)
521 {
522 	if (!dma_buf)
523 		return;
524 
525 	mana_gd_free_memory(&dma_buf->mem_info);
526 
527 	free(dma_buf, M_DEVBUF);
528 }
529 
530 static void
531 mana_hwc_destroy_wq(struct hw_channel_context *hwc,
532     struct hwc_wq *hwc_wq)
533 {
534 	if (!hwc_wq)
535 		return;
536 
537 	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
538 
539 	if (hwc_wq->gdma_wq)
540 		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
541 		    hwc_wq->gdma_wq);
542 
543 	free(hwc_wq, M_DEVBUF);
544 }
545 
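/*
 * Create an HWC work queue (SQ or RQ) together with the DMA message
 * buffers its WQEs will point at.
 */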
546 static int
547 mana_hwc_create_wq(struct hw_channel_context *hwc,
548     enum gdma_queue_type q_type, uint16_t q_depth,
549     uint32_t max_msg_size, struct hwc_cq *hwc_cq,
550     struct hwc_wq **hwc_wq_ptr)
551 {
552 	struct gdma_queue *queue;
553 	struct hwc_wq *hwc_wq;
554 	uint32_t queue_size;
555 	int err;
556 
557 	if (q_type != GDMA_SQ && q_type != GDMA_RQ) {
558 		mana_warn(NULL, "Invalid q_type %u\n", q_type);
559 		return EINVAL;
560 	}
561 
562 	if (q_type == GDMA_RQ)
563 		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
564 	else
565 		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
566 
567 	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
568 		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
569 
570 	hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
571 	if (!hwc_wq)
572 		return ENOMEM;
573 
574 	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
575 	if (err)
576 		goto out;
577 
578 	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
579 	    &hwc_wq->msg_buf);
580 	if (err)
581 		goto out;
582 
583 	hwc_wq->hwc = hwc;
584 	hwc_wq->gdma_wq = queue;
585 	hwc_wq->queue_depth = q_depth;
586 	hwc_wq->hwc_cq = hwc_cq;
587 
588 	*hwc_wq_ptr = hwc_wq;
589 	return 0;
590 out:
591 	if (err)
592 		mana_hwc_destroy_wq(hwc, hwc_wq);
593 	return err;
594 }
595 
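/*
 * Build the HWC TX OOB and a single-SGE send WQE for a request buffer,
 * then post it and ring the SQ doorbell.
 */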
596 static int
597 mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
598     struct hwc_work_request *req,
599     uint32_t dest_virt_rq_id, uint32_t dest_virt_rcq_id,
600     bool dest_pf)
601 {
602 	device_t dev = hwc_txq->hwc->dev;
603 	struct hwc_tx_oob *tx_oob;
604 	struct gdma_sge *sge;
605 	int err;
606 
607 	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
608 		device_printf(dev, "wrong msg_size: %u, buf_len: %u\n",
609 		    req->msg_size, req->buf_len);
610 		return EINVAL;
611 	}
612 
613 	tx_oob = &req->tx_oob;
614 
615 	tx_oob->vrq_id = dest_virt_rq_id;
616 	tx_oob->dest_vfid = 0;
617 	tx_oob->vrcq_id = dest_virt_rcq_id;
618 	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
619 	tx_oob->loopback = false;
620 	tx_oob->lso_override = false;
621 	tx_oob->dest_pf = dest_pf;
622 	tx_oob->vsq_id = hwc_txq->gdma_wq->id;
623 
624 	sge = &req->sge;
625 	sge->address = (uint64_t)req->buf_sge_addr;
626 	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
627 	sge->size = req->msg_size;
628 
629 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
630 	req->wqe_req.sgl = sge;
631 	req->wqe_req.num_sge = 1;
632 	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
633 	req->wqe_req.inline_oob_data = tx_oob;
634 	req->wqe_req.client_data_unit = 0;
635 
636 	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
637 	if (err)
638 		device_printf(dev,
639 		    "Failed to post WQE on HWC SQ: %d\n", err);
640 	return err;
641 }
642 
643 static int
644 mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, uint16_t num_msg)
645 {
646 	int err;
647 
648 	sema_init(&hwc->sema, num_msg, "gdma hwc sema");
649 
650 	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res,
651 	    "gdma hwc res lock");
652 	if (err)
653 		device_printf(hwc->dev,
654 		    "Failed to init inflight_msg_res: %d\n", err);
655 
656 	return (err);
657 }
658 
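/*
 * Final bring-up step: post all receive WQEs, allocate the per-message
 * caller contexts and verify that the EQ delivers interrupts.
 */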
659 static int
660 mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
661     uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
662 {
663 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
664 	struct hwc_wq *hwc_rxq = hwc->rxq;
665 	struct hwc_work_request *req;
666 	struct hwc_caller_ctx *ctx;
667 	int err;
668 	int i;
669 
670 	/* Post all WQEs on the RQ */
671 	for (i = 0; i < q_depth; i++) {
672 		req = &hwc_rxq->msg_buf->reqs[i];
673 		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
674 		if (err)
675 			return err;
676 	}
677 
678 	ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
679 	    M_DEVBUF, M_WAITOK | M_ZERO);
680 	if (!ctx)
681 		return ENOMEM;
682 
683 	for (i = 0; i < q_depth; ++i)
684 		init_completion(&ctx[i].comp_event);
685 
686 	hwc->caller_ctx = ctx;
687 
688 	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
689 }
690 
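/*
 * Pass the HWC queue addresses to the SoC over the shared memory
 * channel and wait for the INIT_DONE event, then record the negotiated
 * queue depth and message size limits and register the HWC CQ.
 */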
691 static int
692 mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
693     uint32_t *max_req_msg_size,
694     uint32_t *max_resp_msg_size)
695 {
696 	struct hw_channel_context *hwc = gc->hwc.driver_data;
697 	struct gdma_queue *rq = hwc->rxq->gdma_wq;
698 	struct gdma_queue *sq = hwc->txq->gdma_wq;
699 	struct gdma_queue *eq = hwc->cq->gdma_eq;
700 	struct gdma_queue *cq = hwc->cq->gdma_cq;
701 	int err;
702 
703 	init_completion(&hwc->hwc_init_eqe_comp);
704 
705 	err = mana_smc_setup_hwc(&gc->shm_channel, false,
706 	    eq->mem_info.dma_handle,
707 	    cq->mem_info.dma_handle,
708 	    rq->mem_info.dma_handle,
709 	    sq->mem_info.dma_handle,
710 	    eq->eq.msix_index);
711 	if (err)
712 		return err;
713 
714 	if (wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * hz))
715 		return ETIMEDOUT;
716 
717 	*q_depth = hwc->hwc_init_q_depth_max;
718 	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
719 	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
720 
721 	if (cq->id >= gc->max_num_cqs) {
722 		mana_warn(NULL, "invalid cq id %u >= %u\n",
723 		    cq->id, gc->max_num_cqs);
724 		return EPROTO;
725 	}
726 
727 	gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
728 	    M_DEVBUF, M_WAITOK | M_ZERO);
729 	if (!gc->cq_table)
730 		return ENOMEM;
731 
732 	gc->cq_table[cq->id] = cq;
733 
734 	return 0;
735 }
736 
737 static int
738 mana_hwc_init_queues(struct hw_channel_context *hwc, uint16_t q_depth,
739     uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
740 {
741 	struct hwc_wq *hwc_rxq = NULL;
742 	struct hwc_wq *hwc_txq = NULL;
743 	struct hwc_cq *hwc_cq = NULL;
744 	int err;
745 
746 	err = mana_hwc_init_inflight_msg(hwc, q_depth);
747 	if (err)
748 		return err;
749 
750 	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
751 	 * queue depth and RQ queue depth.
752 	 */
753 	err = mana_hwc_create_cq(hwc, q_depth * 2,
754 	    mana_hwc_init_event_handler, hwc,
755 	    mana_hwc_rx_event_handler, hwc,
756 	    mana_hwc_tx_event_handler, hwc, &hwc_cq);
757 	if (err) {
758 		device_printf(hwc->dev, "Failed to create HWC CQ: %d\n", err);
759 		goto out;
760 	}
761 	hwc->cq = hwc_cq;
762 
763 	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
764 	    hwc_cq, &hwc_rxq);
765 	if (err) {
766 		device_printf(hwc->dev, "Failed to create HWC RQ: %d\n", err);
767 		goto out;
768 	}
769 	hwc->rxq = hwc_rxq;
770 
771 	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
772 	    hwc_cq, &hwc_txq);
773 	if (err) {
774 		device_printf(hwc->dev, "Failed to create HWC SQ: %d\n", err);
775 		goto out;
776 	}
777 	hwc->txq = hwc_txq;
778 
779 	hwc->num_inflight_msg = q_depth;
780 	hwc->max_req_msg_size = max_req_msg_size;
781 
782 	return 0;
783 out:
784 	if (hwc_txq)
785 		mana_hwc_destroy_wq(hwc, hwc_txq);
786 
787 	if (hwc_rxq)
788 		mana_hwc_destroy_wq(hwc, hwc_rxq);
789 
790 	if (hwc_cq)
791 		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
792 
793 	mana_gd_free_res_map(&hwc->inflight_msg_res);
794 	return err;
795 }
796 
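/*
 * Create the hardware channel used for all management communication
 * with the SoC: allocate the context, build the queues with the
 * bootstrap limits, establish the channel and run a basic self-test.
 */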
797 int
798 mana_hwc_create_channel(struct gdma_context *gc)
799 {
800 	uint32_t max_req_msg_size, max_resp_msg_size;
801 	struct gdma_dev *gd = &gc->hwc;
802 	struct hw_channel_context *hwc;
803 	uint16_t q_depth_max;
804 	int err;
805 
806 	hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
807 	if (!hwc)
808 		return ENOMEM;
809 
810 	gd->gdma_context = gc;
811 	gd->driver_data = hwc;
812 	hwc->gdma_dev = gd;
813 	hwc->dev = gc->dev;
814 
815 	/* HWC's instance number is always 0. */
816 	gd->dev_id.as_uint32 = 0;
817 	gd->dev_id.type = GDMA_DEVICE_HWC;
818 
819 	gd->pdid = INVALID_PDID;
820 	gd->doorbell = INVALID_DOORBELL;
821 
822 	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
823 	    HW_CHANNEL_MAX_REQUEST_SIZE,
824 	    HW_CHANNEL_MAX_RESPONSE_SIZE);
825 	if (err) {
826 		device_printf(hwc->dev, "Failed to initialize HWC: %d\n",
827 		    err);
828 		goto out;
829 	}
830 
831 	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
832 	    &max_resp_msg_size);
833 	if (err) {
834 		device_printf(hwc->dev, "Failed to establish HWC: %d\n", err);
835 		goto out;
836 	}
837 
838 	err = mana_hwc_test_channel(gc->hwc.driver_data,
839 	    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
840 	    max_req_msg_size, max_resp_msg_size);
841 	if (err) {
842 		/* Test failed, but the channel has been established */
843 		device_printf(hwc->dev, "Failed to test HWC: %d\n", err);
844 		return EIO;
845 	}
846 
847 	return 0;
848 out:
849 	free(hwc, M_DEVBUF);
850 	return (err);
851 }
852 
853 void
854 mana_hwc_destroy_channel(struct gdma_context *gc)
855 {
856 	struct hw_channel_context *hwc = gc->hwc.driver_data;
857 	struct hwc_caller_ctx *ctx;
858 
859 	mana_smc_teardown_hwc(&gc->shm_channel, false);
860 
861 	ctx = hwc->caller_ctx;
862 	free(ctx, M_DEVBUF);
863 	hwc->caller_ctx = NULL;
864 
865 	mana_hwc_destroy_wq(hwc, hwc->txq);
866 	hwc->txq = NULL;
867 
868 	mana_hwc_destroy_wq(hwc, hwc->rxq);
869 	hwc->rxq = NULL;
870 
871 	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
872 	hwc->cq = NULL;
873 
874 	mana_gd_free_res_map(&hwc->inflight_msg_res);
875 
876 	hwc->num_inflight_msg = 0;
877 
878 	if (hwc->gdma_dev->pdid != INVALID_PDID) {
879 		hwc->gdma_dev->doorbell = INVALID_DOORBELL;
880 		hwc->gdma_dev->pdid = INVALID_PDID;
881 	}
882 
883 	free(hwc, M_DEVBUF);
884 	gc->hwc.driver_data = NULL;
885 	gc->hwc.gdma_context = NULL;
886 }
887 
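/*
 * Send a management request over the HWC and wait up to 30 seconds for
 * the matching response, which the RX path copies into 'resp'. 'req' is
 * expected to start with a struct gdma_req_hdr; its message ID is
 * filled in here. A non-zero HWC status code is reported as EPROTO.
 */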
888 int
889 mana_hwc_send_request(struct hw_channel_context *hwc, uint32_t req_len,
890     const void *req, uint32_t resp_len, void *resp)
891 {
892 	struct hwc_work_request *tx_wr;
893 	struct hwc_wq *txq = hwc->txq;
894 	struct gdma_req_hdr *req_msg;
895 	struct hwc_caller_ctx *ctx;
896 	uint16_t msg_id;
897 	int err;
898 
899 	mana_hwc_get_msg_index(hwc, &msg_id);
900 
901 	tx_wr = &txq->msg_buf->reqs[msg_id];
902 
903 	if (req_len > tx_wr->buf_len) {
904 		device_printf(hwc->dev,
905 		    "HWC: req msg size: %u > %u\n", req_len,
906 		    tx_wr->buf_len);
907 		err = EINVAL;
908 		goto out;
909 	}
910 
911 	ctx = hwc->caller_ctx + msg_id;
912 	ctx->output_buf = resp;
913 	ctx->output_buflen = resp_len;
914 
915 	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
916 	if (req)
917 		memcpy(req_msg, req, req_len);
918 
919 	req_msg->req.hwc_msg_id = msg_id;
920 
921 	tx_wr->msg_size = req_len;
922 
923 	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
924 	if (err) {
925 		device_printf(hwc->dev,
926 		    "HWC: Failed to post send WQE: %d\n", err);
927 		goto out;
928 	}
929 
930 	if (wait_for_completion_timeout(&ctx->comp_event, 30 * hz)) {
931 		device_printf(hwc->dev, "HWC: Request timed out!\n");
932 		err = ETIMEDOUT;
933 		goto out;
934 	}
935 
936 	if (ctx->error) {
937 		err = ctx->error;
938 		goto out;
939 	}
940 
941 	if (ctx->status_code) {
942 		device_printf(hwc->dev,
943 		    "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code);
944 		err = EPROTO;
945 		goto out;
946 	}
947 out:
948 	mana_hwc_put_msg_index(hwc, msg_id);
949 	return err;
950 }
951