xref: /linux/drivers/net/ethernet/microsoft/mana/hw_channel.c (revision b8e4b0529d59a3ccd0b25a31d3cfc8b0f3b34068)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <net/mana/gdma.h>
5 #include <net/mana/hw_channel.h>
6 #include <linux/vmalloc.h>
7 
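/* Reserve a free in-flight message slot. The semaphore bounds the number of
 * concurrent requests to the queue depth; the bitmap, under r->lock, picks
 * the actual slot, which doubles as the HWC message id.
 */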
8 static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
9 {
10 	struct gdma_resource *r = &hwc->inflight_msg_res;
11 	unsigned long flags;
12 	u32 index;
13 
14 	down(&hwc->sema);
15 
16 	spin_lock_irqsave(&r->lock, flags);
17 
18 	index = find_first_zero_bit(hwc->inflight_msg_res.map,
19 				    hwc->inflight_msg_res.size);
20 
21 	bitmap_set(hwc->inflight_msg_res.map, index, 1);
22 
23 	spin_unlock_irqrestore(&r->lock, flags);
24 
25 	*msg_id = index;
26 
27 	return 0;
28 }
29 
30 static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
31 {
32 	struct gdma_resource *r = &hwc->inflight_msg_res;
33 	unsigned long flags;
34 
35 	spin_lock_irqsave(&r->lock, flags);
36 	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
37 	spin_unlock_irqrestore(&r->lock, flags);
38 
39 	up(&hwc->sema);
40 }
41 
42 static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
43 				    const struct gdma_resp_hdr *resp_msg,
44 				    u32 resp_len)
45 {
46 	if (resp_len < sizeof(*resp_msg))
47 		return -EPROTO;
48 
49 	if (resp_len > caller_ctx->output_buflen)
50 		return -EPROTO;
51 
52 	return 0;
53 }
54 
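/* Dispatch a response received on the HWC RQ: look up the caller context by
 * the message id carried in the response header, copy the payload into the
 * caller's output buffer, and wake up the waiter in mana_hwc_send_request().
 */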
55 static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
56 				 const struct gdma_resp_hdr *resp_msg)
57 {
58 	struct hwc_caller_ctx *ctx;
59 	int err;
60 
61 	if (!test_bit(resp_msg->response.hwc_msg_id,
62 		      hwc->inflight_msg_res.map)) {
63 		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
64 			resp_msg->response.hwc_msg_id);
65 		return;
66 	}
67 
68 	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
69 	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
70 	if (err)
71 		goto out;
72 
73 	ctx->status_code = resp_msg->status;
74 
75 	memcpy(ctx->output_buf, resp_msg, resp_len);
76 out:
77 	ctx->error = err;
78 	complete(&ctx->comp_event);
79 }
80 
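/* (Re)post one receive buffer as a single-SGE WQE on the HWC RQ and ring its
 * doorbell, making the buffer available to the hardware for the next message.
 */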
81 static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
82 				struct hwc_work_request *req)
83 {
84 	struct device *dev = hwc_rxq->hwc->dev;
85 	struct gdma_sge *sge;
86 	int err;
87 
88 	sge = &req->sge;
89 	sge->address = (u64)req->buf_sge_addr;
90 	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
91 	sge->size = req->buf_len;
92 
93 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
94 	req->wqe_req.sgl = sge;
95 	req->wqe_req.num_sge = 1;
96 	req->wqe_req.client_data_unit = 0;
97 
98 	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
99 	if (err)
100 		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
101 	return err;
102 }
103 
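/* EQ callback used while the channel is brought up: the SoC reports the EQ id
 * and doorbell, the CQ/RQ/SQ ids, queue depth, maximum message sizes, PD id,
 * memory key and PF destination queue ids through these EQEs, and signals the
 * end of initialization with GDMA_EQE_HWC_INIT_DONE. It also handles runtime
 * GDMA_EQE_HWC_SOC_RECONFIG_DATA events (currently only the HWC timeout).
 */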
104 static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
105 					struct gdma_event *event)
106 {
107 	struct hw_channel_context *hwc = ctx;
108 	struct gdma_dev *gd = hwc->gdma_dev;
109 	union hwc_init_type_data type_data;
110 	union hwc_init_eq_id_db eq_db;
111 	u32 type, val;
112 
113 	switch (event->type) {
114 	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
115 		eq_db.as_uint32 = event->details[0];
116 		hwc->cq->gdma_eq->id = eq_db.eq_id;
117 		gd->doorbell = eq_db.doorbell;
118 		break;
119 
120 	case GDMA_EQE_HWC_INIT_DATA:
121 		type_data.as_uint32 = event->details[0];
122 		type = type_data.type;
123 		val = type_data.value;
124 
125 		switch (type) {
126 		case HWC_INIT_DATA_CQID:
127 			hwc->cq->gdma_cq->id = val;
128 			break;
129 
130 		case HWC_INIT_DATA_RQID:
131 			hwc->rxq->gdma_wq->id = val;
132 			break;
133 
134 		case HWC_INIT_DATA_SQID:
135 			hwc->txq->gdma_wq->id = val;
136 			break;
137 
138 		case HWC_INIT_DATA_QUEUE_DEPTH:
139 			hwc->hwc_init_q_depth_max = (u16)val;
140 			break;
141 
142 		case HWC_INIT_DATA_MAX_REQUEST:
143 			hwc->hwc_init_max_req_msg_size = val;
144 			break;
145 
146 		case HWC_INIT_DATA_MAX_RESPONSE:
147 			hwc->hwc_init_max_resp_msg_size = val;
148 			break;
149 
150 		case HWC_INIT_DATA_MAX_NUM_CQS:
151 			gd->gdma_context->max_num_cqs = val;
152 			break;
153 
154 		case HWC_INIT_DATA_PDID:
155 			hwc->gdma_dev->pdid = val;
156 			break;
157 
158 		case HWC_INIT_DATA_GPA_MKEY:
159 			hwc->rxq->msg_buf->gpa_mkey = val;
160 			hwc->txq->msg_buf->gpa_mkey = val;
161 			break;
162 
163 		case HWC_INIT_DATA_PF_DEST_RQ_ID:
164 			hwc->pf_dest_vrq_id = val;
165 			break;
166 
167 		case HWC_INIT_DATA_PF_DEST_CQ_ID:
168 			hwc->pf_dest_vrcq_id = val;
169 			break;
170 		}
171 
172 		break;
173 
174 	case GDMA_EQE_HWC_INIT_DONE:
175 		complete(&hwc->hwc_init_eqe_comp);
176 		break;
177 
178 	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
179 		type_data.as_uint32 = event->details[0];
180 		type = type_data.type;
181 		val = type_data.value;
182 
183 		switch (type) {
184 		case HWC_DATA_CFG_HWC_TIMEOUT:
185 			hwc->hwc_timeout = val;
186 			break;
187 
188 		default:
189 			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
190 			break;
191 		}
192 
193 		break;
194 
195 	default:
196 		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
197 		/* Ignore unknown events, which should never happen. */
198 		break;
199 	}
200 }
201 
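/* CQE handler for the HWC RQ: recover the work request that owns the
 * completed buffer from its SGE address, validate and dispatch the response,
 * then repost the buffer to the RQ.
 */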
202 static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
203 				      const struct hwc_rx_oob *rx_oob)
204 {
205 	struct hw_channel_context *hwc = ctx;
206 	struct hwc_wq *hwc_rxq = hwc->rxq;
207 	struct hwc_work_request *rx_req;
208 	struct gdma_resp_hdr *resp;
209 	struct gdma_wqe *dma_oob;
210 	struct gdma_queue *rq;
211 	struct gdma_sge *sge;
212 	u64 rq_base_addr;
213 	u64 rx_req_idx;
214 	u8 *wqe;
215 
216 	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
217 		return;
218 
219 	rq = hwc_rxq->gdma_wq;
220 	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
221 	dma_oob = (struct gdma_wqe *)wqe;
222 
223 	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);
224 
 225 	/* Find the RX work request that owns this buffer, for its virtual address and for reposting. */
226 	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
227 	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;
228 
229 	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
230 	resp = (struct gdma_resp_hdr *)rx_req->buf_va;
231 
232 	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
233 		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
234 			resp->response.hwc_msg_id);
235 		return;
236 	}
237 
238 	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
239 
 240 	/* Stop using 'resp' here: the buffer is about to be reposted to the HW
 241 	 * by mana_hwc_post_rx_wqe() below.
 242 	 */
243 	resp = NULL;
244 
245 	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
246 }
247 
248 static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
249 				      const struct hwc_rx_oob *rx_oob)
250 {
251 	struct hw_channel_context *hwc = ctx;
252 	struct hwc_wq *hwc_txq = hwc->txq;
253 
254 	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
255 }
256 
257 static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
258 				   enum gdma_queue_type type, u64 queue_size,
259 				   struct gdma_queue **queue)
260 {
261 	struct gdma_queue_spec spec = {};
262 
263 	if (type != GDMA_SQ && type != GDMA_RQ)
264 		return -EINVAL;
265 
266 	spec.type = type;
267 	spec.monitor_avl_buf = false;
268 	spec.queue_size = queue_size;
269 
270 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
271 }
272 
273 static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
274 				   u64 queue_size,
275 				   void *ctx, gdma_cq_callback *cb,
276 				   struct gdma_queue *parent_eq,
277 				   struct gdma_queue **queue)
278 {
279 	struct gdma_queue_spec spec = {};
280 
281 	spec.type = GDMA_CQ;
282 	spec.monitor_avl_buf = false;
283 	spec.queue_size = queue_size;
284 	spec.cq.context = ctx;
285 	spec.cq.callback = cb;
286 	spec.cq.parent_eq = parent_eq;
287 
288 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
289 }
290 
291 static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
292 				   u64 queue_size,
293 				   void *ctx, gdma_eq_callback *cb,
294 				   struct gdma_queue **queue)
295 {
296 	struct gdma_queue_spec spec = {};
297 
298 	spec.type = GDMA_EQ;
299 	spec.monitor_avl_buf = false;
300 	spec.queue_size = queue_size;
301 	spec.eq.context = ctx;
302 	spec.eq.callback = cb;
303 	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
304 	spec.eq.msix_index = 0;
305 
306 	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
307 }
308 
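/* CQ callback: drain up to queue_depth completions, route each one to the TX
 * or RX event handler depending on is_sq, then re-arm the CQ.
 */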
309 static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
310 {
311 	struct hwc_rx_oob comp_data = {};
312 	struct gdma_comp *completions;
313 	struct hwc_cq *hwc_cq = ctx;
314 	int comp_read, i;
315 
316 	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);
317 
318 	completions = hwc_cq->comp_buf;
319 	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
320 	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
321 
322 	for (i = 0; i < comp_read; ++i) {
323 		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;
324 
325 		if (completions[i].is_sq)
326 			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
327 						completions[i].wq_num,
328 						&comp_data);
329 		else
330 			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
331 						completions[i].wq_num,
332 						&comp_data);
333 	}
334 
335 	mana_gd_ring_cq(q_self, SET_ARM_BIT);
336 }
337 
338 static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
339 {
340 	kfree(hwc_cq->comp_buf);
341 
342 	if (hwc_cq->gdma_cq)
343 		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
344 
345 	if (hwc_cq->gdma_eq)
346 		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
347 
348 	kfree(hwc_cq);
349 }
350 
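/* Create the HWC completion path: a dedicated EQ, a CQ attached to it that
 * calls mana_hwc_comp_event(), and a buffer able to hold q_depth completions.
 */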
351 static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
352 			      gdma_eq_callback *callback, void *ctx,
353 			      hwc_rx_event_handler_t *rx_ev_hdlr,
354 			      void *rx_ev_ctx,
355 			      hwc_tx_event_handler_t *tx_ev_hdlr,
356 			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
357 {
358 	struct gdma_queue *eq, *cq;
359 	struct gdma_comp *comp_buf;
360 	struct hwc_cq *hwc_cq;
361 	u32 eq_size, cq_size;
362 	int err;
363 
364 	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
365 	if (eq_size < MANA_MIN_QSIZE)
366 		eq_size = MANA_MIN_QSIZE;
367 
368 	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
369 	if (cq_size < MANA_MIN_QSIZE)
370 		cq_size = MANA_MIN_QSIZE;
371 
372 	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
373 	if (!hwc_cq)
374 		return -ENOMEM;
375 
376 	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
377 	if (err) {
378 		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
379 		goto out;
380 	}
381 	hwc_cq->gdma_eq = eq;
382 
383 	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
384 				      eq, &cq);
385 	if (err) {
386 		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
387 		goto out;
388 	}
389 	hwc_cq->gdma_cq = cq;
390 
391 	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
392 	if (!comp_buf) {
393 		err = -ENOMEM;
394 		goto out;
395 	}
396 
397 	hwc_cq->hwc = hwc;
398 	hwc_cq->comp_buf = comp_buf;
399 	hwc_cq->queue_depth = q_depth;
400 	hwc_cq->rx_event_handler = rx_ev_hdlr;
401 	hwc_cq->rx_event_ctx = rx_ev_ctx;
402 	hwc_cq->tx_event_handler = tx_ev_hdlr;
403 	hwc_cq->tx_event_ctx = tx_ev_ctx;
404 
405 	*hwc_cq_ptr = hwc_cq;
406 	return 0;
407 out:
408 	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
409 	return err;
410 }
411 
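/* Allocate one DMA buffer big enough for q_depth messages of max_msg_size and
 * carve it into per-work-request slots (virtual address, DMA address, length).
 */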
412 static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
413 				  u32 max_msg_size,
414 				  struct hwc_dma_buf **dma_buf_ptr)
415 {
416 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
417 	struct hwc_work_request *hwc_wr;
418 	struct hwc_dma_buf *dma_buf;
419 	struct gdma_mem_info *gmi;
420 	void *virt_addr;
421 	u32 buf_size;
422 	u8 *base_pa;
423 	int err;
424 	u16 i;
425 
426 	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
427 	if (!dma_buf)
428 		return -ENOMEM;
429 
430 	dma_buf->num_reqs = q_depth;
431 
432 	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
433 
434 	gmi = &dma_buf->mem_info;
435 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
436 	if (err) {
437 		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
438 		goto out;
439 	}
440 
441 	virt_addr = dma_buf->mem_info.virt_addr;
442 	base_pa = (u8 *)dma_buf->mem_info.dma_handle;
443 
444 	for (i = 0; i < q_depth; i++) {
445 		hwc_wr = &dma_buf->reqs[i];
446 
447 		hwc_wr->buf_va = virt_addr + i * max_msg_size;
448 		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
449 
450 		hwc_wr->buf_len = max_msg_size;
451 	}
452 
453 	*dma_buf_ptr = dma_buf;
454 	return 0;
455 out:
456 	kfree(dma_buf);
457 	return err;
458 }
459 
460 static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
461 				     struct hwc_dma_buf *dma_buf)
462 {
463 	if (!dma_buf)
464 		return;
465 
466 	mana_gd_free_memory(&dma_buf->mem_info);
467 
468 	kfree(dma_buf);
469 }
470 
471 static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
472 				struct hwc_wq *hwc_wq)
473 {
474 	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
475 
476 	if (hwc_wq->gdma_wq)
477 		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
478 				      hwc_wq->gdma_wq);
479 
480 	kfree(hwc_wq);
481 }
482 
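/* Create an HWC work queue (SQ or RQ) together with the DMA message buffers
 * backing its work requests; max_msg_size sets the per-request buffer size.
 */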
483 static int mana_hwc_create_wq(struct hw_channel_context *hwc,
484 			      enum gdma_queue_type q_type, u16 q_depth,
485 			      u32 max_msg_size, struct hwc_cq *hwc_cq,
486 			      struct hwc_wq **hwc_wq_ptr)
487 {
488 	struct gdma_queue *queue;
489 	struct hwc_wq *hwc_wq;
490 	u32 queue_size;
491 	int err;
492 
493 	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);
494 
495 	if (q_type == GDMA_RQ)
496 		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
497 	else
498 		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
499 
500 	if (queue_size < MANA_MIN_QSIZE)
501 		queue_size = MANA_MIN_QSIZE;
502 
503 	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
504 	if (!hwc_wq)
505 		return -ENOMEM;
506 
507 	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
508 	if (err)
509 		goto out;
510 
511 	hwc_wq->hwc = hwc;
512 	hwc_wq->gdma_wq = queue;
513 	hwc_wq->queue_depth = q_depth;
514 	hwc_wq->hwc_cq = hwc_cq;
515 
516 	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
517 				     &hwc_wq->msg_buf);
518 	if (err)
519 		goto out;
520 
521 	*hwc_wq_ptr = hwc_wq;
522 	return 0;
523 out:
524 	if (err)
525 		mana_hwc_destroy_wq(hwc, hwc_wq);
526 	return err;
527 }
528 
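/* Build and post a send WQE for one request: fill the HWC TX OOB (source and
 * destination queue ids) as inline OOB data, point the single SGE at the
 * request buffer, and ring the SQ doorbell.
 */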
529 static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
530 				struct hwc_work_request *req,
531 				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
532 				bool dest_pf)
533 {
534 	struct device *dev = hwc_txq->hwc->dev;
535 	struct hwc_tx_oob *tx_oob;
536 	struct gdma_sge *sge;
537 	int err;
538 
539 	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
540 		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
541 			req->msg_size, req->buf_len);
542 		return -EINVAL;
543 	}
544 
545 	tx_oob = &req->tx_oob;
546 
547 	tx_oob->vrq_id = dest_virt_rq_id;
548 	tx_oob->dest_vfid = 0;
549 	tx_oob->vrcq_id = dest_virt_rcq_id;
550 	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
551 	tx_oob->loopback = false;
552 	tx_oob->lso_override = false;
553 	tx_oob->dest_pf = dest_pf;
554 	tx_oob->vsq_id = hwc_txq->gdma_wq->id;
555 
556 	sge = &req->sge;
557 	sge->address = (u64)req->buf_sge_addr;
558 	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
559 	sge->size = req->msg_size;
560 
561 	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
562 	req->wqe_req.sgl = sge;
563 	req->wqe_req.num_sge = 1;
564 	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
565 	req->wqe_req.inline_oob_data = tx_oob;
566 	req->wqe_req.client_data_unit = 0;
567 
568 	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
569 	if (err)
570 		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
571 	return err;
572 }
573 
574 static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
575 				      u16 num_msg)
576 {
577 	int err;
578 
579 	sema_init(&hwc->sema, num_msg);
580 
581 	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
582 	if (err)
583 		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
584 	return err;
585 }
586 
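/* Prime the established channel: post a receive WQE for every RQ buffer,
 * allocate the per-message caller contexts, and exercise the EQ via
 * mana_gd_test_eq().
 */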
587 static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
588 				 u32 max_req_msg_size, u32 max_resp_msg_size)
589 {
590 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
591 	struct hwc_wq *hwc_rxq = hwc->rxq;
592 	struct hwc_work_request *req;
593 	struct hwc_caller_ctx *ctx;
594 	int err;
595 	int i;
596 
597 	/* Post all WQEs on the RQ */
598 	for (i = 0; i < q_depth; i++) {
599 		req = &hwc_rxq->msg_buf->reqs[i];
600 		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
601 		if (err)
602 			return err;
603 	}
604 
605 	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
606 	if (!ctx)
607 		return -ENOMEM;
608 
609 	for (i = 0; i < q_depth; ++i)
610 		init_completion(&ctx[i].comp_event);
611 
612 	hwc->caller_ctx = ctx;
613 
614 	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
615 }
616 
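/* Hand the EQ/CQ/RQ/SQ base addresses and the EQ's MSI-X index to the SoC via
 * the shared-memory channel, wait (up to 60 seconds) for GDMA_EQE_HWC_INIT_DONE,
 * then publish the negotiated limits and register the HWC CQ in gc->cq_table.
 */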
617 static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
618 				      u32 *max_req_msg_size,
619 				      u32 *max_resp_msg_size)
620 {
621 	struct hw_channel_context *hwc = gc->hwc.driver_data;
622 	struct gdma_queue *rq = hwc->rxq->gdma_wq;
623 	struct gdma_queue *sq = hwc->txq->gdma_wq;
624 	struct gdma_queue *eq = hwc->cq->gdma_eq;
625 	struct gdma_queue *cq = hwc->cq->gdma_cq;
626 	int err;
627 
628 	init_completion(&hwc->hwc_init_eqe_comp);
629 
630 	err = mana_smc_setup_hwc(&gc->shm_channel, false,
631 				 eq->mem_info.dma_handle,
632 				 cq->mem_info.dma_handle,
633 				 rq->mem_info.dma_handle,
634 				 sq->mem_info.dma_handle,
635 				 eq->eq.msix_index);
636 	if (err)
637 		return err;
638 
639 	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
640 		return -ETIMEDOUT;
641 
642 	*q_depth = hwc->hwc_init_q_depth_max;
643 	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
644 	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
645 
 646 	/* Both cq->id and gc->max_num_cqs were set in mana_hwc_init_event_handler(). */
647 	if (WARN_ON(cq->id >= gc->max_num_cqs))
648 		return -EPROTO;
649 
650 	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
651 	if (!gc->cq_table)
652 		return -ENOMEM;
653 
654 	gc->cq_table[cq->id] = cq;
655 
656 	return 0;
657 }
658 
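/* Allocate the software side of the channel: in-flight message tracking, the
 * shared CQ (sized for both SQ and RQ completions), then the RQ and the SQ
 * with their message buffers. Nothing here touches the HWC device yet.
 */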
659 static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
660 				u32 max_req_msg_size, u32 max_resp_msg_size)
661 {
662 	int err;
663 
664 	err = mana_hwc_init_inflight_msg(hwc, q_depth);
665 	if (err)
666 		return err;
667 
668 	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
669 	 * queue depth and RQ queue depth.
670 	 */
671 	err = mana_hwc_create_cq(hwc, q_depth * 2,
672 				 mana_hwc_init_event_handler, hwc,
673 				 mana_hwc_rx_event_handler, hwc,
674 				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
675 	if (err) {
676 		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
677 		goto out;
678 	}
679 
680 	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
681 				 hwc->cq, &hwc->rxq);
682 	if (err) {
683 		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
684 		goto out;
685 	}
686 
687 	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
688 				 hwc->cq, &hwc->txq);
689 	if (err) {
690 		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
691 		goto out;
692 	}
693 
694 	hwc->num_inflight_msg = q_depth;
695 	hwc->max_req_msg_size = max_req_msg_size;
696 
697 	return 0;
698 out:
 699 	/* mana_hwc_create_channel() will do the cleanup. */
700 	return err;
701 }
702 
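/* Bring up the HW channel, the control path used for management requests to
 * the device: create the queues, establish the channel with the SoC, and run
 * a quick self-test.
 */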
703 int mana_hwc_create_channel(struct gdma_context *gc)
704 {
705 	u32 max_req_msg_size, max_resp_msg_size;
706 	struct gdma_dev *gd = &gc->hwc;
707 	struct hw_channel_context *hwc;
708 	u16 q_depth_max;
709 	int err;
710 
711 	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
712 	if (!hwc)
713 		return -ENOMEM;
714 
715 	gd->gdma_context = gc;
716 	gd->driver_data = hwc;
717 	hwc->gdma_dev = gd;
718 	hwc->dev = gc->dev;
719 	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;
720 
721 	/* HWC's instance number is always 0. */
722 	gd->dev_id.as_uint32 = 0;
723 	gd->dev_id.type = GDMA_DEVICE_HWC;
724 
725 	gd->pdid = INVALID_PDID;
726 	gd->doorbell = INVALID_DOORBELL;
727 
728 	/* mana_hwc_init_queues() only creates the required data structures,
729 	 * and doesn't touch the HWC device.
730 	 */
731 	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
732 				   HW_CHANNEL_MAX_REQUEST_SIZE,
733 				   HW_CHANNEL_MAX_RESPONSE_SIZE);
734 	if (err) {
735 		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
736 		goto out;
737 	}
738 
739 	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
740 					 &max_resp_msg_size);
741 	if (err) {
742 		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
743 		goto out;
744 	}
745 
746 	err = mana_hwc_test_channel(gc->hwc.driver_data,
747 				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
748 				    max_req_msg_size, max_resp_msg_size);
749 	if (err) {
750 		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
751 		goto out;
752 	}
753 
754 	return 0;
755 out:
756 	mana_hwc_destroy_channel(gc);
757 	return err;
758 }
759 
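/* Tear down the HW channel in reverse order of creation. This is safe to call
 * on a partially initialized channel; mana_hwc_create_channel() relies on that
 * for its error cleanup.
 */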
760 void mana_hwc_destroy_channel(struct gdma_context *gc)
761 {
762 	struct hw_channel_context *hwc = gc->hwc.driver_data;
763 
764 	if (!hwc)
765 		return;
766 
767 	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
768 	 * non-zero, the HWC worked and we should tear down the HWC here.
769 	 */
770 	if (gc->max_num_cqs > 0) {
771 		mana_smc_teardown_hwc(&gc->shm_channel, false);
772 		gc->max_num_cqs = 0;
773 	}
774 
775 	kfree(hwc->caller_ctx);
776 	hwc->caller_ctx = NULL;
777 
778 	if (hwc->txq)
779 		mana_hwc_destroy_wq(hwc, hwc->txq);
780 
781 	if (hwc->rxq)
782 		mana_hwc_destroy_wq(hwc, hwc->rxq);
783 
784 	if (hwc->cq)
785 		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
786 
787 	mana_gd_free_res_map(&hwc->inflight_msg_res);
788 
789 	hwc->num_inflight_msg = 0;
790 
791 	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
792 	hwc->gdma_dev->pdid = INVALID_PDID;
793 
794 	hwc->hwc_timeout = 0;
795 
796 	kfree(hwc);
797 	gc->hwc.driver_data = NULL;
798 	gc->hwc.gdma_context = NULL;
799 
800 	vfree(gc->cq_table);
801 	gc->cq_table = NULL;
802 }
803 
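/* Send one request over the HW channel and synchronously wait for the
 * response or a timeout. 'resp' must be able to hold resp_len bytes; on
 * success it contains the full response including the gdma_resp_hdr.
 *
 * Illustrative caller sketch only (loosely modeled on the query-max-resources
 * request; in practice requests usually go through mana_gd_send_request(),
 * which wraps this function):
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_hwc_send_request(hwc, sizeof(req), &req,
 *				    sizeof(resp), &resp);
 */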
804 int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
805 			  const void *req, u32 resp_len, void *resp)
806 {
807 	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
808 	struct hwc_work_request *tx_wr;
809 	struct hwc_wq *txq = hwc->txq;
810 	struct gdma_req_hdr *req_msg;
811 	struct hwc_caller_ctx *ctx;
812 	u32 dest_vrcq = 0;
813 	u32 dest_vrq = 0;
814 	u16 msg_id;
815 	int err;
816 
817 	mana_hwc_get_msg_index(hwc, &msg_id);
818 
819 	tx_wr = &txq->msg_buf->reqs[msg_id];
820 
821 	if (req_len > tx_wr->buf_len) {
 822 		dev_err(hwc->dev, "HWC: req msg size: %u > %u\n", req_len,
823 			tx_wr->buf_len);
824 		err = -EINVAL;
825 		goto out;
826 	}
827 
828 	ctx = hwc->caller_ctx + msg_id;
829 	ctx->output_buf = resp;
830 	ctx->output_buflen = resp_len;
831 
832 	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
833 	if (req)
834 		memcpy(req_msg, req, req_len);
835 
836 	req_msg->req.hwc_msg_id = msg_id;
837 
838 	tx_wr->msg_size = req_len;
839 
840 	if (gc->is_pf) {
841 		dest_vrq = hwc->pf_dest_vrq_id;
842 		dest_vrcq = hwc->pf_dest_vrcq_id;
843 	}
844 
845 	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
846 	if (err) {
847 		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
848 		goto out;
849 	}
850 
851 	if (!wait_for_completion_timeout(&ctx->comp_event,
852 					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
853 		dev_err(hwc->dev, "HWC: Request timed out!\n");
854 		err = -ETIMEDOUT;
855 		goto out;
856 	}
857 
858 	if (ctx->error) {
859 		err = ctx->error;
860 		goto out;
861 	}
862 
863 	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
864 		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
865 			ctx->status_code);
866 		err = -EPROTO;
867 		goto out;
868 	}
869 out:
870 	mana_hwc_put_msg_index(hwc, msg_id);
871 	return err;
872 }
873