xref: /linux/drivers/net/ethernet/microsoft/mana/hw_channel.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

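/* Reserve a message ID for an in-flight request. The semaphore is
 * initialized to the number of slots in mana_hwc_init_inflight_msg(), so
 * once down() returns, a zero bit is guaranteed to exist and the bitmap
 * search below cannot fail.
 */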
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

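/* Release a message ID reserved by mana_hwc_get_msg_index() and wake up one
 * waiter blocked on the in-flight-message semaphore.
 */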
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

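/* Sanity-check a received response: it must be at least a full
 * gdma_resp_hdr and must fit in the caller's output buffer.
 */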
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

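/* Post a single-SGE receive WQE on the HWC RQ. This is used both to prime
 * the RQ at channel-test time and to repost a buffer after each response
 * has been consumed.
 */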
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

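/* Match a received response to the caller context indexed by its message
 * ID, validate it, copy it into the caller's output buffer, then repost the
 * receive buffer and wake the sender waiting in mana_hwc_send_request().
 */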
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 struct hwc_work_request *rx_req)
{
	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;

	/* Must post rx wqe before complete(), otherwise the next rx may
	 * hit no_wqe error.
	 */
	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);

	complete(&ctx->comp_event);
}

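/* EQ callback. During channel bring-up the device reports queue IDs,
 * doorbell, depth and size limits one EQE at a time; this handler records
 * each value and signals hwc_init_eqe_comp when GDMA_EQE_HWC_INIT_DONE
 * arrives. After init it also handles SOC reconfiguration (e.g. a new HWC
 * timeout) and SOC service events such as RDMA suspend/resume.
 */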
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	union hwc_init_soc_service_type service_data;
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;
	int ret;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;
	case GDMA_EQE_HWC_SOC_SERVICE:
		service_data.as_uint32 = event->details[0];
		type = service_data.type;

		switch (type) {
		case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
		case GDMA_SERVICE_TYPE_RDMA_RESUME:
			ret = mana_rdma_service_event(gd->gdma_context, type);
			if (ret)
				dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
					ret);
			break;
		default:
			dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
			break;
		}

		break;
	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

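/* CQ callback for the HWC RQ: recover the hwc_work_request that owns the
 * completed receive buffer by reading back the SGE address from the posted
 * WQE and computing its offset from the DMA buffer base, then hand the
 * response (whose length arrives in rx_oob->tx_oob_data_size) to
 * mana_hwc_handle_resp().
 */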
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);

	/* Can no longer use 'resp', because the buffer is posted to the HW
	 * in mana_hwc_handle_resp() above.
	 */
	resp = NULL;
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
	spec.eq.msix_index = 0;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

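/* Shared CQ callback: drain up to queue_depth completions in one pass,
 * dispatch each one to the TX or RX event handler based on its is_sq flag,
 * then re-arm the CQ so the next completion raises a new event.
 */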
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						completions[i].wq_num,
						&comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						completions[i].wq_num,
						&comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

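/* Create the HWC completion path: a GDMA EQ, a CQ attached to it as parent,
 * and a polling buffer big enough for q_depth completions. The EQ gets the
 * caller-supplied callback; the CQ gets mana_hwc_comp_event(), which
 * dispatches to the rx/tx handlers registered here. On any failure,
 * everything allocated so far is torn down via mana_hwc_destroy_cq().
 */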
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MANA_MIN_QSIZE)
		eq_size = MANA_MIN_QSIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MANA_MIN_QSIZE)
		cq_size = MANA_MIN_QSIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

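/* Allocate one page-aligned DMA region of q_depth * max_msg_size bytes and
 * carve it into q_depth work requests, each recording the virtual address,
 * DMA address and length of its slice. mana_hwc_rx_event_handler() relies
 * on this linear layout to map an SGE address back to its work request.
 */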
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
			buf_size, err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

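/* Create a HWC work queue (SQ or RQ): size the GDMA queue for q_depth
 * maximum-sized WQEs, create it, and attach a DMA message buffer with one
 * max_msg_size slot per queue entry.
 */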
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MANA_MIN_QSIZE)
		queue_size = MANA_MIN_QSIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);

	dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
		queue_size, q_type, err);
	return err;
}

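/* Build and post a request WQE on the HWC SQ. The destination queue IDs and
 * routing flags travel in the inline hwc_tx_oob; the message itself is
 * referenced by a single SGE pointing into the SQ's DMA message buffer.
 */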
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

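/* Prime the RQ with one receive WQE per slot, allocate the per-message
 * caller contexts, and verify the interrupt/event path end to end by
 * testing the EQ.
 */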
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

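/* Hand the EQ/CQ/RQ/SQ DMA addresses to the device over the shared-memory
 * channel, then wait (up to 60 s) for GDMA_EQE_HWC_INIT_DONE, by which time
 * mana_hwc_init_event_handler() has recorded the negotiated queue depth and
 * message-size limits reported back here. gc->cq_table is allocated now
 * that max_num_cqs is known.
 */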
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

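/* Allocate the in-flight-message tracking, the shared CQ (sized for both
 * queues), and the RQ/SQ with their message buffers. Nothing here touches
 * the device; cleanup on failure is left to mana_hwc_create_channel().
 */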
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

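/* Bring up the hardware channel in three steps: build the software-side
 * queues with conservative bootstrap sizes, perform the shared-memory
 * handshake with the device, then prime and test the channel using the
 * limits the device reported back during the handshake.
 */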
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

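/* Synchronous request/response over the hardware channel: reserve a message
 * ID (blocking if all slots are in flight), copy the request into that
 * slot's DMA buffer, post it, and sleep until mana_hwc_handle_resp()
 * signals completion or hwc_timeout expires. By the time the completion
 * fires, the response has already been copied into 'resp'.
 */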
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
		if (hwc->hwc_timeout != 0)
			dev_err(hwc->dev, "HWC: Request timed out!\n");

		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
			err = -EOPNOTSUPP;
			goto out;
		}
		if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
			dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
				ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}