// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>

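/* Reserve a message ID for a new HWC request: take a slot on the
 * inflight-message semaphore, then claim the first free bit of the
 * inflight_msg_res bitmap under its lock.
 */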
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;

	return 0;
}

static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}

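/* Sanity-check a response: it must be at least a gdma_resp_hdr and must
 * fit in the caller-provided output buffer.
 */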
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;

	return 0;
}

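/* (Re)post a receive WQE so the hardware has a buffer available for the
 * next response on the HWC RQ.
 */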
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

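/* Copy a validated response into the caller's output buffer, repost the
 * RX WQE, and wake up the thread waiting in mana_hwc_send_request().
 */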
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 struct hwc_work_request *rx_req)
{
	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;

	/* Must post rx wqe before complete(), otherwise the next rx may
	 * hit no_wqe error.
	 */
	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);

	complete(&ctx->comp_event);
}

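/* EQ callback used while the channel is being established: record the
 * queue IDs, limits and other parameters that the SoC reports through
 * GDMA_EQE_HWC_INIT_* events, and also handle runtime reconfiguration
 * and SoC service events.
 */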
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	union hwc_init_soc_service_type service_data;
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;
	int ret;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;

		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;
	case GDMA_EQE_HWC_SOC_SERVICE:
		service_data.as_uint32 = event->details[0];
		type = service_data.type;

		switch (type) {
		case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
		case GDMA_SERVICE_TYPE_RDMA_RESUME:
			ret = mana_rdma_service_event(gd->gdma_context, type);
			if (ret)
				dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
					ret);
			break;
		default:
			dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
			break;
		}

		break;
	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

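/* CQ callback for the HWC RQ: locate the work request that produced this
 * completion from the SGE address in the WQE, validate the message ID,
 * and hand the response to mana_hwc_handle_resp().
 */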
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);

	/* Can no longer use 'resp', because the buffer is posted to the HW
	 * in mana_hwc_handle_resp() above.
	 */
	resp = NULL;
}

static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}

static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
	spec.eq.msix_index = 0;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

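/* CQ callback: drain the completions, dispatch each one to the TX or RX
 * event handler, then re-arm the CQ.
 */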
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	kfree(hwc_cq);
}

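/* Create the HWC completion path: one EQ, one CQ attached to it, and a
 * completion buffer large enough to poll q_depth completions at a time.
 */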
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MANA_MIN_QSIZE)
		eq_size = MANA_MIN_QSIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MANA_MIN_QSIZE)
		cq_size = MANA_MIN_QSIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

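/* Allocate one contiguous DMA buffer and carve it into q_depth fixed-size
 * message slots, one hwc_work_request per slot.
 */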
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
			buf_size, err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}

static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	kfree(dma_buf);
}

static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);

	kfree(hwc_wq);
}

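/* Create an HWC work queue (SQ or RQ) plus the DMA message buffer that
 * backs its WQEs.
 */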
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MANA_MIN_QSIZE)
		queue_size = MANA_MIN_QSIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);

	dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
		queue_size, q_type, err);
	return err;
}

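/* Post a request on the HWC SQ. The hwc_tx_oob inline OOB carries the
 * destination virtual RQ/RCQ IDs and the source SQ/CQ IDs.
 */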
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}

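/* Post all RX WQEs, allocate the per-message caller contexts, and verify
 * that the EQ can deliver events.
 */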
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

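/* Tell the SoC about the HWC queues through the shared-memory channel and
 * wait for GDMA_EQE_HWC_INIT_DONE; the init events fill in the negotiated
 * queue depth and message-size limits returned to the caller.
 */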
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

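/* Allocate the software side of the channel: inflight-message tracking,
 * the shared CQ/EQ, and the RQ/SQ with their message buffers.
 */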
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}

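/* Bring up the hardware channel: allocate the queues, establish the channel
 * with the SoC, then verify it with a test event on the EQ.
 */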
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);

	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);

	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}

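/* Send a request over the hardware channel and wait (up to hwc_timeout ms)
 * for the response, which mana_hwc_handle_resp() copies into @resp.
 */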
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 (msecs_to_jiffies(hwc->hwc_timeout)))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}