/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

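/*
 * Hardware channel (HWC): the request/response queue pair used by the
 * driver to exchange management messages with the hardware.
 */
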
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include "mana.h"
#include "hw_channel.h"

static int
mana_hwc_get_msg_index(struct hw_channel_context *hwc, uint16_t *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	uint32_t index;

	sema_wait(&hwc->sema);

	mtx_lock_spin(&r->lock_spin);

	index = find_first_zero_bit(hwc->inflight_msg_res.map,
	    hwc->inflight_msg_res.size);

	bitmap_set(hwc->inflight_msg_res.map, index, 1);

	mtx_unlock_spin(&r->lock_spin);

	*msg_id = index;

	return 0;
}

static void
mana_hwc_put_msg_index(struct hw_channel_context *hwc, uint16_t msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;

	mtx_lock_spin(&r->lock_spin);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	mtx_unlock_spin(&r->lock_spin);

	sema_post(&hwc->sema);
}

static int
mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
    const struct gdma_resp_hdr *resp_msg,
    uint32_t resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return EPROTO;

	if (resp_len > caller_ctx->output_buflen)
		return EPROTO;

	return 0;
}

static void
mana_hwc_handle_resp(struct hw_channel_context *hwc, uint32_t resp_len,
    const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
	    hwc->inflight_msg_res.map)) {
		device_printf(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
		    resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;

	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
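	/* Wake up the requester blocked in mana_hwc_send_request(). */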
	complete(&ctx->comp_event);
}

static int
mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
    struct hwc_work_request *req)
{
	device_t dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (uint64_t)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}

static void
mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
    struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	uint32_t type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;

		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;

		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (uint16_t)val;
			break;

		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;

		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;

		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;

		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}

static void
mana_hwc_rx_event_handler(void *ctx, uint32_t gdma_rxq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	uint64_t rq_base_addr;
	uint64_t rx_req_idx;
	uint8_t *wqe;

	if (hwc_rxq->gdma_wq->id != gdma_rxq_id) {
		mana_warn(NULL, "unmatched rx queue %u != %u\n",
		    hwc_rxq->gdma_wq->id, gdma_rxq_id);
		return;
	}

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	bus_dmamap_sync(rq->mem_info.dma_tag, rq->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		device_printf(hwc->dev, "HWC RX: wrong msg_id=%u\n",
		    resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Do not use 'resp' again, because the buffer is posted back to
	 * the HW in mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_PREREAD);

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}

static void
mana_hwc_tx_event_handler(void *ctx, uint32_t gdma_txq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	if (!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id) {
		mana_warn(NULL, "unmatched tx queue %u != %u\n",
		    hwc_txq ? hwc_txq->gdma_wq->id : 0, gdma_txq_id);
		return;
	}

	bus_dmamap_sync(hwc_txq->gdma_wq->mem_info.dma_tag,
	    hwc_txq->gdma_wq->mem_info.dma_map,
	    BUS_DMASYNC_POSTWRITE);
}

static int
mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type type, uint64_t queue_size,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_cq_callback *cb,
    struct gdma_queue *parent_eq,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_eq_callback *cb,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

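/*
 * Completion callback for the HWC CQ, which is shared by the SQ and RQ:
 * poll the CQ and dispatch each completion to the TX or RX handler.
 */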
static void
mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
	}

	bus_dmamap_sync(q_self->mem_info.dma_tag, q_self->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	mana_gd_arm_cq(q_self);
}

static void
mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	if (!hwc_cq)
		return;

	if (hwc_cq->comp_buf)
		free(hwc_cq->comp_buf, M_DEVBUF);

	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);

	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	free(hwc_cq, M_DEVBUF);
}

static int
mana_hwc_create_cq(struct hw_channel_context *hwc,
    uint16_t q_depth,
    gdma_eq_callback *callback, void *ctx,
    hwc_rx_event_handler_t *rx_ev_hdlr, void *rx_ev_ctx,
    hwc_tx_event_handler_t *tx_ev_hdlr, void *tx_ev_ctx,
    struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	uint32_t eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc_cq)
		return ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq,
	    mana_hwc_comp_event, eq, &cq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!comp_buf) {
		err = ENOMEM;
		goto out;
	}

	hwc_cq->hwc = hwc;
	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}

static int
mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_msg_size,
    struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	uint32_t buf_size;
	uint8_t *base_pa;
	void *virt_addr;
	uint16_t i;
	int err;

	dma_buf = malloc(sizeof(*dma_buf) +
	    q_depth * sizeof(struct hwc_work_request),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!dma_buf)
		return ENOMEM;

	dma_buf->num_reqs = q_depth;

	buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (uint8_t *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = (char *)virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;

		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	free(dma_buf, M_DEVBUF);
	return err;
}

static void
mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
    struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);

	free(dma_buf, M_DEVBUF);
}

static void
mana_hwc_destroy_wq(struct hw_channel_context *hwc,
    struct hwc_wq *hwc_wq)
{
	if (!hwc_wq)
		return;

	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
		    hwc_wq->gdma_wq);

	free(hwc_wq, M_DEVBUF);
}

static int
mana_hwc_create_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type q_type, uint16_t q_depth,
    uint32_t max_msg_size, struct hwc_cq *hwc_cq,
    struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	uint32_t queue_size;
	int err;

	if (q_type != GDMA_SQ && q_type != GDMA_RQ) {
		/* XXX should fail and return error? */
		mana_warn(NULL, "Invalid q_type %u\n", q_type);
	}

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc_wq)
		return ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
	    &hwc_wq->msg_buf);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	if (err)
		mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}

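/*
 * Build a one-SGE TX WQE for 'req': the inline hwc_tx_oob names the
 * destination virtual RQ/RCQ and the source SQ/SCQ, and the SGE points
 * at the pre-mapped request buffer.
 */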
static int
mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
    struct hwc_work_request *req,
    uint32_t dest_virt_rq_id, uint32_t dest_virt_rcq_id,
    bool dest_pf)
{
	device_t dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		device_printf(dev, "wrong msg_size: %u, buf_len: %u\n",
		    req->msg_size, req->buf_len);
		return EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (uint64_t)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int
mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, uint16_t num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg, "gdma hwc sema");

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res,
	    "gdma hwc res lock");
	if (err)
		device_printf(hwc->dev,
		    "Failed to init inflight_msg_res: %d\n", err);

	return (err);
}

static int
mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!ctx)
		return ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}

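/*
 * Pass the HWC queue addresses to the hardware over the shared memory
 * channel, then wait for GDMA_EQE_HWC_INIT_DONE; the queue ids and
 * channel limits arrive via mana_hwc_init_event_handler().
 */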
static int
mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
    uint32_t *max_req_msg_size,
    uint32_t *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
	    eq->mem_info.dma_handle,
	    cq->mem_info.dma_handle,
	    rq->mem_info.dma_handle,
	    sq->mem_info.dma_handle,
	    eq->eq.msix_index);
	if (err)
		return err;

	if (wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * hz))
		return ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	if (cq->id >= gc->max_num_cqs) {
		mana_warn(NULL, "invalid cq id %u > %u\n",
		    cq->id, gc->max_num_cqs);
		return EPROTO;
	}

	gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (!gc->cq_table)
		return ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int
mana_hwc_init_queues(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	struct hwc_wq *hwc_rxq = NULL;
	struct hwc_wq *hwc_txq = NULL;
	struct hwc_cq *hwc_cq = NULL;
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
	    mana_hwc_init_event_handler, hwc,
	    mana_hwc_rx_event_handler, hwc,
	    mana_hwc_tx_event_handler, hwc, &hwc_cq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}
	hwc->cq = hwc_cq;

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
	    hwc_cq, &hwc_rxq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}
	hwc->rxq = hwc_rxq;

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
	    hwc_cq, &hwc_txq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}
	hwc->txq = hwc_txq;

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	if (hwc_txq)
		mana_hwc_destroy_wq(hwc, hwc_txq);

	if (hwc_rxq)
		mana_hwc_destroy_wq(hwc, hwc_rxq);

	if (hwc_cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);
	return err;
}

int
mana_hwc_create_channel(struct gdma_context *gc)
{
	uint32_t max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	uint16_t q_depth_max;
	int err;

	hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
	if (!hwc)
		return ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    HW_CHANNEL_MAX_REQUEST_SIZE,
	    HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		device_printf(hwc->dev, "Failed to initialize HWC: %d\n",
		    err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
	    &max_resp_msg_size);
	if (err) {
		device_printf(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
	    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    max_req_msg_size, max_resp_msg_size);
	if (err) {
		/* Test failed, but the channel has been established */
		device_printf(hwc->dev, "Failed to test HWC: %d\n", err);
		return EIO;
	}

	return 0;
out:
	free(hwc, M_DEVBUF);
	return (err);
}

void
mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct hwc_caller_ctx *ctx;

	mana_smc_teardown_hwc(&gc->shm_channel, false);

	ctx = hwc->caller_ctx;
	free(ctx, M_DEVBUF);
	hwc->caller_ctx = NULL;

	mana_hwc_destroy_wq(hwc, hwc->txq);
	hwc->txq = NULL;

	mana_hwc_destroy_wq(hwc, hwc->rxq);
	hwc->rxq = NULL;

	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
	hwc->cq = NULL;

	mana_gd_free_res_map(&hwc->inflight_msg_res);

	hwc->num_inflight_msg = 0;

	if (hwc->gdma_dev->pdid != INVALID_PDID) {
		hwc->gdma_dev->doorbell = INVALID_DOORBELL;
		hwc->gdma_dev->pdid = INVALID_PDID;
	}

	free(hwc, M_DEVBUF);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;
}

int
mana_hwc_send_request(struct hw_channel_context *hwc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	uint16_t msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

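	/* Each message id owns a dedicated, pre-mapped TX request buffer. */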
	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		device_printf(hwc->dev,
		    "HWC: req msg size: %u > %u\n", req_len,
		    tx_wr->buf_len);
		err = EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	if (req)
		memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		device_printf(hwc->dev,
		    "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (wait_for_completion_timeout(&ctx->comp_event, 30 * hz)) {
		device_printf(hwc->dev, "HWC: Request timed out!\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code) {
		device_printf(hwc->dev,
		    "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code);
		err = EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}