/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_service_creq(unsigned long data);

/* Hardware communication channel */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u16 cbit;
	int rc;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
	return rc ? 0 : -ETIMEDOUT;
}

static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
	u16 cbit;

	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (!test_bit(cbit, rcfw->cmdq_bitmap))
		goto done;
	do {
		mdelay(1); /* 1 msec */
		bnxt_qplib_service_creq((unsigned long)rcfw);
	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done:
	return count ? 0 : -ETIMEDOUT;
}
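
/* Send a command over the CMDQ channel.
 *
 * Each command carries a cookie derived from rcfw->seq_num: the low bits
 * select a slot in cmdq_bitmap/crsqe_tbl, and RCFW_CMD_IS_BLOCKING is
 * folded in for blocking callers so the completion path knows whether a
 * waiter must be woken.  Requests larger than one 16-byte cmdqe are
 * written into consecutive slots before the doorbell is rung.
 */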
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}

	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				 BNXT_QPLIB_CMDQE_UNITS;
	}

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* Make sure the cmdqe writes are visible before
	 * ringing the CMDQ doorbell.
	 */
	wmb();
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
			  rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
				   rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* The response is delivered asynchronously through crsqe->resp */
	return 0;
}
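
/* bnxt_qplib_rcfw_send_message() retries __send_message() while the
 * CMDQ is full (-EAGAIN) or the target slot is busy (-EBUSY), then waits
 * for the matching CREQ completion: by polling when is_block is set, or
 * by sleeping on rcfw->waitq otherwise.
 *
 * Typical usage, as in bnxt_qplib_deinit_rcfw() below:
 *
 *	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 *					  (void *)&resp, NULL, 0);
 */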
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
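/* A CREQ QP event is either an asynchronous error notification from the
 * firmware or the completion of an earlier CMDQ command; the two are
 * distinguished by the event type, and command completions are matched
 * back to their originator via the cookie echoed in the event.
 */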
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16 mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/* Command Response */
		spin_lock_irqsave(&cmdq->lock, flags);
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie == mcookie) {
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}

/* SP - CREQ Completion handlers */
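/* Poll up to CREQ_ENTRY_POLL_BUDGET entries from the CREQ per tasklet
 * run.  An entry is consumed only after CREQ_CMP_VALID() confirms its
 * valid bit, and the consumer-index doorbell is re-armed only when at
 * least one entry was processed.
 */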
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "QPLIB: aeqe: %#x Not handled", type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: creqe with op_event = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base **creq_ptr;
	u32 sw_cons;

	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(creq->cons, creq);
	creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);

	tasklet_schedule(&rcfw->worker);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct cmdq_deinitialize_fw req;
	struct creq_deinitialize_fw_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
{
	return (pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
		pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
		pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
		pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
		pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
		pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
				CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
}
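
/* INITIALIZE_FW hands the firmware the page directories and sizes of
 * the host-resident context tables (QPC, MRW, SRQ, CQ, TIM and TQM).
 * Each *_pg_size_*_lvl field packs the PBL indirection level together
 * with the page-size encoding produced by __get_pbl_pg_idx() above.
 */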
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct cmdq_initialize_fw req;
	struct creq_initialize_fw_resp resp;
	u16 cmd_flags = 0, level;
	int rc;

	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/* VFs need not set up the HW context area; the PF sets up
	 * this area on behalf of the VF, so skip the HW programming.
	 */
	if (is_virtfn)
		goto skip_ctx_setup;

	level = ctx->qpc_tbl.level;
	req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
	level = ctx->mrw_tbl.level;
	req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
	level = ctx->srqc_tbl.level;
	req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
	level = ctx->cq_tbl.level;
	req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
				__get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
	level = ctx->tim_tbl.level;
	req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
	level = ctx->tqm_pde_level;
	req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
				  __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);

	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  int qp_tbl_sz)
{
	rcfw->pdev = pdev;
	rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
				      &rcfw->creq.max_elements,
				      BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_L2_CMPL)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CREQ allocation failed");
		goto fail;
	}
	rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
	if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
				      &rcfw->cmdq.max_elements,
				      BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
				      HWQ_TYPE_CTX)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: HW channel CMDQ allocation failed");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	rcfw->qp_tbl_size = qp_tbl_sz;
	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}
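
/* CREQ IRQ/tasklet lifecycle: stop_irq() masks the CREQ doorbell, syncs
 * with any in-flight handler and releases the vector; start_irq() either
 * creates the tasklet on first use or re-enables it, requests the vector
 * and re-arms the CREQ doorbell.
 */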
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	tasklet_disable(&rcfw->worker);
	/* Mask h/w interrupts */
	CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		rcfw->creq.max_elements);
	/* Sync with last running IRQ-handler */
	synchronize_irq(rcfw->vector);
	if (kill)
		tasklet_kill(&rcfw->worker);

	if (rcfw->requested) {
		free_irq(rcfw->vector, rcfw);
		rcfw->requested = false;
	}
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	unsigned long indx;

	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	if (rcfw->cmdq_bar_reg_iomem)
		iounmap(rcfw->cmdq_bar_reg_iomem);
	rcfw->cmdq_bar_reg_iomem = NULL;

	if (rcfw->creq_bar_reg_iomem)
		iounmap(rcfw->creq_bar_reg_iomem);
	rcfw->creq_bar_reg_iomem = NULL;

	indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
	if (indx != rcfw->bmap_size)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
	kfree(rcfw->cmdq_bitmap);
	rcfw->bmap_size = 0;

	rcfw->aeq_handler = NULL;
	rcfw->vector = 0;
}

int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	int rc;

	if (rcfw->requested)
		return -EFAULT;

	rcfw->vector = msix_vector;
	if (need_init)
		tasklet_init(&rcfw->worker,
			     bnxt_qplib_service_creq, (unsigned long)rcfw);
	else
		tasklet_enable(&rcfw->worker);
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc)
		return rc;
	rcfw->requested = true;
	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
		      rcfw->creq.max_elements);

	return 0;
}

int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *, void *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD) *
		    sizeof(unsigned long);
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
						RCFW_PF_COMM_PROD_OFFSET;

	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;
	init_waitqueue_head(&rcfw->waitq);

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}
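
/* Side buffers carry command responses that do not fit in a CREQ entry
 * (e.g. query output).  The buffer's DMA address and size are passed to
 * the firmware inside the request itself; see the resp_addr/resp_size
 * handling in __send_message().
 */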
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;

	sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
	if (!sbuf)
		return NULL;

	sbuf->size = size;
	sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
				       &sbuf->dma_addr, GFP_ATOMIC);
	if (!sbuf->sb)
		goto bail;

	return sbuf;
bail:
	kfree(sbuf);
	return NULL;
}

void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf)
{
	if (sbuf->sb)
		dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
				  sbuf->sb, sbuf->dma_addr);
	kfree(sbuf);
}