1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: RDMA Controller HW interface 37 */ 38 39 #define dev_fmt(fmt) "QPLIB: " fmt 40 41 #include <linux/interrupt.h> 42 #include <linux/spinlock.h> 43 #include <linux/pci.h> 44 #include <linux/prefetch.h> 45 #include <linux/delay.h> 46 47 #include "roce_hsi.h" 48 #include "qplib_res.h" 49 #include "qplib_rcfw.h" 50 #include "qplib_sp.h" 51 #include "qplib_fp.h" 52 53 static void bnxt_qplib_service_creq(unsigned long data); 54 55 /* Hardware communication channel */ 56 static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 57 { 58 u16 cbit; 59 int rc; 60 61 cbit = cookie % rcfw->cmdq_depth; 62 rc = wait_event_timeout(rcfw->waitq, 63 !test_bit(cbit, rcfw->cmdq_bitmap), 64 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); 65 return rc ? 0 : -ETIMEDOUT; 66 }; 67 68 static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 69 { 70 u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; 71 u16 cbit; 72 73 cbit = cookie % rcfw->cmdq_depth; 74 if (!test_bit(cbit, rcfw->cmdq_bitmap)) 75 goto done; 76 do { 77 mdelay(1); /* 1m sec */ 78 bnxt_qplib_service_creq((unsigned long)rcfw); 79 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); 80 done: 81 return count ? 0 : -ETIMEDOUT; 82 }; 83 84 static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, 85 struct creq_base *resp, void *sb, u8 is_block) 86 { 87 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; 88 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 89 u32 cmdq_depth = rcfw->cmdq_depth; 90 struct bnxt_qplib_crsq *crsqe; 91 u32 sw_prod, cmdq_prod; 92 unsigned long flags; 93 u32 size, opcode; 94 u16 cookie, cbit; 95 u8 *preq; 96 97 opcode = req->opcode; 98 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 99 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && 100 opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW && 101 opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) { 102 dev_err(&rcfw->pdev->dev, 103 "RCFW not initialized, reject opcode 0x%x\n", opcode); 104 return -EINVAL; 105 } 106 107 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 108 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { 109 dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n"); 110 return -EINVAL; 111 } 112 113 if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags)) 114 return -ETIMEDOUT; 115 116 /* Cmdq are in 16-byte units, each request can consume 1 or more 117 * cmdqe 118 */ 119 spin_lock_irqsave(&cmdq->lock, flags); 120 if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { 121 dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n"); 122 spin_unlock_irqrestore(&cmdq->lock, flags); 123 return -EAGAIN; 124 } 125 126 127 cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; 128 cbit = cookie % rcfw->cmdq_depth; 129 if (is_block) 130 cookie |= RCFW_CMD_IS_BLOCKING; 131 132 set_bit(cbit, rcfw->cmdq_bitmap); 133 req->cookie = cpu_to_le16(cookie); 134 crsqe = &rcfw->crsqe_tbl[cbit]; 135 if (crsqe->resp) { 136 spin_unlock_irqrestore(&cmdq->lock, flags); 137 return -EBUSY; 138 } 139 memset(resp, 0, sizeof(*resp)); 140 crsqe->resp = (struct creq_qp_event *)resp; 141 crsqe->resp->cookie = req->cookie; 142 crsqe->req_size = req->cmd_size; 143 if (req->resp_size && sb) { 144 struct bnxt_qplib_rcfw_sbuf *sbuf = sb; 145 146 req->resp_addr = cpu_to_le64(sbuf->dma_addr); 147 req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / 148 BNXT_QPLIB_CMDQE_UNITS; 149 } 150 151 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 152 preq = (u8 *)req; 153 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; 154 do { 155 /* Locate the next cmdq slot */ 156 sw_prod = HWQ_CMP(cmdq->prod, cmdq); 157 cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)] 158 [get_cmdq_idx(sw_prod, cmdq_depth)]; 159 if (!cmdqe) { 160 dev_err(&rcfw->pdev->dev, 161 "RCFW request failed with no cmdqe!\n"); 162 goto done; 163 } 164 /* Copy a segment of the req cmd to the cmdq */ 165 memset(cmdqe, 0, sizeof(*cmdqe)); 166 memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe))); 167 preq += min_t(u32, size, sizeof(*cmdqe)); 168 size -= min_t(u32, size, sizeof(*cmdqe)); 169 cmdq->prod++; 170 rcfw->seq_num++; 171 } while (size > 0); 172 173 rcfw->seq_num++; 174 175 cmdq_prod = cmdq->prod; 176 if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) { 177 /* The very first doorbell write 178 * is required to set this flag 179 * which prompts the FW to reset 180 * its internal pointers 181 */ 182 cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG); 183 clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags); 184 } 185 186 /* ring CMDQ DB */ 187 wmb(); 188 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + 189 rcfw->cmdq_bar_reg_prod_off); 190 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + 191 rcfw->cmdq_bar_reg_trig_off); 192 done: 193 spin_unlock_irqrestore(&cmdq->lock, flags); 194 /* Return the CREQ response pointer */ 195 return 0; 196 } 197 198 int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 199 struct cmdq_base *req, 200 struct creq_base *resp, 201 void *sb, u8 is_block) 202 { 203 struct creq_qp_event *evnt = (struct creq_qp_event *)resp; 204 u16 cookie; 205 u8 opcode, retry_cnt = 0xFF; 206 int rc = 0; 207 208 do { 209 opcode = req->opcode; 210 rc = __send_message(rcfw, req, resp, sb, is_block); 211 cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; 212 if (!rc) 213 break; 214 215 if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { 216 /* send failed */ 217 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n", 218 cookie, opcode); 219 return rc; 220 } 221 is_block ? mdelay(1) : usleep_range(500, 1000); 222 223 } while (retry_cnt--); 224 225 if (is_block) 226 rc = __block_for_resp(rcfw, cookie); 227 else 228 rc = __wait_for_resp(rcfw, cookie); 229 if (rc) { 230 /* timed out */ 231 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n", 232 cookie, opcode, RCFW_CMD_WAIT_TIME_MS); 233 set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags); 234 return rc; 235 } 236 237 if (evnt->status) { 238 /* failed with status */ 239 dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n", 240 cookie, opcode, evnt->status); 241 rc = -EFAULT; 242 } 243 244 return rc; 245 } 246 /* Completions */ 247 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, 248 struct creq_func_event *func_event) 249 { 250 switch (func_event->event) { 251 case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR: 252 break; 253 case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR: 254 break; 255 case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR: 256 break; 257 case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR: 258 break; 259 case CREQ_FUNC_EVENT_EVENT_CQ_ERROR: 260 break; 261 case CREQ_FUNC_EVENT_EVENT_TQM_ERROR: 262 break; 263 case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR: 264 break; 265 case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR: 266 /* SRQ ctx error, call srq_handler?? 267 * But there's no SRQ handle! 268 */ 269 break; 270 case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR: 271 break; 272 case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR: 273 break; 274 case CREQ_FUNC_EVENT_EVENT_TIM_ERROR: 275 break; 276 case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST: 277 break; 278 case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED: 279 break; 280 default: 281 return -EINVAL; 282 } 283 return 0; 284 } 285 286 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, 287 struct creq_qp_event *qp_event) 288 { 289 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 290 struct creq_qp_error_notification *err_event; 291 struct bnxt_qplib_crsq *crsqe; 292 unsigned long flags; 293 struct bnxt_qplib_qp *qp; 294 u16 cbit, blocked = 0; 295 u16 cookie; 296 __le16 mcookie; 297 u32 qp_id; 298 299 switch (qp_event->event) { 300 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: 301 err_event = (struct creq_qp_error_notification *)qp_event; 302 qp_id = le32_to_cpu(err_event->xid); 303 qp = rcfw->qp_tbl[qp_id].qp_handle; 304 dev_dbg(&rcfw->pdev->dev, 305 "Received QP error notification\n"); 306 dev_dbg(&rcfw->pdev->dev, 307 "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n", 308 qp_id, err_event->req_err_state_reason, 309 err_event->res_err_state_reason); 310 if (!qp) 311 break; 312 bnxt_qplib_mark_qp_error(qp); 313 rcfw->aeq_handler(rcfw, qp_event, qp); 314 break; 315 default: 316 /* 317 * Command Response 318 * cmdq->lock needs to be acquired to synchronie 319 * the command send and completion reaping. This function 320 * is always called with creq->lock held. Using 321 * the nested variant of spin_lock. 322 * 323 */ 324 325 spin_lock_irqsave_nested(&cmdq->lock, flags, 326 SINGLE_DEPTH_NESTING); 327 cookie = le16_to_cpu(qp_event->cookie); 328 mcookie = qp_event->cookie; 329 blocked = cookie & RCFW_CMD_IS_BLOCKING; 330 cookie &= RCFW_MAX_COOKIE_VALUE; 331 cbit = cookie % rcfw->cmdq_depth; 332 crsqe = &rcfw->crsqe_tbl[cbit]; 333 if (crsqe->resp && 334 crsqe->resp->cookie == mcookie) { 335 memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); 336 crsqe->resp = NULL; 337 } else { 338 if (crsqe->resp && crsqe->resp->cookie) 339 dev_err(&rcfw->pdev->dev, 340 "CMD %s cookie sent=%#x, recd=%#x\n", 341 crsqe->resp ? "mismatch" : "collision", 342 crsqe->resp ? crsqe->resp->cookie : 0, 343 mcookie); 344 } 345 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) 346 dev_warn(&rcfw->pdev->dev, 347 "CMD bit %d was not requested\n", cbit); 348 cmdq->cons += crsqe->req_size; 349 crsqe->req_size = 0; 350 351 if (!blocked) 352 wake_up(&rcfw->waitq); 353 spin_unlock_irqrestore(&cmdq->lock, flags); 354 } 355 return 0; 356 } 357 358 /* SP - CREQ Completion handlers */ 359 static void bnxt_qplib_service_creq(unsigned long data) 360 { 361 struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data; 362 struct bnxt_qplib_hwq *creq = &rcfw->creq; 363 struct creq_base *creqe, **creq_ptr; 364 u32 sw_cons, raw_cons; 365 unsigned long flags; 366 u32 type, budget = CREQ_ENTRY_POLL_BUDGET; 367 368 /* Service the CREQ until budget is over */ 369 spin_lock_irqsave(&creq->lock, flags); 370 raw_cons = creq->cons; 371 while (budget > 0) { 372 sw_cons = HWQ_CMP(raw_cons, creq); 373 creq_ptr = (struct creq_base **)creq->pbl_ptr; 374 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; 375 if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements)) 376 break; 377 /* The valid test of the entry must be done first before 378 * reading any further. 379 */ 380 dma_rmb(); 381 382 type = creqe->type & CREQ_BASE_TYPE_MASK; 383 switch (type) { 384 case CREQ_BASE_TYPE_QP_EVENT: 385 bnxt_qplib_process_qp_event 386 (rcfw, (struct creq_qp_event *)creqe); 387 rcfw->creq_qp_event_processed++; 388 break; 389 case CREQ_BASE_TYPE_FUNC_EVENT: 390 if (!bnxt_qplib_process_func_event 391 (rcfw, (struct creq_func_event *)creqe)) 392 rcfw->creq_func_event_processed++; 393 else 394 dev_warn(&rcfw->pdev->dev, 395 "aeqe:%#x Not handled\n", type); 396 break; 397 default: 398 if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT) 399 dev_warn(&rcfw->pdev->dev, 400 "creqe with event 0x%x not handled\n", 401 type); 402 break; 403 } 404 raw_cons++; 405 budget--; 406 } 407 408 if (creq->cons != raw_cons) { 409 creq->cons = raw_cons; 410 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, 411 creq->max_elements); 412 } 413 spin_unlock_irqrestore(&creq->lock, flags); 414 } 415 416 static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) 417 { 418 struct bnxt_qplib_rcfw *rcfw = dev_instance; 419 struct bnxt_qplib_hwq *creq = &rcfw->creq; 420 struct creq_base **creq_ptr; 421 u32 sw_cons; 422 423 /* Prefetch the CREQ element */ 424 sw_cons = HWQ_CMP(creq->cons, creq); 425 creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr; 426 prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]); 427 428 tasklet_schedule(&rcfw->worker); 429 430 return IRQ_HANDLED; 431 } 432 433 /* RCFW */ 434 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) 435 { 436 struct cmdq_deinitialize_fw req; 437 struct creq_deinitialize_fw_resp resp; 438 u16 cmd_flags = 0; 439 int rc; 440 441 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); 442 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 443 NULL, 0); 444 if (rc) 445 return rc; 446 447 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 448 return 0; 449 } 450 451 static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) 452 { 453 return (pbl->pg_size == ROCE_PG_SIZE_4K ? 454 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K : 455 pbl->pg_size == ROCE_PG_SIZE_8K ? 456 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K : 457 pbl->pg_size == ROCE_PG_SIZE_64K ? 458 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K : 459 pbl->pg_size == ROCE_PG_SIZE_2M ? 460 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M : 461 pbl->pg_size == ROCE_PG_SIZE_8M ? 462 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M : 463 pbl->pg_size == ROCE_PG_SIZE_1G ? 464 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G : 465 CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K); 466 } 467 468 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 469 struct bnxt_qplib_ctx *ctx, int is_virtfn) 470 { 471 struct cmdq_initialize_fw req; 472 struct creq_initialize_fw_resp resp; 473 u16 cmd_flags = 0, level; 474 int rc; 475 476 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 477 /* Supply (log-base-2-of-host-page-size - base-page-shift) 478 * to bono to adjust the doorbell page sizes. 479 */ 480 req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - 481 RCFW_DBR_BASE_PAGE_SHIFT); 482 /* 483 * VFs need not setup the HW context area, PF 484 * shall setup this area for VF. Skipping the 485 * HW programming 486 */ 487 if (is_virtfn) 488 goto skip_ctx_setup; 489 490 level = ctx->qpc_tbl.level; 491 req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) | 492 __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]); 493 level = ctx->mrw_tbl.level; 494 req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) | 495 __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]); 496 level = ctx->srqc_tbl.level; 497 req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) | 498 __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]); 499 level = ctx->cq_tbl.level; 500 req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) | 501 __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]); 502 level = ctx->srqc_tbl.level; 503 req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) | 504 __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]); 505 level = ctx->cq_tbl.level; 506 req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) | 507 __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]); 508 level = ctx->tim_tbl.level; 509 req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) | 510 __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]); 511 level = ctx->tqm_pde_level; 512 req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) | 513 __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]); 514 515 req.qpc_page_dir = 516 cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); 517 req.mrw_page_dir = 518 cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); 519 req.srq_page_dir = 520 cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); 521 req.cq_page_dir = 522 cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); 523 req.tim_page_dir = 524 cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]); 525 req.tqm_page_dir = 526 cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]); 527 528 req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements); 529 req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements); 530 req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements); 531 req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements); 532 533 req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf); 534 req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf); 535 req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf); 536 req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf); 537 req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf); 538 539 skip_ctx_setup: 540 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); 541 rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 542 NULL, 0); 543 if (rc) 544 return rc; 545 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 546 return 0; 547 } 548 549 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 550 { 551 kfree(rcfw->qp_tbl); 552 kfree(rcfw->crsqe_tbl); 553 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); 554 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); 555 rcfw->pdev = NULL; 556 } 557 558 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, 559 struct bnxt_qplib_rcfw *rcfw, 560 struct bnxt_qplib_ctx *ctx, 561 int qp_tbl_sz) 562 { 563 rcfw->pdev = pdev; 564 rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT; 565 if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0, 566 &rcfw->creq.max_elements, 567 BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE, 568 HWQ_TYPE_L2_CMPL)) { 569 dev_err(&rcfw->pdev->dev, 570 "HW channel CREQ allocation failed\n"); 571 goto fail; 572 } 573 if (ctx->hwrm_intf_ver < HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK) 574 rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_256; 575 else 576 rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT_8192; 577 578 rcfw->cmdq.max_elements = rcfw->cmdq_depth; 579 if (bnxt_qplib_alloc_init_hwq 580 (rcfw->pdev, &rcfw->cmdq, NULL, 0, 581 &rcfw->cmdq.max_elements, 582 BNXT_QPLIB_CMDQE_UNITS, 0, 583 bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth), 584 HWQ_TYPE_CTX)) { 585 dev_err(&rcfw->pdev->dev, 586 "HW channel CMDQ allocation failed\n"); 587 goto fail; 588 } 589 590 rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, 591 sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); 592 if (!rcfw->crsqe_tbl) 593 goto fail; 594 595 rcfw->qp_tbl_size = qp_tbl_sz; 596 rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node), 597 GFP_KERNEL); 598 if (!rcfw->qp_tbl) 599 goto fail; 600 601 return 0; 602 603 fail: 604 bnxt_qplib_free_rcfw_channel(rcfw); 605 return -ENOMEM; 606 } 607 608 void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill) 609 { 610 tasklet_disable(&rcfw->worker); 611 /* Mask h/w interrupts */ 612 CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, 613 rcfw->creq.max_elements); 614 /* Sync with last running IRQ-handler */ 615 synchronize_irq(rcfw->vector); 616 if (kill) 617 tasklet_kill(&rcfw->worker); 618 619 if (rcfw->requested) { 620 free_irq(rcfw->vector, rcfw); 621 rcfw->requested = false; 622 } 623 } 624 625 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 626 { 627 unsigned long indx; 628 629 bnxt_qplib_rcfw_stop_irq(rcfw, true); 630 631 iounmap(rcfw->cmdq_bar_reg_iomem); 632 iounmap(rcfw->creq_bar_reg_iomem); 633 634 indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size); 635 if (indx != rcfw->bmap_size) 636 dev_err(&rcfw->pdev->dev, 637 "disabling RCFW with pending cmd-bit %lx\n", indx); 638 kfree(rcfw->cmdq_bitmap); 639 rcfw->bmap_size = 0; 640 641 rcfw->cmdq_bar_reg_iomem = NULL; 642 rcfw->creq_bar_reg_iomem = NULL; 643 rcfw->aeq_handler = NULL; 644 rcfw->vector = 0; 645 } 646 647 int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, 648 bool need_init) 649 { 650 int rc; 651 652 if (rcfw->requested) 653 return -EFAULT; 654 655 rcfw->vector = msix_vector; 656 if (need_init) 657 tasklet_init(&rcfw->worker, 658 bnxt_qplib_service_creq, (unsigned long)rcfw); 659 else 660 tasklet_enable(&rcfw->worker); 661 rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0, 662 "bnxt_qplib_creq", rcfw); 663 if (rc) 664 return rc; 665 rcfw->requested = true; 666 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons, 667 rcfw->creq.max_elements); 668 669 return 0; 670 } 671 672 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, 673 struct bnxt_qplib_rcfw *rcfw, 674 int msix_vector, 675 int cp_bar_reg_off, int virt_fn, 676 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 677 void *, void *)) 678 { 679 resource_size_t res_base; 680 struct cmdq_init init; 681 u16 bmap_size; 682 int rc; 683 684 /* General */ 685 rcfw->seq_num = 0; 686 set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags); 687 bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth * 688 sizeof(unsigned long)); 689 rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); 690 if (!rcfw->cmdq_bitmap) 691 return -ENOMEM; 692 rcfw->bmap_size = bmap_size; 693 694 /* CMDQ */ 695 rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION; 696 res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg); 697 if (!res_base) 698 return -ENOMEM; 699 700 rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base + 701 RCFW_COMM_BASE_OFFSET, 702 RCFW_COMM_SIZE); 703 if (!rcfw->cmdq_bar_reg_iomem) { 704 dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n", 705 rcfw->cmdq_bar_reg); 706 return -ENOMEM; 707 } 708 709 rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET : 710 RCFW_PF_COMM_PROD_OFFSET; 711 712 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; 713 714 /* CREQ */ 715 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; 716 res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); 717 if (!res_base) 718 dev_err(&rcfw->pdev->dev, 719 "CREQ BAR region %d resc start is 0!\n", 720 rcfw->creq_bar_reg); 721 rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off, 722 4); 723 if (!rcfw->creq_bar_reg_iomem) { 724 dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n", 725 rcfw->creq_bar_reg); 726 iounmap(rcfw->cmdq_bar_reg_iomem); 727 rcfw->cmdq_bar_reg_iomem = NULL; 728 return -ENOMEM; 729 } 730 rcfw->creq_qp_event_processed = 0; 731 rcfw->creq_func_event_processed = 0; 732 733 if (aeq_handler) 734 rcfw->aeq_handler = aeq_handler; 735 init_waitqueue_head(&rcfw->waitq); 736 737 rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true); 738 if (rc) { 739 dev_err(&rcfw->pdev->dev, 740 "Failed to request IRQ for CREQ rc = 0x%x\n", rc); 741 bnxt_qplib_disable_rcfw_channel(rcfw); 742 return rc; 743 } 744 745 init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); 746 init.cmdq_size_cmdq_lvl = cpu_to_le16( 747 ((rcfw->cmdq_depth << CMDQ_INIT_CMDQ_SIZE_SFT) & 748 CMDQ_INIT_CMDQ_SIZE_MASK) | 749 ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) & 750 CMDQ_INIT_CMDQ_LVL_MASK)); 751 init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id); 752 753 /* Write to the Bono mailbox register */ 754 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); 755 return 0; 756 } 757 758 struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( 759 struct bnxt_qplib_rcfw *rcfw, 760 u32 size) 761 { 762 struct bnxt_qplib_rcfw_sbuf *sbuf; 763 764 sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); 765 if (!sbuf) 766 return NULL; 767 768 sbuf->size = size; 769 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 770 &sbuf->dma_addr, GFP_ATOMIC); 771 if (!sbuf->sb) 772 goto bail; 773 774 return sbuf; 775 bail: 776 kfree(sbuf); 777 return NULL; 778 } 779 780 void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, 781 struct bnxt_qplib_rcfw_sbuf *sbuf) 782 { 783 if (sbuf->sb) 784 dma_free_coherent(&rcfw->pdev->dev, sbuf->size, 785 sbuf->sb, sbuf->dma_addr); 786 kfree(sbuf); 787 } 788