// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* QM constants */
#define QM_PQ_ELEMENT_SIZE		4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT			4
#define DQ_RANGE_ALIGN			BIT(DQ_RANGE_SHIFT)

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS		256

/* Timers constants */
#define TM_SHIFT			7
#define TM_ALIGN			BIT(TM_SHIFT)
#define TM_ELEM_SIZE			4

#define ILT_DEFAULT_HW_P_SIZE		4

#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT	0
#define ILT_ENTRY_VALID_MASK		0x1ULL
#define ILT_ENTRY_VALID_SHIFT		52
#define ILT_ENTRY_IN_REGS		2
#define ILT_REG_SIZE_IN_BYTES		4

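/* Layout note (informational, derived from the masks above): each ILT entry
 * is a 64-bit value programmed as ILT_ENTRY_IN_REGS (two) consecutive 32-bit
 * runtime registers. Bits 0-51 hold the backing page's physical address in
 * 4K units (i.e. phys_addr >> 12), and bit 52 is the valid bit. With the
 * default hw_p_size of 4, ILT_PAGE_IN_BYTES(4) = 1 << (4 + 12) = 64K per
 * ILT page.
 */
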
/* connection context union */
union conn_context {
	struct core_conn_context core_ctx;
	struct eth_conn_context eth_ctx;
	struct iscsi_conn_context iscsi_ctx;
	struct fcoe_conn_context fcoe_ctx;
	struct roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
	struct iscsi_task_context iscsi_ctx;
	struct fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
	struct rdma_task_context roce_ctx;
};

struct src_ent {
	__u8 opaque[56];
	__be64 next;
};

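/* Note (informational): a searcher T2 entry is 64 bytes - 56 opaque bytes
 * plus a big-endian 64-bit physical pointer to the next entry. As 64 is a
 * power of two and T2 pages are power-of-two sized as well, entries never
 * straddle a page boundary; qed_cxt_src_t2_alloc() below relies on this
 * when chaining the free list across pages.
 */
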
#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

static bool src_proto(enum protocol_type type)
{
	return type == PROTOCOLID_TCP_ULP ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_cid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_TCP_ULP ||
	       type == PROTOCOLID_FCOE ||
	       type == PROTOCOLID_ROCE ||
	       type == PROTOCOLID_IWARP;
}

static bool tm_tid_proto(enum protocol_type type)
{
	return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_cdu_iids *iids)
{
	u32 type;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
	}
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
	u32 pf_cids;
	u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
			     struct qed_src_iids *iids)
{
	u32 i;

	for (i = 0; i < MAX_CONN_TYPES; i++) {
		if (!src_proto(i))
			continue;

		iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
		iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
	}

	/* Also add the L2 filtering filters */
	iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
	u32 pf_cids;
	u32 pf_tids[NUM_TASK_PF_SEGMENTS];	/* per segment */
	u32 pf_tids_total;
	u32 per_vf_cids;
	u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_cxt_mngr *p_mngr,
			    struct qed_tm_iids *iids)
{
	bool tm_vf_required = false;
	bool tm_required = false;
	int i, j;

	/* Timers is a special case -> we don't count how many cids require
	 * timers but what's the max cid that will be used by the timer block.
	 * Therefore we traverse in reverse order, and once we hit a protocol
	 * that requires the timers memory, we sum all the protocols up to
	 * that one.
	 */
	for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

		if (tm_cid_proto(i) || tm_required) {
			if (p_cfg->cid_count)
				tm_required = true;

			iids->pf_cids += p_cfg->cid_count;
		}

		if (tm_cid_proto(i) || tm_vf_required) {
			if (p_cfg->cids_per_vf)
				tm_vf_required = true;

			iids->per_vf_cids += p_cfg->cids_per_vf;
		}

		if (tm_tid_proto(i)) {
			struct qed_tid_seg *segs = p_cfg->tid_seg;

			/* for each segment there is at most one
			 * protocol for which count is not 0.
			 */
			for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
				iids->pf_tids[j] += segs[j].count;

			/* The last array element is for the VFs. As for PF
			 * segments there can be only one protocol for
			 * which this value is not 0.
			 */
			iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
		}
	}

	iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
	iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
	iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

	for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
		iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
		iids->pf_tids_total += iids->pf_tids[j];
	}
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
			    struct qed_qm_iids *iids)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *segs;
	u32 vf_cids = 0, type, j;
	u32 vf_tids = 0;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		iids->cids += p_mngr->conn_cfg[type].cid_count;
		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

		segs = p_mngr->conn_cfg[type].tid_seg;
		/* for each segment there is at most one
		 * protocol for which count is not 0.
		 */
		for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
			iids->tids += segs[j].count;

		/* The last array element is for the VFs. As for PF
		 * segments there can be only one protocol for
		 * which this value is not 0.
		 */
		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
	}

	iids->vf_cids = vf_cids;
	iids->tids += vf_tids * p_mngr->vf_count;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
		   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
						u32 seg)
{
	struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
	u32 i;

	/* Find the protocol with tid count > 0 for this segment.
	 * Note: there can only be one and this is already validated.
	 */
	for (i = 0; i < MAX_CONN_TYPES; i++)
		if (p_cfg->conn_cfg[i].tid_seg[seg].count)
			return &p_cfg->conn_cfg[i].tid_seg[seg];
	return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
				  u32 num_srqs, u32 num_xrc_srqs)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

	p_mgr->srq_count = num_srqs;
	p_mgr->xrc_srq_count = num_xrc_srqs;
}

u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
			      enum ilt_clients ilt_client)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];

	return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
}

static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
{
	u32 page_size;

	page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
	return page_size / XRC_SRQ_CXT_SIZE;
}

u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	u32 total_srqs;

	total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;

	return total_srqs;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type type,
					u32 cid_count, u32 vf_cid_cnt)
{
	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

	if (type == PROTOCOLID_ROCE) {
		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
		u32 align = elems_per_page * DQ_RANGE_ALIGN;

		p_conn->cid_count = roundup(p_conn->cid_count, align);
	}
}

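/* Note on the RoCE rounding above (informational): aligning cid_count to
 * elems_per_page * DQ_RANGE_ALIGN makes the RoCE CID range end exactly on an
 * ILT page boundary, which is what allows CDUC pages for RoCE to be
 * allocated dynamically (see qed_ilt_get_dynamic_line_cnt()). For example,
 * assuming a 64K ILT page and a 512-byte connection context, elems_per_page
 * is 128 and cid_count is rounded up to a multiple of 128 * 16 = 2048.
 */
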
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type, u32 *vf_cid)
{
	if (vf_cid)
		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
				enum protocol_type type)
{
	u32 cnt = 0;
	int i;

	for (i = 0; i < TASK_SEGMENTS; i++)
		cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

	return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
					enum protocol_type proto,
					u8 seg,
					u8 seg_type, u32 count, bool has_fl)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

	p_seg->count = count;
	p_seg->has_fl_mem = has_fl;
	p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 start_line, u32 total_size, u32 elem_size)
{
	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	/* verify that it's called only once for each block */
	if (p_blk->total_size)
		return;

	p_blk->total_size = total_size;
	p_blk->real_size_in_page = 0;
	if (elem_size)
		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
	p_blk->start_line = start_line;
}

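/* Note (informational): real_size_in_page is the usable part of an ILT page,
 * i.e. the largest multiple of elem_size fitting in the page; elements never
 * straddle a page boundary, so the remainder is wasted. E.g., assuming a 64K
 * page and a 760-byte element: (65536 / 760) * 760 = 65360 usable bytes,
 * wasting 176 bytes per page.
 */
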
static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
				 struct qed_ilt_client_cfg *p_cli,
				 struct qed_ilt_cli_blk *p_blk,
				 u32 *p_line, enum ilt_clients client_id)
{
	if (!p_blk->total_size)
		return;

	if (!p_cli->active)
		p_cli->first.val = *p_line;

	p_cli->active = true;
	*p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
	p_cli->last.val = *p_line - 1;

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
		   client_id, p_cli->first.val,
		   p_cli->last.val, p_blk->total_size,
		   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
					enum ilt_clients ilt_client)
{
	u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
	struct qed_ilt_client_cfg *p_cli;
	u32 lines_to_skip = 0;
	u32 cxts_per_p;

	if (ilt_client == ILT_CLI_CDUC) {
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

		cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
			     (u32)CONN_CXT_SIZE(p_hwfn);

		lines_to_skip = cid_count / cxts_per_p;
	}

	return lines_to_skip;
}

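/* Note (informational): for RoCE, the CDUC lines that are fully covered by
 * the page-aligned RoCE CID range (see qed_cxt_set_proto_cid_count()) are
 * not backed with memory up front. dynamic_line_cnt is the number of such
 * lines; qed_ilt_blk_alloc() skips them and they are populated on demand
 * instead.
 */
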
static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
						  *p_cli)
{
	p_cli->active = false;
	p_cli->first.val = 0;
	p_cli->last.val = 0;
	return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
	p_blk->total_size = 0;
	return p_blk;
}

static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 cli_idx, blk_idx;

	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
			clients[cli_idx].pf_blks[blk_idx].total_size = 0;

		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
	}
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 curr_line, total, i, task_size, line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_cdu_iids cdu_iids;
	struct qed_src_iids src_iids;
	struct qed_qm_iids qm_iids;
	struct qed_tm_iids tm_iids;
	struct qed_tid_seg *p_seg;

	memset(&qm_iids, 0, sizeof(qm_iids));
	memset(&cdu_iids, 0, sizeof(cdu_iids));
	memset(&src_iids, 0, sizeof(src_iids));
	memset(&tm_iids, 0, sizeof(tm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	/* Reset all ILT blocks at the beginning of ILT computing in order
	 * to prevent memory allocation for irrelevant blocks afterwards.
	 */
	qed_cxt_ilt_blk_reset(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

	curr_line = p_mngr->pf_start_line;

	/* CDUC PF */
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	qed_cxt_cdu_iids(p_mngr, &cdu_iids);

	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
							       ILT_CLI_CDUC);

	/* CDUC VF */
	p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->vf_total_lines = curr_line - p_blk->start_line;

	for (i = 1; i < p_mngr->vf_count; i++)
		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUC);

	/* CDUT PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
	p_cli->first.val = curr_line;

	/* first the 'working' task memory */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}

	/* next the 'init' task memory (forced load memory) */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		p_blk =
		    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

		if (!p_seg->has_fl_mem) {
			/* The segment is active (total size of 'working'
			 * memory is > 0) but has no FL (forced-load, Init)
			 * memory. Thus:
			 *
			 * 1. The total-size in the corresponding FL block of
			 *    the ILT client is set to 0 - no ILT lines are
			 *    provisioned and no ILT memory is allocated.
			 *
			 * 2. The start-line of said block is set to the
			 *    start line of the matching working memory
			 *    block in the ILT client. This is later used to
			 *    configure the CDU segment offset registers and
			 *    results in an FL command for TIDs of this
			 *    segment behaving as a regular load command
			 *    (loading TIDs from the working memory).
			 */
			line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
			continue;
		}
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);
	}
	p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

	/* CDUT VF */
	p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
	if (p_seg && p_seg->count) {
		/* Strictly speaking we need to iterate over all VF
		 * task segment types, but a VF has only 1 segment
		 */

		/* 'working' memory */
		total = p_seg->count * p_mngr->task_type_size[p_seg->type];

		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
		qed_ilt_cli_blk_fill(p_cli, p_blk,
				     curr_line, total,
				     p_mngr->task_type_size[p_seg->type]);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_CDUT);

		/* 'init' memory */
		p_blk =
		    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
		if (!p_seg->has_fl_mem) {
			/* see comment above */
			line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
			qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
		} else {
			task_size = p_mngr->task_type_size[p_seg->type];
			qed_ilt_cli_blk_fill(p_cli, p_blk,
					     curr_line, total, task_size);
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
		p_cli->vf_total_lines = curr_line -
					p_cli->vf_blks[0].start_line;

		/* Now for the rest of the VFs */
		for (i = 1; i < p_mngr->vf_count; i++) {
			p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);

			p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_CDUT);
		}
	}

	/* QM */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids,
		   qm_iids.vf_cids,
		   qm_iids.tids,
		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* SRC */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
	qed_cxt_src_iids(p_mngr, &src_iids);

	/* Both the PF and VFs searcher connections are stored in the per PF
	 * database. Thus sum the PF searcher cids and all the VFs searcher
	 * cids.
	 */
	total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (total) {
		u32 local_max = max_t(u32, total,
				      SRC_MIN_NUM_ELEMS);

		total = roundup_pow_of_two(local_max);

		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * sizeof(struct src_ent),
				     sizeof(struct src_ent));

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_SRC);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM PF */
	p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
	total = tm_iids.pf_cids + tm_iids.pf_tids_total;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	/* TM VF */
	total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
	if (total) {
		p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TM);

		p_cli->vf_total_lines = curr_line - p_blk->start_line;
		for (i = 1; i < p_mngr->vf_count; i++)
			qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
					     ILT_CLI_TM);
	}

	/* TSDM (SRQ CONTEXT) */
	total = qed_cxt_get_total_srq_count(p_hwfn);

	if (total) {
		p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
		p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
		qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
				     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
				     ILT_CLI_TSDM);
		p_cli->pf_total_lines = curr_line - p_blk->start_line;
	}

	*line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT))
		return -EINVAL;

	return 0;
}

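/* Note (informational): on success, *line_count holds the total number of
 * ILT lines this PF needs across all clients (CDUC, CDUT, QM, SRC, TM,
 * TSDM), relative to pf_start_line. When it exceeds the PF's QED_ILT
 * resource -EINVAL is returned, and the caller is expected to shrink the
 * configuration (e.g. with the help of qed_cxt_cfg_ilt_compute_excess()
 * below) and recompute.
 */
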
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 excess_lines, available_lines;
	struct qed_cxt_mngr *p_mngr;
	u32 ilt_page_size, elem_size;
	struct qed_tid_seg *p_seg;
	int i;

	available_lines = RESC_NUM(p_hwfn, QED_ILT);
	excess_lines = used_lines - available_lines;

	if (!excess_lines)
		return 0;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return 0;

	p_mngr = p_hwfn->p_cxt_mngr;
	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg || p_seg->count == 0)
			continue;

		elem_size = p_mngr->task_type_size[p_seg->type];
		if (!elem_size)
			continue;

		return (ilt_page_size / elem_size) * excess_lines;
	}

	DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
	return 0;
}

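/* Note (informational): the conversion above turns a line deficit into a
 * task-count reduction - each CDUT line holds (ilt_page_size / elem_size)
 * tasks, so releasing excess_lines lines frees that many tasks per line.
 * The first active PF segment found is used to size the reduction.
 */
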
static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
	struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
	u32 i;

	if (!p_t2 || !p_t2->dma_mem)
		return;

	for (i = 0; i < p_t2->num_pages; i++)
		if (p_t2->dma_mem[i].virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_t2->dma_mem[i].size,
					  p_t2->dma_mem[i].virt_addr,
					  p_t2->dma_mem[i].phys_addr);

	kfree(p_t2->dma_mem);
	p_t2->dma_mem = NULL;
}

static int
qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
		       struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
{
	void **p_virt;
	u32 size, i;

	if (!p_t2 || !p_t2->dma_mem)
		return -EINVAL;

	for (i = 0; i < p_t2->num_pages; i++) {
		size = min_t(u32, total_size, page_size);
		p_virt = &p_t2->dma_mem[i].virt_addr;

		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					     size,
					     &p_t2->dma_mem[i].phys_addr,
					     GFP_KERNEL);
		if (!p_t2->dma_mem[i].virt_addr)
			return -ENOMEM;

		memset(*p_virt, 0, size);
		p_t2->dma_mem[i].size = size;
		total_size -= size;
	}

	return 0;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_num, total_size, ent_per_page, psz, i;
	struct phys_mem_desc *p_t2_last_page;
	struct qed_ilt_client_cfg *p_src;
	struct qed_src_iids src_iids;
	struct qed_src_t2 *p_t2;
	int rc;

	memset(&src_iids, 0, sizeof(src_iids));

	/* if the SRC ILT client is inactive - there are no connections
	 * requiring the searcher; leave.
	 */
	p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
	if (!p_src->active)
		return 0;

	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	total_size = conn_num * sizeof(struct src_ent);

	/* use the same page size as the SRC ILT client */
	psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
	p_t2 = &p_mngr->src_t2;
	p_t2->num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
				GFP_KERNEL);
	if (!p_t2->dma_mem) {
		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
		rc = -ENOMEM;
		goto t2_fail;
	}

	rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
	if (rc)
		goto t2_fail;

	/* Set the t2 pointers */

	/* entries per page - must be a power of two */
	ent_per_page = psz / sizeof(struct src_ent);

	p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;

	p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
	p_t2->last_free = (u64)p_t2_last_page->phys_addr +
			  ((conn_num - 1) & (ent_per_page - 1)) *
			  sizeof(struct src_ent);

	for (i = 0; i < p_t2->num_pages; i++) {
		u32 ent_num = min_t(u32,
				    ent_per_page,
				    conn_num);
		struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
		u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
		u32 j;

		for (j = 0; j < ent_num - 1; j++) {
			val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
			entries[j].next = cpu_to_be64(val);
		}

		if (i < p_t2->num_pages - 1)
			val = (u64)p_t2->dma_mem[i + 1].phys_addr;
		else
			val = 0;
		entries[j].next = cpu_to_be64(val);

		conn_num -= ent_num;
	}

	return 0;

t2_fail:
	qed_cxt_src_t2_free(p_hwfn);
	return rc;
}

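/* Note (informational): the loops above string every src_ent into a single
 * free list: within a page, entry j points at entry j + 1; the last entry
 * of each page points at the first entry of the next page; and the very
 * last entry holds 0. first_free/last_free record the physical addresses of
 * the list's head and tail, which qed_src_init_pf() later hands to the
 * searcher.
 */
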
#define for_each_ilt_valid_client(pos, clients)	\
	for (pos = 0; pos < MAX_ILT_CLIENTS; pos++)	\
		if (!clients[pos].active) {	\
			continue;	\
		} else	\

/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
	u32 size = 0;
	u32 i;

	for_each_ilt_valid_client(i, ilt_clients)
		size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

	return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 ilt_size, i;

	ilt_size = qed_cxt_ilt_shadow_size(p_cli);

	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
		struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];

		if (p_dma->virt_addr)
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  p_dma->size, p_dma->virt_addr,
					  p_dma->phys_addr);
		p_dma->virt_addr = NULL;
	}
	kfree(p_mngr->ilt_shadow);
	p_mngr->ilt_shadow = NULL;
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
			     struct qed_ilt_cli_blk *p_blk,
			     enum ilt_clients ilt_client,
			     u32 start_line_offset)
{
	struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
	u32 lines, line, sz_left, lines_to_skip = 0;

	/* Special handling for RoCE that supports dynamic allocation */
	if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
		return 0;

	lines_to_skip = p_blk->dynamic_line_cnt;

	if (!p_blk->total_size)
		return 0;

	sz_left = p_blk->total_size;
	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
	line = p_blk->start_line + start_line_offset -
	       p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

	for (; lines; lines--) {
		dma_addr_t p_phys;
		void *p_virt;
		u32 size;

		size = min_t(u32, sz_left, p_blk->real_size_in_page);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		ilt_shadow[line].phys_addr = p_phys;
		ilt_shadow[line].virt_addr = p_virt;
		ilt_shadow[line].size = size;

		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
			   line, (u64)p_phys, p_virt, size);

		sz_left -= size;
		line++;
	}

	return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *clients = p_mngr->clients;
	struct qed_ilt_cli_blk *p_blk;
	u32 size, i, j, k;
	int rc;

	size = qed_cxt_ilt_shadow_size(clients);
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "Allocated 0x%x bytes for ilt shadow\n",
		   (u32)(size * sizeof(struct phys_mem_desc)));

	for_each_ilt_valid_client(i, clients) {
		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
			p_blk = &clients[i].pf_blks[j];
			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
			if (rc)
				goto ilt_shadow_fail;
		}
		for (k = 0; k < p_mngr->vf_count; k++) {
			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
				u32 lines = clients[i].vf_total_lines * k;

				p_blk = &clients[i].vf_blks[j];
				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
				if (rc)
					goto ilt_shadow_fail;
			}
		}
	}

	return 0;

ilt_shadow_fail:
	qed_ilt_shadow_free(p_hwfn);
	return rc;
}

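/* Note (informational): in qed_ilt_shadow_alloc() above, all VFs of a client
 * share the same qed_ilt_cli_blk descriptor; per-VF placement comes from the
 * start_line_offset argument (vf_total_lines * k), which shifts VF k's lines
 * past those of VFs 0..k-1 within the shadow table.
 */
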
static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		bitmap_free(p_mngr->acquired[type].cid_map);
		p_mngr->acquired[type].max_count = 0;
		p_mngr->acquired[type].start_cid = 0;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
			p_mngr->acquired_vf[type][vf].max_count = 0;
			p_mngr->acquired_vf[type][vf].start_cid = 0;
		}
	}
}

static int
qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
			 u32 type,
			 u32 cid_start,
			 u32 cid_count, struct qed_cid_acquired_map *p_map)
{
	if (!cid_count)
		return 0;

	p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
	if (!p_map->cid_map)
		return -ENOMEM;

	p_map->max_count = cid_count;
	p_map->start_cid = cid_start;

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Type %08x start: %08x count %08x\n",
		   type, p_map->start_cid, p_map->max_count);

	return 0;
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 start_cid = 0, vf_start_cid = 0;
	u32 type, vf;

	for (type = 0; type < MAX_CONN_TYPES; type++) {
		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
		struct qed_cid_acquired_map *p_map;

		/* Handle PF maps */
		p_map = &p_mngr->acquired[type];
		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
					     p_cfg->cid_count, p_map))
			goto cid_map_fail;

		/* Handle VF maps */
		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			if (qed_cid_map_alloc_single(p_hwfn, type,
						     vf_start_cid,
						     p_cfg->cids_per_vf, p_map))
				goto cid_map_fail;
		}

		start_cid += p_cfg->cid_count;
		vf_start_cid += p_cfg->cids_per_vf;
	}

	return 0;

cid_map_fail:
	qed_cid_map_free(p_hwfn);
	return -ENOMEM;
}

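/* Note (informational): PF CID ranges are stacked per protocol - each
 * protocol's range starts where the previous one's cid_count ends - and VF
 * ranges are stacked independently using cids_per_vf. Each map is a plain
 * bitmap of relative CIDs; an absolute CID is start_cid + bit index (see
 * _qed_cxt_acquire_cid()).
 */
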
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr)
		return -ENOMEM;

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

	/* default ILT page size for all clients is 64K */
	for (i = 0; i < MAX_ILT_CLIENTS; i++)
		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

	p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);

	/* Initialize task sizes */
	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

	if (p_hwfn->cdev->p_iov_info) {
		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
		p_mngr->first_vf_in_pf =
		    p_hwfn->cdev->p_iov_info->first_vf_in_pf;
	}

	/* Initialize the dynamic ILT allocation mutex */
	mutex_init(&p_mngr->mutex);

	/* Set the cxt manager pointer prior to further allocations */
	p_hwfn->p_cxt_mngr = p_mngr;

	return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
	int rc;

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate the T2 table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc)
		goto tables_alloc_fail;

	return 0;

tables_alloc_fail:
	qed_cxt_mngr_free(p_hwfn);
	return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_cxt_mngr)
		return;

	qed_cid_map_free(p_hwfn);
	qed_cxt_src_t2_free(p_hwfn);
	qed_ilt_shadow_free(p_hwfn);
	kfree(p_hwfn->p_cxt_mngr);

	p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map;
	struct qed_conn_type_cfg *p_cfg;
	int type;

	/* Reset acquired cids */
	for (type = 0; type < MAX_CONN_TYPES; type++) {
		u32 vf;

		p_cfg = &p_mngr->conn_cfg[type];
		if (p_cfg->cid_count) {
			p_map = &p_mngr->acquired[type];
			bitmap_zero(p_map->cid_map, p_map->max_count);
		}

		if (!p_cfg->cids_per_vf)
			continue;

		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
			p_map = &p_mngr->acquired_vf[type][vf];
			bitmap_zero(p_map->cid_map, p_map->max_count);
		}
	}
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
	 CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK \
	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
	 CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK \
	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
	 CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-0 tasks configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

	/* CDUT - type-1 tasks configuration */
	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	/* cxt size and block-waste are multiples of 8 */
	cdu_params = 0;
	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}

/* CDU PF */
#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
#define CDU_SEG_REG_TYPE_MASK		0x1
#define CDU_SEG_REG_OFFSET_SHIFT	0
#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK

static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg;
	u32 cdu_seg_params, offset;
	int i;

	static const u32 rt_type_offset_arr[] = {
		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	static const u32 rt_type_offset_fl_arr[] = {
		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
	};

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];

	/* There are initializations only for CDUT during the PF phase */
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
		if (!p_seg)
			continue;

		/* Note: start_line is already adjusted for the CDU
		 * segment register granularity, so we just need to
		 * divide. Adjustment is implicit as we assume ILT
		 * Page size is larger than 32K!
		 */
		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);

		offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
			  (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
			   p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;

		cdu_seg_params = 0;
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
		SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
		STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
	}
}

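/* Note (informational): the CDU segment offset registers are expressed in
 * CDUT_SEG_ALIGNMET_IN_BYTES units (BIT(3 + 12) = 32K chunks). The offset
 * computed above is the byte distance from the client's first ILT line to
 * the segment's first line, divided by that granularity; the division is
 * exact because segments start on ILT page boundaries and the ILT page size
 * here is at least 32K.
 */
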
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_pf_loading = is_pf_loading;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.num_tids = iids.tids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
	params.num_vf_pqs = qm_info->num_vf_pqs;
	params.start_vport = qm_info->start_vport;
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}

/* CM PF */
static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
	/* XCM pure-LB queue */
	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
		     qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}

/* DQ PF */
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;

	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);

	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);

	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);

	/* Connection types 6 & 7 are not in use, yet they must be configured
	 * with the highest possible connection range. Leaving them
	 * unconfigured means the defaults are used, and with a large number
	 * of cids a bug may occur if the defaults turn out smaller than
	 * dq_pf_max_cid / dq_vf_max_cid.
	 */
	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);

	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}

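/* Note (informational): DORQ_REG_*_MAX_ICID_N holds the cumulative upper
 * bound of connection range N in DQ_RANGE_ALIGN (16-CID) units - each range
 * ends where the next one begins. E.g., assuming conn_cfg[0].cid_count =
 * 4096 and conn_cfg[1].cid_count = 2048, range 0 is programmed with
 * 4096 >> 4 = 256 and range 1 with (4096 + 2048) >> 4 = 384.
 */
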
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *ilt_clients;
	int i;

	ilt_clients = p_hwfn->p_cxt_mngr->clients;
	for_each_ilt_valid_client(i, ilt_clients) {
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].first.reg,
			     ilt_clients[i].first.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].last.reg, ilt_clients[i].last.val);
		STORE_RT_REG(p_hwfn,
			     ilt_clients[i].p_size.reg,
			     ilt_clients[i].p_size.val);
	}
}

static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	u32 blk_factor;

	/* For simplicity we set the 'block' to be an ILT page */
	if (p_hwfn->cdev->p_iov_info) {
		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
			     p_iov->first_vf_in_pf);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
			     p_iov->first_vf_in_pf + p_iov->total_vfs);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
			     blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
	if (p_cli->active) {
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
			     p_cli->pf_total_lines);
		STORE_RT_REG(p_hwfn,
			     PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
			     p_cli->vf_total_lines);
	}
}

/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct phys_mem_desc *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);
	qed_ilt_vf_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		/* Client's 1st val and RT array are absolute, ILT shadows'
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL in case of dynamic
			 * allocation
			 */
			if (p_shdw[line].virt_addr) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].phys_addr >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].phys_addr >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}

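/* Note (informational): each 64-bit ilt_hw_entry above is written to the
 * runtime array as ILT_ENTRY_IN_REGS (two) consecutive 32-bit registers via
 * STORE_RT_REG_AGG. Entries left at 0 (no backing page yet) simply have the
 * valid bit clear, which is what keeps the dynamic/on-demand case safe.
 */
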
/* SRC (Searcher) PF */
static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rounded_conn_num, conn_num, conn_max;
	struct qed_src_iids src_iids;

	memset(&src_iids, 0, sizeof(src_iids));
	qed_cxt_src_iids(p_mngr, &src_iids);
	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
	if (!conn_num)
		return;

	conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
	rounded_conn_num = roundup_pow_of_two(conn_max);

	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
		     ilog2(rounded_conn_num));

	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->src_t2.first_free);
	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
			 p_hwfn->p_cxt_mngr->src_t2.last_free);
}

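/* Note (informational): the searcher hash is sized from the rounded-up
 * connection count. E.g., assuming conn_num = 3000: conn_max =
 * max(3000, SRC_MIN_NUM_ELEMS) = 3000, rounded_conn_num =
 * roundup_pow_of_two(3000) = 4096, and SRC_REG_NUMBER_HASH_BITS is
 * programmed with ilog2(4096) = 12.
 */
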
/* Timers PF */
#define TM_CFG_NUM_IDS_SHIFT		0
#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
#define TM_CFG_PARENT_PF_SHIFT		25
#define TM_CFG_PARENT_PF_MASK		0x7ULL

#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL

#define TM_CFG_TID_OFFSET_SHIFT		30
#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL

static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 active_seg_mask = 0, tm_offset, rt_reg;
	struct qed_tm_iids tm_iids;
	u64 cfg_word;
	u8 i;

	memset(&tm_iids, 0, sizeof(tm_iids));
	qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);

	/* @@@TBD No pre-scan for now */

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	cfg_word = 0;
	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);	/* scan all */

	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
		 (sizeof(cfg_word) / sizeof(u32)) *
		 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);

	/* enable scan */
	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
		     tm_iids.pf_cids ? 0x1 : 0x0);

	/* @@@TBD how to enable the scan for the VFs */

	tm_offset = tm_iids.per_vf_cids;

	/* Note: We assume consecutive VFs for a PF */
	for (i = 0; i < p_mngr->vf_count; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
	}

	tm_offset = tm_iids.pf_cids;
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		cfg_word = 0;
		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);

		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
			 (sizeof(cfg_word) / sizeof(u32)) *
			 (NUM_OF_VFS(p_hwfn->cdev) +
			  p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);

		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
		active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);

		tm_offset += tm_iids.pf_tids[i];
	}

	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
		active_seg_mask = 0;

	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);

	/* @@@TBD how to enable the scan for the VFs */
}

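/* Note (informational): the timers config memories are laid out with all
 * VFs first and the PFs after them, which is why the PF's own connection
 * entry above is indexed at NUM_OF_VFS(cdev) + rel_pf_id and its task
 * entries at NUM_OF_VFS(cdev) + rel_pf_id * NUM_TASK_PF_SEGMENTS + seg.
 * Each cfg_word is 64 bits, hence the (sizeof(cfg_word) / sizeof(u32))
 * stride between entries.
 */
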
static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
{
	if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
	    p_hwfn->pf_params.fcoe_pf_params.is_target)
		STORE_RT_REG(p_hwfn,
			     PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
}

static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_conn_type_cfg *p_fcoe;
	struct qed_tid_seg *p_tid;

	p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];

	/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
	if (!p_fcoe->cid_count)
		return;

	p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
	if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
				 p_tid->count);
	} else {
		STORE_RT_REG_AGG(p_hwfn,
				 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
				 p_tid->count);
	}
}

void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
	qed_prs_init_common(p_hwfn);
}

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_qm_init_pf(p_hwfn, p_ptt, true);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_cdu_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
	qed_src_init_pf(p_hwfn);
	qed_tm_init_pf(p_hwfn);
	qed_prs_init_pf(p_hwfn);
}


int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
			enum protocol_type type, u32 *p_cid)
{
	return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
}

static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
				      u32 cid,
				      u8 vfid,
				      enum protocol_type *p_type,
				      struct qed_cid_acquired_map **pp_map)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 rel_cid;

	/* Iterate over protocols and find matching cid range */
	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
		if (vfid == QED_CXT_PF_CID)
			*pp_map = &p_mngr->acquired[*p_type];
		else
			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];

		if (!((*pp_map)->cid_map))
			continue;
		if (cid >= (*pp_map)->start_cid &&
		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
			break;
	}

	if (*p_type == MAX_CONN_TYPES) {
		DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
		goto fail;
	}

	rel_cid = cid - (*pp_map)->start_cid;
	if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
			  cid, vfid);
		goto fail;
	}

	return true;
fail:
	*p_type = MAX_CONN_TYPES;
	*pp_map = NULL;
	return false;
}
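
/* Note: releasing a CID below only clears its bit in the per-protocol
 * acquisition bitmap; any dynamically allocated ILT memory behind the
 * context is reclaimed separately (see qed_cxt_free_ilt_range() below).
 */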

void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
{
	struct qed_cid_acquired_map *p_map = NULL;
	enum protocol_type type;
	bool b_acquired;
	u32 rel_cid;

	if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
		DP_NOTICE(p_hwfn,
			  "Trying to return incorrect CID belonging to VF %02x\n",
			  vfid);
		return;
	}

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
					       &type, &p_map);

	if (!b_acquired)
		return;

	rel_cid = cid - p_map->start_cid;
	clear_bit(rel_cid, p_map->cid_map);

	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
		   cid, rel_cid, vfid, type);
}

void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
}

int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_cid_acquired_map *p_map = NULL;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
					       QED_CXT_PF_CID, &type, &p_map);

	if (!b_acquired)
		return -EINVAL;

	/* set the protocol type */
	p_info->type = type;

	/* compute context virtual pointer */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;

	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].virt_addr)
		return -EINVAL;

	p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
			p_info->iid % cxts_per_p * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

	return 0;
}
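
/* Worked example for the pointer math above (hypothetical sizes): a 16K
 * ILT page and a 512-byte aligned connection context give
 * cxts_per_p == 32, so iid 70 maps to shadow line 2 (70 / 32) at byte
 * offset (70 % 32) * 512 == 3072 within that page.
 */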

static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
				   struct qed_rdma_pf_params *p_params,
				   u32 num_tasks)
{
	u32 num_cons, num_qps;
	enum protocol_type proto;

	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
		p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
	}

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_IWARP:
		/* Each QP requires one connection */
		num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
		proto = PROTOCOLID_IWARP;
		break;
	case QED_PCI_ETH_ROCE:
		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
		num_cons = num_qps * 2;	/* each QP requires two connections */
		proto = PROTOCOLID_ROCE;
		break;
	default:
		return;
	}

	if (num_cons && num_tasks) {
		u32 num_srqs, num_xrc_srqs;

		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);

		/* Deliberately passing ROCE for the task id, since iWARP
		 * and RoCE share the task id.
		 */
		qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
					    QED_CXT_ROCE_TID_SEG, 1,
					    num_tasks, false);

		num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);

		/* XRC SRQs populate a single ILT page */
		num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);

		qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
	} else {
		DP_INFO(p_hwfn->cdev,
			"RDMA personality used without setting params!\n");
	}
}

int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
	/* Set the number of required CORE connections */
	u32 core_cids = 1;	/* SPQ */

	if (p_hwfn->using_ll2)
		core_cids += 4;
	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH_RDMA:
	case QED_PCI_ETH_IWARP:
	case QED_PCI_ETH_ROCE:
	{
		qed_rdma_set_pf_params(p_hwfn,
				       &p_hwfn->pf_params.rdma_pf_params,
				       rdma_tasks);
		/* no break needed since RoCE coexists with Ethernet */
	}
	fallthrough;
	case QED_PCI_ETH:
	{
		struct qed_eth_pf_params *p_params =
		    &p_hwfn->pf_params.eth_pf_params;

		if (!p_params->num_vf_cons)
			p_params->num_vf_cons =
			    ETH_PF_PARAMS_VF_CONS_DEFAULT;
		qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
					    p_params->num_cons,
					    p_params->num_vf_cons);
		p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
		break;
	}
	case QED_PCI_FCOE:
	{
		struct qed_fcoe_pf_params *p_params;

		p_params = &p_hwfn->pf_params.fcoe_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_FCOE,
						    p_params->num_cons,
						    0);
			qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
						    QED_CXT_FCOE_TID_SEG, 0,
						    p_params->num_tasks, true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"FCoE personality used without setting params!\n");
		}
		break;
	}
	case QED_PCI_ISCSI:
	{
		struct qed_iscsi_pf_params *p_params;

		p_params = &p_hwfn->pf_params.iscsi_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_TCP_ULP,
						    p_params->num_cons,
						    0);
			qed_cxt_set_proto_tid_count(p_hwfn,
						    PROTOCOLID_TCP_ULP,
						    QED_CXT_TCP_ULP_TID_SEG,
						    0,
						    p_params->num_tasks,
						    true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"iSCSI personality used without setting params!\n");
		}
		break;
	}
	case QED_PCI_NVMETCP:
	{
		struct qed_nvmetcp_pf_params *p_params;

		p_params = &p_hwfn->pf_params.nvmetcp_pf_params;

		if (p_params->num_cons && p_params->num_tasks) {
			qed_cxt_set_proto_cid_count(p_hwfn,
						    PROTOCOLID_TCP_ULP,
						    p_params->num_cons,
						    0);
			qed_cxt_set_proto_tid_count(p_hwfn,
						    PROTOCOLID_TCP_ULP,
						    QED_CXT_TCP_ULP_TID_SEG,
						    0,
						    p_params->num_tasks,
						    true);
		} else {
			DP_INFO(p_hwfn->cdev,
				"NVMeTCP personality used without setting params!\n");
		}
		break;
	}
	default:
		return -EINVAL;
	}

	return 0;
}
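
/* Usage sketch (illustrative; the field values are hypothetical): an
 * upper-layer driver populates p_hwfn->pf_params before this runs, e.g.
 *
 *	p_hwfn->pf_params.rdma_pf_params.num_qps = 1024;
 *	p_hwfn->pf_params.rdma_pf_params.num_srqs = 256;
 *	rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
 *
 * so the per-protocol CID/TID counts set above reflect the requested
 * load.
 */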

int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 proto, seg, total_lines, i, shadow_line;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_fl_seg;
	struct qed_tid_seg *p_seg_info;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		proto = PROTOCOLID_TCP_ULP;
		seg = QED_CXT_TCP_ULP_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
	if (!p_seg_info->has_fl_mem)
		return -EINVAL;

	p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
				   p_fl_seg->real_size_in_page);

	for (i = 0; i < total_lines; i++) {
		shadow_line = i + p_fl_seg->start_line -
			      p_hwfn->p_cxt_mngr->pf_start_line;
		p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
	}
	p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
			p_fl_seg->real_size_in_page;
	p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
	p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
				     p_info->tid_size;

	return 0;
}

/* This function is very RoCE oriented; if another protocol ever wants
 * this feature, the function will need to be made more generic.
 */
int
qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			  enum qed_cxt_elem_type elem_type, u32 iid)
{
	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
	struct tdif_task_context *tdif_context;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_ptt *p_ptt;
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	u32 flags1;
	int rc = 0;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		/* The first ILT page is not used for regular SRQs. Skip it. */
		iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_XRC_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
		return -EOPNOTSUPP;
	}
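
	/* Note on the indexing below: the ilt_shadow[] array is
	 * PF-relative, so the absolute ILT line is rebased by subtracting
	 * pf_start_line before it is used as a shadow index.
	 */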

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	line = p_blk->start_line + (iid / elems_per_p);
	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;

	/* If line is already allocated, do nothing, otherwise allocate it and
	 * write it to the PSWRQ2 registers.
	 * This section can be run in parallel from different contexts and thus
	 * a mutex protection is needed.
	 */

	mutex_lock(&p_hwfn->p_cxt_mngr->mutex);

	if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
		goto out0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		rc = -EBUSY;
		goto out0;
	}

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_blk->real_size_in_page, &p_phys,
				    GFP_KERNEL);
	if (!p_virt) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Configuring refTagMask to 0xF is required for RoCE DIF MR only,
	 * to compensate for a HW bug, but it is configured even if DIF is not
	 * enabled. This is harmless and allows us to avoid a dedicated API. We
	 * configure the field for all of the contexts on the newly allocated
	 * page.
	 */
	if (elem_type == QED_ELEM_TASK) {
		u32 elem_i;
		u8 *elem_start = (u8 *)p_virt;
		union type1_task_context *elem;

		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			tdif_context = &elem->roce_ctx.tdif_context;

			flags1 = le32_to_cpu(tdif_context->flags1);
			SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			tdif_context->flags1 = cpu_to_le32(flags1);

			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}

	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
	p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
	    p_blk->real_size_in_page;

	/* compute absolute offset */
	reg_offset = PSWRQ2_REG_ILT_MEMORY +
		     (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);

	ilt_hw_entry = 0;
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
		  (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
		   >> 12));

	/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
	qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
			  reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
			  NULL);

	if (elem_type == QED_ELEM_CXT) {
		u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
					 elems_per_p;

		/* Update the relevant register in the parser */
		qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
		       last_cid_allocated - 1);

		if (!p_hwfn->b_rdma_enabled_in_prs) {
			/* Enable RDMA search */
			qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
			p_hwfn->b_rdma_enabled_in_prs = true;
		}
	}

out1:
	qed_ptt_release(p_hwfn, p_ptt);
out0:
	mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);

	return rc;
}
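
/* Illustrative note on the ILT entry written above (hypothetical
 * address): the 64-bit entry holds the page-aligned DMA address shifted
 * right by 12 plus a valid bit, so a page at physical 0x12345000 is
 * programmed as the field value 0x12345 with ILT_ENTRY_VALID set, and
 * the entry reaches the wide-bus ILT memory via DMAE as two u32s.
 */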

/* This function is very RoCE oriented; if another protocol ever wants
 * this feature, the function will need to be made more generic.
 */
static int
qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
		       enum qed_cxt_elem_type elem_type,
		       u32 start_iid, u32 count)
{
	u32 start_line, end_line, shadow_start_line, shadow_end_line;
	u32 reg_offset, elem_size, hw_p_size, elems_per_p;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u32 end_iid = start_iid + count;
	struct qed_ptt *p_ptt;
	u64 ilt_hw_entry = 0;
	u32 i;

	switch (elem_type) {
	case QED_ELEM_CXT:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
		elem_size = CONN_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUC_BLK];
		break;
	case QED_ELEM_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_XRC_SRQ:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
		elem_size = XRC_SRQ_CXT_SIZE;
		p_blk = &p_cli->pf_blks[SRQ_BLK];
		break;
	case QED_ELEM_TASK:
		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
		return -EINVAL;
	}

	/* Calculate line in ilt */
	hw_p_size = p_cli->p_size.val;
	elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	start_line = p_blk->start_line + (start_iid / elems_per_p);
	end_line = p_blk->start_line + (end_iid / elems_per_p);
	if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
		end_line--;

	shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
	shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn,
			  "QED_TIME_OUT on ptt acquire - dynamic allocation");
		return -EBUSY;
	}

	for (i = shadow_start_line; i < shadow_end_line; i++) {
		if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
			continue;

		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
				  p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);

		p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
		p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;

		/* compute absolute offset */
		reg_offset = PSWRQ2_REG_ILT_MEMORY +
			     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
			      ILT_ENTRY_IN_REGS);

		/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
		 * wide-bus.
		 */
		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64) (uintptr_t) &ilt_hw_entry,
				  reg_offset,
				  sizeof(ilt_hw_entry) / sizeof(u32),
				  NULL);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
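
/* Note: freeing a range does more than release the DMA pages; each
 * affected line is also re-written as a zeroed (invalid) ILT entry via
 * DMAE, so the hardware no longer references the freed memory.
 */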

int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
{
	int rc;
	u32 cid;

	/* Free Connection CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
				    qed_cxt_get_proto_cid_start(p_hwfn,
								proto),
				    qed_cxt_get_proto_cid_count(p_hwfn,
								proto, &cid));

	if (rc)
		return rc;

	/* Free Task CXT (intentionally RoCE, as the task-id is shared
	 * between RoCE and iWARP)
	 */
	proto = PROTOCOLID_ROCE;
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
	if (rc)
		return rc;

	/* Free TSDM CXT */
	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
				    p_hwfn->p_cxt_mngr->xrc_srq_count);
	if (rc)
		return rc;

	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
				    p_hwfn->p_cxt_mngr->xrc_srq_count,
				    p_hwfn->p_cxt_mngr->srq_count);

	return rc;
}

int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
			 u32 tid, u8 ctx_type, void **pp_task_ctx)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_tid_seg *p_seg_info;
	struct qed_ilt_cli_blk *p_seg;
	u32 num_tids_per_block;
	u32 tid_size, ilt_idx;
	u32 total_lines;
	u32 proto, seg;

	/* Verify the personality */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		proto = PROTOCOLID_FCOE;
		seg = QED_CXT_FCOE_TID_SEG;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		proto = PROTOCOLID_TCP_ULP;
		seg = QED_CXT_TCP_ULP_TID_SEG;
		break;
	default:
		return -EINVAL;
	}

	p_cli = &p_mngr->clients[ILT_CLI_CDUT];
	if (!p_cli->active)
		return -EINVAL;

	p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];

	if (ctx_type == QED_CTX_WORKING_MEM) {
		p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
	} else if (ctx_type == QED_CTX_FL_MEM) {
		if (!p_seg_info->has_fl_mem)
			return -EINVAL;
		p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
	} else {
		return -EINVAL;
	}
	total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
	tid_size = p_mngr->task_type_size[p_seg_info->type];
	num_tids_per_block = p_seg->real_size_in_page / tid_size;

	if (total_lines < tid / num_tids_per_block)
		return -EINVAL;

	ilt_idx = tid / num_tids_per_block + p_seg->start_line -
		  p_mngr->pf_start_line;
	*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
		       (tid % num_tids_per_block) * tid_size;

	return 0;
}

static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
{
	if (p_blk->real_size_in_page == 0)
		return 0;

	return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
}

u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}
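
/* Each of the qed_get_cdut_num_*_pages() helpers sums
 * DIV_ROUND_UP(total_size, real_size_in_page) over the relevant PF/VF
 * segment blocks. For example (hypothetical sizes), a block with
 * total_size == 10000 and real_size_in_page == 4096 contributes 3 pages.
 */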

u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
		p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}

u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 i, pages = 0;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}

u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u16 pages = 0, i;

	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
	for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
		p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
		pages += qed_blk_calculate_pages(p_blk);
	}

	return pages;
}