/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

enum cminterface {
    MCM_SEC,
    MCM_PRI,
    UCM_SEC,
    UCM_PRI,
    TCM_SEC,
    TCM_PRI,
    YCM_SEC,
    YCM_PRI,
    XCM_SEC,
    XCM_PRI,
    NUM_OF_CM_INTERFACES
};

/* general constants */
#define QM_PQ_ELEMENT_SIZE		4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size) \
    (pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) \
    (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
#define QM_INVALID_PQ_ID		0xffff
/* feature enable */
#define QM_BYPASS_EN			1
#define QM_BYTE_CRD_EN			1
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF		4
/* WFQ constants */
#define QM_WFQ_UPPER_BOUND		6250000
#define QM_WFQ_VP_PQ_VOQ_SHIFT		0
#define QM_WFQ_VP_PQ_PF_SHIFT		5
#define QM_WFQ_INC_VAL(weight)		((weight) * 0x9000)
#define QM_WFQ_MAX_INC_VAL		4375000
#define QM_WFQ_INIT_CRD(inc_val)	(2 * (inc_val))
/* RL constants */
#define QM_RL_UPPER_BOUND		6250000
#define QM_RL_PERIOD			5 /* in us */
#define QM_RL_PERIOD_CLK_25M		(25 * QM_RL_PERIOD)
#define QM_RL_INC_VAL(rate) \
    max_t(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD) / 8), 1)
#define QM_RL_MAX_INC_VAL		4375000
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF		1
#define QM_OPPOR_FW_STOP_DEF		0
#define QM_OPPOR_PQ_EMPTY_DEF		1
#define EAGLE_WORKAROUND_TC		7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES		150
#define PBF_CMDQ_EAGLE_WORKAROUND_LINES	8
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
    (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
     (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
    (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
     (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
    ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS		38
#define BTB_HEADROOM_BLOCKS		BTB_JUMBO_PKT_BLOCKS
#define BTB_EAGLE_WORKAROUND_BLOCKS	4
#define BTB_PURE_LB_FACTOR		10
#define BTB_PURE_LB_RATIO		7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH		32
#define QM_STOP_CMD_ADDR		0x2
#define QM_STOP_CMD_STRUCT_SIZE		2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
#define QM_STOP_CMD_GROUP_ID_OFFSET	1
#define QM_STOP_CMD_GROUP_ID_SHIFT	16
#define QM_STOP_CMD_GROUP_ID_MASK	15
#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
#define QM_STOP_CMD_PQ_TYPE_MASK	1
#define QM_STOP_CMD_MAX_POLL_COUNT	100
#define QM_STOP_CMD_POLL_PERIOD_US	500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd)		cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
    SET_FIELD(var[cmd ## _ ## field ## _OFFSET], cmd ## _ ## field, value)
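/* Worked example of the sizing macros above (illustrative numbers): for
 * pq_size = 1000 elements, QM_PQ_MEM_4KB(1000) =
 * DIV_ROUND_UP(1001 * QM_PQ_ELEMENT_SIZE, 0x1000) = 1, i.e. one 4KB page,
 * and QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 0x100) - 1 = 3, i.e. the
 * size is encoded in 256B units minus one. A pq_size of 0 yields 0 in both
 * cases.
 */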
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) \
    ((port) * (max_phy_tcs_pr_port) + (tc))
#define LB_VOQ(port)			(MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
    ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : LB_VOQ(port))

/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
    if (pf_rl_en) {
        /* enable RLs for all VOQs */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
                     (1 << MAX_NUM_VOQS) - 1);
        /* write RL period */
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        /* set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
                         QM_RL_UPPER_BOUND);
    }
}

/* Prepare PF WFQ enable/disable runtime init values */
static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
    /* set credit threshold for QM bypass flow */
    if (pf_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
                     QM_WFQ_UPPER_BOUND);
}

/* Prepare VPORT RL enable/disable runtime init values */
static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
                 vport_rl_en ? 1 : 0);
    if (vport_rl_en) {
        /* write RL period (use timer 0 only) */
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
                     QM_RL_PERIOD_CLK_25M);
        /* set credit threshold for QM bypass flow */
        if (QM_BYPASS_EN)
            STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
                         QM_RL_UPPER_BOUND);
    }
}

/* Prepare VPORT WFQ enable/disable runtime init values */
static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
    STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
                 vport_wfq_en ? 1 : 0);
    /* set credit threshold for QM bypass flow */
    if (vport_wfq_en && QM_BYPASS_EN)
        STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
                     QM_WFQ_UPPER_BOUND);
}

/* Prepare runtime init values to allocate PBF command queue lines for
 * the specified VOQ
 */
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
                                       u8 voq, u16 cmdq_lines)
{
    u32 qm_line_crd;

    /* In A0 - Limit the size of pbf queue so that only 511 commands with
     * the minimum size of 4 (FCoE minimum size)
     */
    bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);

    if (is_bb_a0)
        cmdq_lines = min_t(u32, cmdq_lines, 1022);
    qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
    OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
                     (u32)cmdq_lines);
    STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
    STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
                 qm_line_crd);
}
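/* Worked example of the credit value computed above (illustrative): with the
 * default PBF_CMDQ_PURE_LB_LINES = 150 lines, QM_VOQ_LINE_CRD(150) =
 * ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT, i.e. 292 with the register
 * sign bit set; the "- 4" and "* 2" factors come from the QM_VOQ_LINE_CRD()
 * definition at the top of this file.
 */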
/* Prepare runtime init values to allocate PBF command queue lines. */
static void qed_cmdq_lines_rt_init(
    struct qed_hwfn *p_hwfn,
    u8 max_ports_per_engine,
    u8 max_phys_tcs_per_port,
    struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u8 tc, voq, port_id;

    /* clear PBF lines for all VOQs */
    for (voq = 0; voq < MAX_NUM_VOQS; voq++)
        STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        if (port_params[port_id].active) {
            u16 phys_lines, phys_lines_per_tc;
            u8 phys_tcs = port_params[port_id].num_active_phys_tcs;

            /* find #lines to divide between the active
             * physical TCs.
             */
            phys_lines = port_params[port_id].num_pbf_cmd_lines -
                         PBF_CMDQ_PURE_LB_LINES;
            /* find #lines per active physical TC */
            phys_lines_per_tc = phys_lines / phys_tcs;
            /* init registers per active TC */
            for (tc = 0; tc < phys_tcs; tc++) {
                voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
                qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
                                           phys_lines_per_tc);
            }
            /* init registers for pure LB TC */
            qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
                                       PBF_CMDQ_PURE_LB_LINES);
        }
    }
}

static void qed_btb_blocks_rt_init(
    struct qed_hwfn *p_hwfn,
    u8 max_ports_per_engine,
    u8 max_phys_tcs_per_port,
    struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
    u32 usable_blocks, pure_lb_blocks, phys_blocks;
    u8 tc, voq, port_id;

    for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
        u32 temp;
        u8 phys_tcs;

        if (!port_params[port_id].active)
            continue;

        phys_tcs = port_params[port_id].num_active_phys_tcs;

        /* subtract headroom blocks */
        usable_blocks = port_params[port_id].num_btb_blocks -
                        BTB_HEADROOM_BLOCKS;

        /* find blocks per physical TC. use factor to avoid
         * floating-point arithmetic.
         */
        pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
                         (phys_tcs * BTB_PURE_LB_FACTOR +
                          BTB_PURE_LB_RATIO);
        pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
                               pure_lb_blocks / BTB_PURE_LB_FACTOR);
        phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;

        /* init physical TCs */
        for (tc = 0; tc < phys_tcs; tc++) {
            voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
            STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
                         phys_blocks);
        }

        /* init pure LB TC */
        temp = LB_VOQ(port_id);
        STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
                     pure_lb_blocks);
    }
}
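/* Worked example for the BTB split above (illustrative numbers): with
 * num_btb_blocks = 1000 and 4 active physical TCs, usable_blocks =
 * 1000 - BTB_HEADROOM_BLOCKS = 962, the scaled pure-LB share is
 * (962 * 10) / (4 * 10 + 7) = 204, which after dividing the factor back out
 * (20) is clamped up to BTB_JUMBO_PKT_BLOCKS = 38, leaving
 * (962 - 38) / 4 = 231 guaranteed blocks per physical TC.
 */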
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void qed_tx_pq_map_rt_init(
    struct qed_hwfn *p_hwfn,
    struct qed_ptt *p_ptt,
    struct qed_qm_pf_rt_init_params *p_params,
    u32 base_mem_addr_4kb)
{
    struct init_qm_vport_params *vport_params = p_params->vport_params;
    u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
    u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
    u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
                        QM_PF_QUEUE_GROUP_SIZE;
    bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
    u16 i, pq_id, pq_group;

    /* a bit per Tx PQ indicating if the PQ is associated with a VF */
    u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
    u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
    u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
    u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
    u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
    u32 mem_addr_4kb = base_mem_addr_4kb;

    /* set mapping from PQ group to PF */
    for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
        STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
                     (u32)(p_params->pf_id));
    /* set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
                 QM_PQ_SIZE_256B(p_params->num_pf_cids));
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
                 QM_PQ_SIZE_256B(p_params->num_vf_cids));

    /* go over all Tx PQs */
    for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
        u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
                     p_params->max_phys_tcs_per_port);
        bool is_vf_pq = (i >= p_params->num_pf_pqs);
        struct qm_rf_pq_map tx_pq_map;

        /* update first Tx PQ of VPORT/TC */
        u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
                            p_params->start_vport;
        u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
        u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];

        if (first_tx_pq_id == QM_INVALID_PQ_ID) {
            /* create new VP PQ */
            pq_ids[p_params->pq_params[i].tc_id] = pq_id;
            first_tx_pq_id = pq_id;
            /* map VP PQ to VOQ and PF */
            STORE_RT_REG(p_hwfn,
                         QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
                         (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
                         (p_params->pf_id << QM_WFQ_VP_PQ_PF_SHIFT));
        }
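        /* Illustrative example of the bookkeeping above: if the first PQ
         * handled for a given VPORT/TC pair is pq_id 100, first_tx_pq_id[tc]
         * is latched to 100 and any further PQ of that VPORT/TC reuses 100 as
         * its VP PQ ID in the map entry built below.
         */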
        /* fill PQ map entry */
        memset(&tx_pq_map, 0, sizeof(tx_pq_map));
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
                  is_vf_pq ? 1 : 0);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
                  is_vf_pq ? p_params->pq_params[i].vport_id : 0);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
        SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
                  p_params->pq_params[i].wrr_group);
        /* write PQ map entry to CAM */
        STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
                     *((u32 *)&tx_pq_map));
        /* set base address */
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
                     mem_addr_4kb);
        /* check if VF PQ */
        if (is_vf_pq) {
            /* if PQ is associated with a VF, add indication
             * to PQ VF mask
             */
            tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
                (1 << (pq_id % tx_pq_vf_mask_width));
            mem_addr_4kb += vport_pq_mem_4kb;
        } else {
            mem_addr_4kb += pq_mem_4kb;
        }
    }

    /* store Tx PQ VF mask to size select register */
    for (i = 0; i < num_tx_pq_vf_masks; i++) {
        if (tx_pq_vf_mask[i]) {
            if (is_bb_a0) {
                u32 curr_mask = 0, addr;

                addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
                if (!p_params->is_first_pf)
                    curr_mask = qed_rd(p_hwfn, p_ptt, addr);

                addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;

                STORE_RT_REG(p_hwfn, addr,
                             curr_mask | tx_pq_vf_mask[i]);
            } else {
                u32 addr;

                addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
                STORE_RT_REG(p_hwfn, addr, tx_pq_vf_mask[i]);
            }
        }
    }
}

/* Prepare Other PQ mapping runtime init values for the specified PF */
static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
                                     u8 port_id,
                                     u8 pf_id,
                                     u32 num_pf_cids,
                                     u32 num_tids,
                                     u32 base_mem_addr_4kb)
{
    u16 i, pq_id;

    /* a single other PQ group is used in each PF,
     * where PQ group i is used in PF i.
     */
    u16 pq_group = pf_id;
    u32 pq_size = num_pf_cids + num_tids;
    u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
    u32 mem_addr_4kb = base_mem_addr_4kb;

    /* map PQ group to PF */
    STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
                 (u32)(pf_id));
    /* set PQ sizes */
    STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
                 QM_PQ_SIZE_256B(pq_size));
    /* set base address */
    for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
         i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
        STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
                     mem_addr_4kb);
        mem_addr_4kb += pq_mem_4kb;
    }
}
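/* Note on the WFQ weight range checked in the functions below: since
 * QM_WFQ_INC_VAL(weight) is weight * 0x9000 (36864), the QM_WFQ_MAX_INC_VAL
 * limit of 4375000 corresponds to a maximum usable weight of 118
 * (118 * 36864 = 4349952, while 119 * 36864 = 4386816 already exceeds the
 * limit).
 */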
/* Prepare PF WFQ runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              struct qed_qm_pf_rt_init_params *p_params)
{
    u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
    u32 crd_reg_offset;
    u32 inc_val;
    u16 i;

    if (p_params->pf_id < MAX_NUM_PFS_BB)
        crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
    else
        crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
                         (p_params->pf_id % MAX_NUM_PFS_BB);

    inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
    if (inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
        return -1;
    }
    STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
                 inc_val);
    STORE_RT_REG(p_hwfn,
                 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
                 QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);

    for (i = 0; i < num_tx_pqs; i++) {
        u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
                     p_params->max_phys_tcs_per_port);

        OVERWRITE_RT_REG(p_hwfn,
                         crd_reg_offset + voq * MAX_NUM_PFS_BB,
                         QM_WFQ_INIT_CRD(inc_val) |
                         QM_WFQ_CRD_REG_SIGN_BIT);
    }

    return 0;
}

/* Prepare PF RL runtime init values for the specified PF.
 * Return -1 on error.
 */
static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
                             u8 pf_id, u32 pf_rl)
{
    u32 inc_val = QM_RL_INC_VAL(pf_rl);

    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
        return -1;
    }
    STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
                 QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
                 QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
    STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
    return 0;
}
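/* Note on QM_RL_INC_VAL(), used by the rate-limiter init above and below:
 * the increment is ((rate ? rate : 1000000) * QM_RL_PERIOD) / 8 with a floor
 * of 1. For example, a rate of 40000 yields (40000 * 5) / 8 = 25000 credits
 * per RL period, and a rate of 0 falls back to the 1000000 default (625000
 * credits). Increments above QM_RL_MAX_INC_VAL (4375000) are rejected.
 */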
/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
 * Return -1 on error.
 */
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
                              u8 num_vports,
                              struct init_qm_vport_params *vport_params)
{
    u32 inc_val;
    u8 tc, i;

    /* go over all PF VPORTs */
    for (i = 0; i < num_vports; i++) {

        if (!vport_params[i].vport_wfq)
            continue;

        inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
        if (inc_val > QM_WFQ_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn,
                      "Invalid VPORT WFQ weight configuration");
            return -1;
        }

        /* each VPORT can have several VPORT PQ IDs for
         * different TCs
         */
        for (tc = 0; tc < NUM_OF_TCS; tc++) {
            u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];

            if (vport_pq_id != QM_INVALID_PQ_ID) {
                STORE_RT_REG(p_hwfn,
                             QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id,
                             QM_WFQ_CRD_REG_SIGN_BIT);
                STORE_RT_REG(p_hwfn,
                             QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id,
                             inc_val);
            }
        }
    }

    return 0;
}

static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
                                u8 start_vport,
                                u8 num_vports,
                                struct init_qm_vport_params *vport_params)
{
    u8 i, vport_id;

    /* go over all PF VPORTs */
    for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
        u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);

        if (inc_val > QM_RL_MAX_INC_VAL) {
            DP_NOTICE(p_hwfn,
                      "Invalid VPORT rate-limit configuration");
            return -1;
        }

        STORE_RT_REG(p_hwfn,
                     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
                     QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn,
                     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
                     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
        STORE_RT_REG(p_hwfn,
                     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
                     inc_val);
    }

    return 0;
}

static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt)
{
    u32 reg_val, i;

    for (i = 0, reg_val = 0;
         i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; i++) {
        udelay(QM_STOP_CMD_POLL_PERIOD_US);
        reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
    }

    /* check if timeout while waiting for SDM command ready */
    if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Timeout when waiting for QM SDM command ready signal\n");
        return false;
    }

    return true;
}

static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            u32 cmd_addr,
                            u32 cmd_data_lsb,
                            u32 cmd_data_msb)
{
    if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
        return false;

    qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
    qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
    qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
    qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
    qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);

    return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}

/******************** INTERFACE IMPLEMENTATION *********************/
u32 qed_qm_pf_mem_size(u8 pf_id,
                       u32 num_pf_cids,
                       u32 num_vf_cids,
                       u32 num_tids,
                       u16 num_pf_pqs,
                       u16 num_vf_pqs)
{
    return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
           QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
}
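/* Worked example for qed_qm_pf_mem_size() (illustrative numbers): with
 * num_pf_cids = 2048, num_vf_cids = 256, num_tids = 0, num_pf_pqs = 8 and
 * num_vf_pqs = 16, each PF PQ needs QM_PQ_MEM_4KB(2048) =
 * DIV_ROUND_UP(2049 * 4, 0x1000) = 3 pages and each VF PQ needs
 * QM_PQ_MEM_4KB(256) = 1 page, so the PF requires
 * 8 * 3 + 16 * 1 + QM_OTHER_PQS_PER_PF * 3 = 52 pages of 4KB.
 */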
int qed_qm_common_rt_init(
    struct qed_hwfn *p_hwfn,
    struct qed_qm_common_rt_init_params *p_params)
{
    /* init AFullOprtnstcCrdMask */
    u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
                QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
               (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
               (p_params->pf_wfq_en <<
                QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
               (p_params->vport_wfq_en <<
                QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
               (p_params->pf_rl_en <<
                QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
               (p_params->vport_rl_en <<
                QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
               (QM_OPPOR_FW_STOP_DEF <<
                QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
               (QM_OPPOR_PQ_EMPTY_DEF <<
                QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);

    STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
    qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
    qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
    qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
    qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
    qed_cmdq_lines_rt_init(p_hwfn,
                           p_params->max_ports_per_engine,
                           p_params->max_phys_tcs_per_port,
                           p_params->port_params);
    qed_btb_blocks_rt_init(p_hwfn,
                           p_params->max_ports_per_engine,
                           p_params->max_phys_tcs_per_port,
                           p_params->port_params);
    return 0;
}

int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      struct qed_qm_pf_rt_init_params *p_params)
{
    struct init_qm_vport_params *vport_params = p_params->vport_params;
    u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
                                           p_params->num_tids) *
                             QM_OTHER_PQS_PER_PF;
    u8 tc, i;

    /* clear first Tx PQ ID array for each VPORT */
    for (i = 0; i < p_params->num_vports; i++)
        for (tc = 0; tc < NUM_OF_TCS; tc++)
            vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;

    /* map Other PQs (if any) */
    qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
                             p_params->num_pf_cids, p_params->num_tids, 0);

    /* map Tx PQs */
    qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);

    if (p_params->pf_wfq)
        if (qed_pf_wfq_rt_init(p_hwfn, p_params))
            return -1;

    if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
        return -1;

    if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
        return -1;

    if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
                             p_params->num_vports, vport_params))
        return -1;

    return 0;
}

int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    u8 pf_id, u16 pf_wfq)
{
    u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);

    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
        return -1;
    }

    qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
    return 0;
}

int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u8 pf_id, u32 pf_rl)
{
    u32 inc_val = QM_RL_INC_VAL(pf_rl);

    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
        return -1;
    }

    qed_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
           QM_RL_CRD_REG_SIGN_BIT);
    qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);

    return 0;
}
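/* Illustrative (hypothetical) runtime usage of the two helpers above, with a
 * valid PTT window already acquired: qed_init_pf_wfq(p_hwfn, p_ptt, pf_id, 10)
 * followed by qed_init_pf_rl(p_hwfn, p_ptt, pf_id, 25000) re-programs a PF's
 * WFQ weight and rate limit directly via register writes, without re-running
 * the full runtime init; the argument values here are examples only.
 */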
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u16 first_tx_pq_id[NUM_OF_TCS],
                       u16 vport_wfq)
{
    u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
    u8 tc;

    if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
        return -1;
    }

    for (tc = 0; tc < NUM_OF_TCS; tc++) {
        u16 vport_pq_id = first_tx_pq_id[tc];

        if (vport_pq_id != QM_INVALID_PQ_ID)
            qed_wr(p_hwfn, p_ptt,
                   QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
                   inc_val);
    }

    return 0;
}

int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u8 vport_id, u32 vport_rl)
{
    u32 inc_val = QM_RL_INC_VAL(vport_rl);

    if (inc_val > QM_RL_MAX_INC_VAL) {
        DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
        return -1;
    }

    qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
           QM_RL_CRD_REG_SIGN_BIT);
    qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);

    return 0;
}

bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          bool is_release_cmd,
                          bool is_tx_pq,
                          u16 start_pq,
                          u16 num_pqs)
{
    u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
    u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;

    /* set command's PQ type */
    QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);

    for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
        /* set PQ bit in mask (stop command only) */
        if (!is_release_cmd)
            pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));

        /* if last PQ or end of PQ mask, write command */
        if ((pq_id == last_pq) ||
            (pq_id % QM_STOP_PQ_MASK_WIDTH ==
             (QM_STOP_PQ_MASK_WIDTH - 1))) {
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
                             PAUSE_MASK, pq_mask);
            QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
                             GROUP_ID,
                             pq_id / QM_STOP_PQ_MASK_WIDTH);
            if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
                                 cmd_arr[0], cmd_arr[1]))
                return false;
            pq_mask = 0;
        }
    }

    return true;
}
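/* Worked example of the PQ-mask batching above (illustrative): calling
 * qed_send_qm_stop_cmd() with start_pq = 30 and num_pqs = 4 issues two
 * commands: one for GROUP_ID 0 with pause-mask bits 30 and 31 set (PQs 30
 * and 31), and one for GROUP_ID 1 with bits 0 and 1 set (PQs 32 and 33),
 * since each mask covers QM_STOP_PQ_MASK_WIDTH (32) PQs at a time.
 */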
static void
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
{
    if (enable)
        set_bit(bit, var);
    else
        clear_bit(bit, var);
}

#define PRS_ETH_TUNN_FIC_FORMAT	-188897008

void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt,
                             u16 dest_port)
{
    qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
    qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
    qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}

void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          bool vxlan_enable)
{
    unsigned long reg_val = 0;
    u8 shift;

    reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

    qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

    if (reg_val)
        qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
               PRS_ETH_TUNN_FIC_FORMAT);

    reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);

    qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
           vxlan_enable ? 1 : 0);
}

void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        bool eth_gre_enable, bool ip_gre_enable)
{
    unsigned long reg_val = 0;
    u8 shift;

    reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

    shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
    qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val)
        qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
               PRS_ETH_TUNN_FIC_FORMAT);

    reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
    shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);

    shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
    qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

    qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
           eth_gre_enable ? 1 : 0);
    qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
           ip_gre_enable ? 1 : 0);
}

void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              u16 dest_port)
{
    qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
    qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
    qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}

void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
                           struct qed_ptt *p_ptt,
                           bool eth_geneve_enable,
                           bool ip_geneve_enable)
{
    unsigned long reg_val = 0;
    u8 shift;

    reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
    shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);

    shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
    qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);

    qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
    if (reg_val)
        qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
               PRS_ETH_TUNN_FIC_FORMAT);

    qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
           eth_geneve_enable ? 1 : 0);
    qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);

    /* comp ver */
    reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
    qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
    qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
    qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);

    /* EDPM with geneve tunnel not supported in BB_B0 */
    if (QED_IS_BB_B0(p_hwfn->cdev))
        return;

    qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
           eth_geneve_enable ? 1 : 0);
    qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
           ip_geneve_enable ? 1 : 0);
}
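/* Illustrative (hypothetical) usage of the tunnel configuration helpers above,
 * with a valid PTT window already acquired: the IANA-assigned UDP ports are
 * 4789 for VXLAN and 6081 for GENEVE, so a typical sequence would be
 * qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789) followed by
 * qed_set_vxlan_enable(p_hwfn, p_ptt, true), and likewise
 * qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081) with
 * qed_set_geneve_enable(p_hwfn, p_ptt, true, true).
 */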