1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) 2 /* QLogic qed NIC Driver 3 * Copyright (c) 2015-2017 QLogic Corporation 4 * Copyright (c) 2019-2021 Marvell International Ltd. 5 */ 6 7 #include <linux/types.h> 8 #include <linux/crc8.h> 9 #include <linux/delay.h> 10 #include <linux/kernel.h> 11 #include <linux/slab.h> 12 #include <linux/string.h> 13 #include "qed_hsi.h" 14 #include "qed_hw.h" 15 #include "qed_init_ops.h" 16 #include "qed_iro_hsi.h" 17 #include "qed_reg_addr.h" 18 19 #define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG 20 21 /* General constants */ 22 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ 23 QM_PQ_ELEMENT_SIZE, \ 24 0x1000) : 0) 25 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ 26 0x100) - 1 : 0) 27 #define QM_INVALID_PQ_ID 0xffff 28 29 /* Max link speed (in Mbps) */ 30 #define QM_MAX_LINK_SPEED 100000 31 32 /* Feature enable */ 33 #define QM_BYPASS_EN 1 34 #define QM_BYTE_CRD_EN 1 35 36 /* Initial VOQ byte credit */ 37 #define QM_INITIAL_VOQ_BYTE_CRD 98304 38 /* Other PQ constants */ 39 #define QM_OTHER_PQS_PER_PF 4 40 41 /* VOQ constants */ 42 #define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) 43 #define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) 44 45 /* WFQ constants */ 46 47 /* PF WFQ increment value, 0x9000 = 4*9*1024 */ 48 #define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) 49 50 /* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ 51 #define QM_PF_WFQ_UPPER_BOUND 62500000 52 53 /* PF WFQ max increment value, 0.7 * upper bound */ 54 #define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) 55 56 /* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ 57 #define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 58 59 /* VP WFQ increment value */ 60 #define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) 61 62 /* VP WFQ min increment value */ 63 #define QM_VP_WFQ_MIN_INC_VAL 10800 64 65 /* VP WFQ max increment value, 2^30 */ 66 #define QM_VP_WFQ_MAX_INC_VAL 0x40000000 67 68 /* VP WFQ bypass threshold */ 69 #define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) 70 71 /* VP RL credit task cost */ 72 #define QM_VP_RL_CRD_TASK_COST 9700 73 74 /* Bit of VOQ in VP WFQ PQ map */ 75 #define QM_VP_WFQ_PQ_VOQ_SHIFT 0 76 77 /* Bit of PF in VP WFQ PQ map */ 78 #define QM_VP_WFQ_PQ_PF_SHIFT 5 79 80 /* RL constants */ 81 82 /* Period in us */ 83 #define QM_RL_PERIOD 5 84 85 /* Period in 25MHz cycles */ 86 #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) 87 88 /* RL increment value - rate is specified in mbps */ 89 #define QM_RL_INC_VAL(rate) ({ \ 90 typeof(rate) __rate = (rate); \ 91 max_t(u32, \ 92 (u32)(((__rate ? 
__rate : \
93 100000) * \
94 QM_RL_PERIOD * \
95 101) / (8 * 100)), 1); })
96
97 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
98 #define QM_PF_RL_UPPER_BOUND 62500000
99
100 /* Max PF RL increment value is 0.7 * upper bound */
101 #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
102
103 /* QCN RL Upper bound, speed is in Mbps */
104 #define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \
105 u32, \
106 (u32)(((speed) * \
107 QM_RL_PERIOD * 101) / (8 * 100)), \
108 QM_VP_RL_CRD_TASK_COST \
109 + 1000))
110
111 /* AFullOprtnstcCrdMask constants */
112 #define QM_OPPOR_LINE_VOQ_DEF 1
113 #define QM_OPPOR_FW_STOP_DEF 0
114 #define QM_OPPOR_PQ_EMPTY_DEF 1
115
116 /* Command Queue constants */
117
118 /* Pure LB CmdQ lines (+spare) */
119 #define PBF_CMDQ_PURE_LB_LINES 150
120
121 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
122 (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
123 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
124 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
125
126 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
127 (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
128 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
129 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
130
131 /* Returns the VOQ line credit for the specified number of PBF command lines.
132 * PBF lines are specified in 256b units.
133 */
134 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
135 ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
136
137 /* BTB: blocks constants (block size = 256B) */
138
139 /* 256B blocks in 9700B packet */
140 #define BTB_JUMBO_PKT_BLOCKS 38
141
142 /* Headroom per-port */
143 #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
144 #define BTB_PURE_LB_FACTOR 10
145
146 /* Factored (hence really 0.7) */
147 #define BTB_PURE_LB_RATIO 7
148
149 /* QM stop command constants */
150 #define QM_STOP_PQ_MASK_WIDTH 32
151 #define QM_STOP_CMD_ADDR 2
152 #define QM_STOP_CMD_STRUCT_SIZE 2
153 #define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
154 #define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
155 #define QM_STOP_CMD_PAUSE_MASK_MASK -1
156 #define QM_STOP_CMD_GROUP_ID_OFFSET 1
157 #define QM_STOP_CMD_GROUP_ID_SHIFT 16
158 #define QM_STOP_CMD_GROUP_ID_MASK 15
159 #define QM_STOP_CMD_PQ_TYPE_OFFSET 1
160 #define QM_STOP_CMD_PQ_TYPE_SHIFT 24
161 #define QM_STOP_CMD_PQ_TYPE_MASK 1
162 #define QM_STOP_CMD_MAX_POLL_COUNT 100
163 #define QM_STOP_CMD_POLL_PERIOD_US 500
164
165 /* QM command macros */
166 #define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
167 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
168 SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
169 cmd ## _ ## field, \
170 value)
171
172 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
173 rl_id, ext_voq, wrr) \
174 do { \
175 u32 __reg = 0; \
176 \
177 BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
178 memset(&(map), 0, sizeof(map)); \
179 SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
180 SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
181 !!(rl_valid)); \
182 SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
183 SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
184 SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
185 SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
186 (wrr)); \
187 \
188 STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
189 __reg); \
190 (map).reg = cpu_to_le32(__reg); \
191 } while (0)
192
193 #define WRITE_PQ_INFO_TO_RAM 1
194 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
195 (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
196 ((rl_valid ?
1 : 0) << 22) | (((rl) & 255) << 24) | \ 197 (((rl) >> 8) << 9)) 198 199 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ 200 (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ 201 XSTORM_PQ_INFO_OFFSET(pq_id)) 202 203 static const char * const s_protocol_types[] = { 204 "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE", 205 "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP", 206 "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON", 207 "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI", 208 }; 209 210 static const char *s_ramrod_cmd_ids[][28] = { 211 { 212 "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC", 213 "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC", 214 "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN", 215 "ISCSI_RAMROD_CMD_ID_UPDATE_CONN", 216 "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN", 217 "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE", 218 "ISCSI_RAMROD_CMD_ID_CONN_STATS", }, 219 { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC", 220 "FCOE_RAMROD_CMD_ID_STAT_FUNC", 221 "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN", 222 "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", }, 223 { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", 224 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", 225 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", 226 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", 227 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", 228 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", 229 "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP", 230 "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP", 231 "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP", 232 "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE", 233 "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP", 234 "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP", 235 "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP", 236 "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", }, 237 { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START", 238 "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP", 239 "CORE_RAMROD_TX_QUEUE_STOP", 240 "CORE_RAMROD_RX_QUEUE_FLUSH", 241 "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", }, 242 { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START", 243 "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP", 244 "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP", 245 "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP", 246 "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE", 247 "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION", 248 "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER", 249 "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER", 250 "ETH_RAMROD_RX_ADD_UDP_FILTER", 251 "ETH_RAMROD_RX_DELETE_UDP_FILTER", 252 "ETH_RAMROD_RX_CREATE_GFT_ACTION", 253 "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE", 254 "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL", 255 "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL", 256 "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", }, 257 { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", 258 "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", 259 "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", 260 "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", 261 "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", 262 "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", 263 "RDMA_RAMROD_STOP_NS_TRACKING", 264 "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD", 265 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD", 266 "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR", 267 "IWARP_RAMROD_CMD_ID_CREATE_QP", 
"IWARP_RAMROD_CMD_ID_QUERY_QP", 268 "IWARP_RAMROD_CMD_ID_MODIFY_QP", 269 "IWARP_RAMROD_CMD_ID_DESTROY_QP", 270 "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", }, 271 { NULL }, /*TOE*/ 272 { NULL }, /*PREROCE*/ 273 { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START", 274 "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START", 275 "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE", 276 "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", } 277 }; 278 279 /******************** INTERNAL IMPLEMENTATION *********************/ 280 281 /* Returns the external VOQ number */ 282 static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn, 283 u8 port_id, u8 tc, u8 max_phys_tcs_per_port) 284 { 285 if (tc == PURE_LB_TC) 286 return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id; 287 else 288 return port_id * max_phys_tcs_per_port + tc; 289 } 290 291 /* Prepare PF RL enable/disable runtime init values */ 292 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) 293 { 294 STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); 295 if (pf_rl_en) { 296 u8 num_ext_voqs = MAX_NUM_VOQS; 297 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; 298 299 /* Enable RLs for all VOQs */ 300 STORE_RT_REG(p_hwfn, 301 QM_REG_RLPFVOQENABLE_RT_OFFSET, 302 (u32)voq_bit_mask); 303 304 /* Write RL period */ 305 STORE_RT_REG(p_hwfn, 306 QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); 307 STORE_RT_REG(p_hwfn, 308 QM_REG_RLPFPERIODTIMER_RT_OFFSET, 309 QM_RL_PERIOD_CLK_25M); 310 311 /* Set credit threshold for QM bypass flow */ 312 if (QM_BYPASS_EN) 313 STORE_RT_REG(p_hwfn, 314 QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, 315 QM_PF_RL_UPPER_BOUND); 316 } 317 } 318 319 /* Prepare PF WFQ enable/disable runtime init values */ 320 static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) 321 { 322 STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); 323 324 /* Set credit threshold for QM bypass flow */ 325 if (pf_wfq_en && QM_BYPASS_EN) 326 STORE_RT_REG(p_hwfn, 327 QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, 328 QM_PF_WFQ_UPPER_BOUND); 329 } 330 331 /* Prepare global RL enable/disable runtime init values */ 332 static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) 333 { 334 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, 335 global_rl_en ? 1 : 0); 336 if (global_rl_en) { 337 /* Write RL period (use timer 0 only) */ 338 STORE_RT_REG(p_hwfn, 339 QM_REG_RLGLBLPERIOD_0_RT_OFFSET, 340 QM_RL_PERIOD_CLK_25M); 341 STORE_RT_REG(p_hwfn, 342 QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, 343 QM_RL_PERIOD_CLK_25M); 344 345 /* Set credit threshold for QM bypass flow */ 346 if (QM_BYPASS_EN) 347 STORE_RT_REG(p_hwfn, 348 QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, 349 QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); 350 } 351 } 352 353 /* Prepare VPORT WFQ enable/disable runtime init values */ 354 static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) 355 { 356 STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, 357 vport_wfq_en ? 1 : 0); 358 359 /* Set credit threshold for QM bypass flow */ 360 if (vport_wfq_en && QM_BYPASS_EN) 361 STORE_RT_REG(p_hwfn, 362 QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, 363 QM_VP_WFQ_BYPASS_THRESH); 364 } 365 366 /* Prepare runtime init values to allocate PBF command queue lines for 367 * the specified VOQ. 
368 */
369 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
370 u8 ext_voq, u16 cmdq_lines)
371 {
372 u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
373
374 OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
375 (u32)cmdq_lines);
376 STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
377 qm_line_crd);
378 STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
379 qm_line_crd);
380 }
381
382 /* Prepare runtime init values to allocate PBF command queue lines. */
383 static void
384 qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
385 u8 max_ports_per_engine,
386 u8 max_phys_tcs_per_port,
387 struct init_qm_port_params port_params[MAX_NUM_PORTS])
388 {
389 u8 tc, ext_voq, port_id, num_tcs_in_port;
390 u8 num_ext_voqs = MAX_NUM_VOQS;
391
392 /* Clear PBF lines of all VOQs */
393 for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
394 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
395
396 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
397 u16 phys_lines, phys_lines_per_tc;
398
399 if (!port_params[port_id].active)
400 continue;
401
402 /* Find number of command queue lines to divide between the
403 * active physical TCs.
404 */
405 phys_lines = port_params[port_id].num_pbf_cmd_lines;
406 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
407
408 /* Find #lines per active physical TC */
409 num_tcs_in_port = 0;
410 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
411 if (((port_params[port_id].active_phys_tcs >>
412 tc) & 0x1) == 1)
413 num_tcs_in_port++;
414 phys_lines_per_tc = phys_lines / num_tcs_in_port;
415
416 /* Init registers per active TC */
417 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
418 ext_voq = qed_get_ext_voq(p_hwfn,
419 port_id,
420 tc, max_phys_tcs_per_port);
421 if (((port_params[port_id].active_phys_tcs >>
422 tc) & 0x1) == 1)
423 qed_cmdq_lines_voq_rt_init(p_hwfn,
424 ext_voq,
425 phys_lines_per_tc);
426 }
427
428 /* Init registers for pure LB TC */
429 ext_voq = qed_get_ext_voq(p_hwfn,
430 port_id,
431 PURE_LB_TC, max_phys_tcs_per_port);
432 qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
433 PBF_CMDQ_PURE_LB_LINES);
434 }
435 }
436
437 /* Prepare runtime init values to allocate guaranteed BTB blocks for the
438 * specified port. The guaranteed BTB space is divided between the TCs as
439 * follows (shared space is currently not used):
440 * 1. Parameters:
441 * B - BTB blocks for this port
442 * C - Number of physical TCs for this port
443 * 2. Calculation:
444 * a. 38 blocks (9700B jumbo frame) are allocated for global per port
445 * headroom.
446 * b. B = B - 38 (remainder after global headroom allocation).
447 * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
448 * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
449 * e. B/C blocks are allocated for each physical TC.
450 * Assumptions:
451 * - MTU is up to 9700 bytes (38 blocks)
452 * - All TCs are considered symmetrical (same rate and packet size)
453 * - No optimization for lossy TC (all are considered lossless). Shared space
454 * is not enabled or allocated per TC.
455 */
456 static void
457 qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
458 u8 max_ports_per_engine,
459 u8 max_phys_tcs_per_port,
460 struct init_qm_port_params port_params[MAX_NUM_PORTS])
461 {
462 u32 usable_blocks, pure_lb_blocks, phys_blocks;
463 u8 tc, ext_voq, port_id, num_tcs_in_port;
464
465 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
466 if (!port_params[port_id].active)
467 continue;
468
469 /* Subtract headroom blocks */
470 usable_blocks = port_params[port_id].num_btb_blocks -
471 BTB_HEADROOM_BLOCKS;
472
473 /* Find blocks per physical TC. Use factor to avoid floating
474 * arithmetic.
475 */
476 num_tcs_in_port = 0;
477 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
478 if (((port_params[port_id].active_phys_tcs >>
479 tc) & 0x1) == 1)
480 num_tcs_in_port++;
481
482 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
483 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
484 BTB_PURE_LB_RATIO);
485 pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
486 pure_lb_blocks / BTB_PURE_LB_FACTOR);
487 phys_blocks = (usable_blocks - pure_lb_blocks) /
488 num_tcs_in_port;
489
490 /* Init physical TCs */
491 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
492 if (((port_params[port_id].active_phys_tcs >>
493 tc) & 0x1) == 1) {
494 ext_voq =
495 qed_get_ext_voq(p_hwfn,
496 port_id,
497 tc,
498 max_phys_tcs_per_port);
499 STORE_RT_REG(p_hwfn,
500 PBF_BTB_GUARANTEED_RT_OFFSET
501 (ext_voq), phys_blocks);
502 }
503 }
504
505 /* Init pure LB TC */
506 ext_voq = qed_get_ext_voq(p_hwfn,
507 port_id,
508 PURE_LB_TC, max_phys_tcs_per_port);
509 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
510 pure_lb_blocks);
511 }
512 }
513
514 /* Prepare runtime init values for the specified RL.
515 * Set max link speed (100Gbps) per rate limiter.
516 * Return -1 on error.
517 */
518 static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
519 {
520 u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
521 (u32)QM_RL_CRD_REG_SIGN_BIT;
522 u32 inc_val;
523 u16 rl_id;
524
525 /* Go over all global RLs */
526 for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
527 inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
528
529 STORE_RT_REG(p_hwfn,
530 QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
531 (u32)QM_RL_CRD_REG_SIGN_BIT);
532 STORE_RT_REG(p_hwfn,
533 QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
534 upper_bound);
535 STORE_RT_REG(p_hwfn,
536 QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
537 }
538
539 return 0;
540 }
541
542 /* Returns the upper bound for the specified Vport RL parameters.
543 * link_speed is in Mbps.
544 * Returns 0 in case of error.
545 */
546 static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
547 u32 link_speed)
548 {
549 switch (vport_rl_type) {
550 case QM_RL_TYPE_NORMAL:
551 return QM_INITIAL_VOQ_BYTE_CRD;
552 case QM_RL_TYPE_QCN:
553 return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
554 default:
555 return 0;
556 }
557 }
558
559 /* Prepare VPORT RL runtime init values.
560 * Return -1 on error.
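 *
 * For each RL in [start_rl, start_rl + num_rls), the increment value is
 * derived from the per-VPORT rate when it is non-zero, otherwise from
 * link_speed, and must not exceed the upper bound selected by
 * qed_get_vport_rl_upper_bound() for the RL type.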
561 */ 562 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, 563 u16 start_rl, 564 u16 num_rls, 565 u32 link_speed, 566 struct init_qm_rl_params *rl_params) 567 { 568 u16 i, rl_id; 569 570 if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { 571 DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); 572 return -1; 573 } 574 575 /* Go over all PF VPORTs */ 576 for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { 577 u32 upper_bound, inc_val; 578 579 upper_bound = 580 qed_get_vport_rl_upper_bound((enum init_qm_rl_type) 581 rl_params[i].vport_rl_type, 582 link_speed); 583 584 inc_val = 585 QM_RL_INC_VAL(rl_params[i].vport_rl ? 586 rl_params[i].vport_rl : link_speed); 587 if (inc_val > upper_bound) { 588 DP_NOTICE(p_hwfn, 589 "Invalid RL rate - limit configuration\n"); 590 return -1; 591 } 592 593 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, 594 (u32)QM_RL_CRD_REG_SIGN_BIT); 595 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, 596 upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); 597 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, 598 inc_val); 599 } 600 601 return 0; 602 } 603 604 /* Prepare Tx PQ mapping runtime init values for the specified PF */ 605 static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, 606 struct qed_ptt *p_ptt, 607 struct qed_qm_pf_rt_init_params *p_params, 608 u32 base_mem_addr_4kb) 609 { 610 u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; 611 struct init_qm_vport_params *vport_params = p_params->vport_params; 612 u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; 613 u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; 614 struct init_qm_pq_params *pq_params = p_params->pq_params; 615 u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; 616 617 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; 618 619 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; 620 last_pq_group = (p_params->start_pq + num_pqs - 1) / 621 QM_PF_QUEUE_GROUP_SIZE; 622 623 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); 624 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); 625 mem_addr_4kb = base_mem_addr_4kb; 626 627 /* Set mapping from PQ group to PF */ 628 for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) 629 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, 630 (u32)(p_params->pf_id)); 631 632 /* Set PQ sizes */ 633 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, 634 QM_PQ_SIZE_256B(p_params->num_pf_cids)); 635 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, 636 QM_PQ_SIZE_256B(p_params->num_vf_cids)); 637 638 /* Go over all Tx PQs */ 639 for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { 640 u16 *p_first_tx_pq_id, vport_id_in_pf; 641 struct qm_rf_pq_map tx_pq_map; 642 u8 tc_id = pq_params[i].tc_id; 643 bool is_vf_pq; 644 u8 ext_voq; 645 646 ext_voq = qed_get_ext_voq(p_hwfn, 647 pq_params[i].port_id, 648 tc_id, 649 p_params->max_phys_tcs_per_port); 650 is_vf_pq = (i >= p_params->num_pf_pqs); 651 652 /* Update first Tx PQ of VPORT/TC */ 653 vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; 654 p_first_tx_pq_id = 655 &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; 656 if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { 657 u32 map_val = 658 (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | 659 (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); 660 661 /* Create new VP PQ */ 662 *p_first_tx_pq_id = pq_id; 663 664 /* Map VP PQ to VOQ and PF */ 665 STORE_RT_REG(p_hwfn, 666 QM_REG_WFQVPMAP_RT_OFFSET + 667 *p_first_tx_pq_id, 
668 map_val); 669 } 670 671 /* Prepare PQ map entry */ 672 QM_INIT_TX_PQ_MAP(p_hwfn, 673 tx_pq_map, 674 pq_id, 675 *p_first_tx_pq_id, 676 pq_params[i].rl_valid, 677 pq_params[i].rl_id, 678 ext_voq, pq_params[i].wrr_group); 679 680 /* Set PQ base address */ 681 STORE_RT_REG(p_hwfn, 682 QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, 683 mem_addr_4kb); 684 685 /* Clear PQ pointer table entry (64 bit) */ 686 if (p_params->is_pf_loading) 687 for (j = 0; j < 2; j++) 688 STORE_RT_REG(p_hwfn, 689 QM_REG_PTRTBLTX_RT_OFFSET + 690 (pq_id * 2) + j, 0); 691 692 /* Write PQ info to RAM */ 693 if (WRITE_PQ_INFO_TO_RAM != 0) { 694 u32 pq_info = 0; 695 696 pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, 697 p_params->pf_id, 698 tc_id, 699 pq_params[i].port_id, 700 pq_params[i].rl_valid, 701 pq_params[i].rl_id); 702 qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), 703 pq_info); 704 } 705 706 /* If VF PQ, add indication to PQ VF mask */ 707 if (is_vf_pq) { 708 tx_pq_vf_mask[pq_id / 709 QM_PF_QUEUE_GROUP_SIZE] |= 710 BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); 711 mem_addr_4kb += vport_pq_mem_4kb; 712 } else { 713 mem_addr_4kb += pq_mem_4kb; 714 } 715 } 716 717 /* Store Tx PQ VF mask to size select register */ 718 for (i = 0; i < num_tx_pq_vf_masks; i++) 719 if (tx_pq_vf_mask[i]) 720 STORE_RT_REG(p_hwfn, 721 QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, 722 tx_pq_vf_mask[i]); 723 724 return 0; 725 } 726 727 /* Prepare Other PQ mapping runtime init values for the specified PF */ 728 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, 729 u8 pf_id, 730 bool is_pf_loading, 731 u32 num_pf_cids, 732 u32 num_tids, u32 base_mem_addr_4kb) 733 { 734 u32 pq_size, pq_mem_4kb, mem_addr_4kb; 735 u16 i, j, pq_id, pq_group; 736 737 /* A single other PQ group is used in each PF, where PQ group i is used 738 * in PF i. 739 */ 740 pq_group = pf_id; 741 pq_size = num_pf_cids + num_tids; 742 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); 743 mem_addr_4kb = base_mem_addr_4kb; 744 745 /* Map PQ group to PF */ 746 STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, 747 (u32)(pf_id)); 748 749 /* Set PQ sizes */ 750 STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, 751 QM_PQ_SIZE_256B(pq_size)); 752 753 for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; 754 i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { 755 /* Set PQ base address */ 756 STORE_RT_REG(p_hwfn, 757 QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, 758 mem_addr_4kb); 759 760 /* Clear PQ pointer table entry */ 761 if (is_pf_loading) 762 for (j = 0; j < 2; j++) 763 STORE_RT_REG(p_hwfn, 764 QM_REG_PTRTBLOTHER_RT_OFFSET + 765 (pq_id * 2) + j, 0); 766 767 mem_addr_4kb += pq_mem_4kb; 768 } 769 } 770 771 /* Prepare PF WFQ runtime init values for the specified PF. 772 * Return -1 on error. 773 */ 774 static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, 775 struct qed_qm_pf_rt_init_params *p_params) 776 { 777 u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; 778 struct init_qm_pq_params *pq_params = p_params->pq_params; 779 u32 inc_val, crd_reg_offset; 780 u8 ext_voq; 781 u16 i; 782 783 inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); 784 if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { 785 DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); 786 return -1; 787 } 788 789 for (i = 0; i < num_tx_pqs; i++) { 790 ext_voq = qed_get_ext_voq(p_hwfn, 791 pq_params[i].port_id, 792 pq_params[i].tc_id, 793 p_params->max_phys_tcs_per_port); 794 crd_reg_offset = 795 (p_params->pf_id < MAX_NUM_PFS_BB ? 
796 QM_REG_WFQPFCRD_RT_OFFSET : 797 QM_REG_WFQPFCRD_MSB_RT_OFFSET) + 798 ext_voq * MAX_NUM_PFS_BB + 799 (p_params->pf_id % MAX_NUM_PFS_BB); 800 OVERWRITE_RT_REG(p_hwfn, 801 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); 802 } 803 804 STORE_RT_REG(p_hwfn, 805 QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, 806 QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); 807 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, 808 inc_val); 809 810 return 0; 811 } 812 813 /* Prepare PF RL runtime init values for the specified PF. 814 * Return -1 on error. 815 */ 816 static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) 817 { 818 u32 inc_val = QM_RL_INC_VAL(pf_rl); 819 820 if (inc_val > QM_PF_RL_MAX_INC_VAL) { 821 DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); 822 return -1; 823 } 824 825 STORE_RT_REG(p_hwfn, 826 QM_REG_RLPFCRD_RT_OFFSET + pf_id, 827 (u32)QM_RL_CRD_REG_SIGN_BIT); 828 STORE_RT_REG(p_hwfn, 829 QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, 830 QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); 831 STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); 832 833 return 0; 834 } 835 836 /* Prepare VPORT WFQ runtime init values for the specified VPORTs. 837 * Return -1 on error. 838 */ 839 static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, 840 u16 num_vports, 841 struct init_qm_vport_params *vport_params) 842 { 843 u16 vport_pq_id, wfq, i; 844 u32 inc_val; 845 u8 tc; 846 847 /* Go over all PF VPORTs */ 848 for (i = 0; i < num_vports; i++) { 849 /* Each VPORT can have several VPORT PQ IDs for various TCs */ 850 for (tc = 0; tc < NUM_OF_TCS; tc++) { 851 /* Check if VPORT/TC is valid */ 852 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; 853 if (vport_pq_id == QM_INVALID_PQ_ID) 854 continue; 855 856 /* Find WFQ weight (per VPORT or per VPORT+TC) */ 857 wfq = vport_params[i].wfq; 858 wfq = wfq ? 
wfq : vport_params[i].tc_wfq[tc]; 859 inc_val = QM_VP_WFQ_INC_VAL(wfq); 860 if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { 861 DP_NOTICE(p_hwfn, 862 "Invalid VPORT WFQ weight configuration\n"); 863 return -1; 864 } 865 866 /* Config registers */ 867 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + 868 vport_pq_id, 869 (u32)QM_WFQ_CRD_REG_SIGN_BIT); 870 STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + 871 vport_pq_id, 872 inc_val | QM_WFQ_CRD_REG_SIGN_BIT); 873 STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + 874 vport_pq_id, inc_val); 875 } 876 } 877 878 return 0; 879 } 880 881 static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, 882 struct qed_ptt *p_ptt) 883 { 884 u32 reg_val, i; 885 886 for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; 887 i++) { 888 udelay(QM_STOP_CMD_POLL_PERIOD_US); 889 reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); 890 } 891 892 /* Check if timeout while waiting for SDM command ready */ 893 if (i == QM_STOP_CMD_MAX_POLL_COUNT) { 894 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 895 "Timeout when waiting for QM SDM command ready signal\n"); 896 return false; 897 } 898 899 return true; 900 } 901 902 static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, 903 struct qed_ptt *p_ptt, 904 u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) 905 { 906 if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) 907 return false; 908 909 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); 910 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); 911 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); 912 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); 913 qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); 914 915 return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt); 916 } 917 918 /******************** INTERFACE IMPLEMENTATION *********************/ 919 920 u32 qed_qm_pf_mem_size(u32 num_pf_cids, 921 u32 num_vf_cids, 922 u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) 923 { 924 return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + 925 QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + 926 QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; 927 } 928 929 int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, 930 struct qed_qm_common_rt_init_params *p_params) 931 { 932 u32 mask = 0; 933 934 /* Init AFullOprtnstcCrdMask */ 935 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, 936 QM_OPPOR_LINE_VOQ_DEF); 937 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); 938 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, 939 p_params->pf_wfq_en ? 1 : 0); 940 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, 941 p_params->vport_wfq_en ? 1 : 0); 942 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, 943 p_params->pf_rl_en ? 1 : 0); 944 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, 945 p_params->global_rl_en ? 
1 : 0); 946 SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); 947 SET_FIELD(mask, 948 QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); 949 STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); 950 951 /* Enable/disable PF RL */ 952 qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); 953 954 /* Enable/disable PF WFQ */ 955 qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); 956 957 /* Enable/disable global RL */ 958 qed_enable_global_rl(p_hwfn, p_params->global_rl_en); 959 960 /* Enable/disable VPORT WFQ */ 961 qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); 962 963 /* Init PBF CMDQ line credit */ 964 qed_cmdq_lines_rt_init(p_hwfn, 965 p_params->max_ports_per_engine, 966 p_params->max_phys_tcs_per_port, 967 p_params->port_params); 968 969 /* Init BTB blocks in PBF */ 970 qed_btb_blocks_rt_init(p_hwfn, 971 p_params->max_ports_per_engine, 972 p_params->max_phys_tcs_per_port, 973 p_params->port_params); 974 975 qed_global_rl_rt_init(p_hwfn); 976 977 return 0; 978 } 979 980 int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, 981 struct qed_ptt *p_ptt, 982 struct qed_qm_pf_rt_init_params *p_params) 983 { 984 struct init_qm_vport_params *vport_params = p_params->vport_params; 985 u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + 986 p_params->num_tids) * 987 QM_OTHER_PQS_PER_PF; 988 u16 i; 989 u8 tc; 990 991 /* Clear first Tx PQ ID array for each VPORT */ 992 for (i = 0; i < p_params->num_vports; i++) 993 for (tc = 0; tc < NUM_OF_TCS; tc++) 994 vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; 995 996 /* Map Other PQs (if any) */ 997 qed_other_pq_map_rt_init(p_hwfn, 998 p_params->pf_id, 999 p_params->is_pf_loading, p_params->num_pf_cids, 1000 p_params->num_tids, 0); 1001 1002 /* Map Tx PQs */ 1003 if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) 1004 return -1; 1005 1006 /* Init PF WFQ */ 1007 if (p_params->pf_wfq) 1008 if (qed_pf_wfq_rt_init(p_hwfn, p_params)) 1009 return -1; 1010 1011 /* Init PF RL */ 1012 if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) 1013 return -1; 1014 1015 /* Init VPORT WFQ */ 1016 if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) 1017 return -1; 1018 1019 /* Set VPORT RL */ 1020 if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, 1021 p_params->num_rls, p_params->link_speed, 1022 p_params->rl_params)) 1023 return -1; 1024 1025 return 0; 1026 } 1027 1028 int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, 1029 struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) 1030 { 1031 u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); 1032 1033 if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { 1034 DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); 1035 return -1; 1036 } 1037 1038 qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); 1039 1040 return 0; 1041 } 1042 1043 int qed_init_pf_rl(struct qed_hwfn *p_hwfn, 1044 struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) 1045 { 1046 u32 inc_val = QM_RL_INC_VAL(pf_rl); 1047 1048 if (inc_val > QM_PF_RL_MAX_INC_VAL) { 1049 DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); 1050 return -1; 1051 } 1052 1053 qed_wr(p_hwfn, 1054 p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); 1055 qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); 1056 1057 return 0; 1058 } 1059 1060 int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, 1061 struct qed_ptt *p_ptt, 1062 u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) 1063 { 1064 int result = 0; 1065 u16 vport_pq_id; 1066 u8 tc; 1067 1068 for (tc = 0; tc < NUM_OF_TCS && !result; tc++) 
{ 1069 vport_pq_id = first_tx_pq_id[tc]; 1070 if (vport_pq_id != QM_INVALID_PQ_ID) 1071 result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, 1072 vport_pq_id, wfq); 1073 } 1074 1075 return result; 1076 } 1077 1078 int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 1079 u16 first_tx_pq_id, u16 wfq) 1080 { 1081 u32 inc_val; 1082 1083 if (first_tx_pq_id == QM_INVALID_PQ_ID) 1084 return -1; 1085 1086 inc_val = QM_VP_WFQ_INC_VAL(wfq); 1087 if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { 1088 DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); 1089 return -1; 1090 } 1091 1092 qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, 1093 (u32)QM_WFQ_CRD_REG_SIGN_BIT); 1094 qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, 1095 inc_val | QM_WFQ_CRD_REG_SIGN_BIT); 1096 qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, 1097 inc_val); 1098 1099 return 0; 1100 } 1101 1102 int qed_init_global_rl(struct qed_hwfn *p_hwfn, 1103 struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, 1104 enum init_qm_rl_type vport_rl_type) 1105 { 1106 u32 inc_val, upper_bound; 1107 1108 upper_bound = 1109 (vport_rl_type == 1110 QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : 1111 QM_INITIAL_VOQ_BYTE_CRD; 1112 inc_val = QM_RL_INC_VAL(rate_limit); 1113 if (inc_val > upper_bound) { 1114 DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); 1115 return -1; 1116 } 1117 1118 qed_wr(p_hwfn, p_ptt, 1119 QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); 1120 qed_wr(p_hwfn, 1121 p_ptt, 1122 QM_REG_RLGLBLUPPERBOUND + rl_id * 4, 1123 upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); 1124 qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); 1125 1126 return 0; 1127 } 1128 1129 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, 1130 struct qed_ptt *p_ptt, 1131 bool is_release_cmd, 1132 bool is_tx_pq, u16 start_pq, u16 num_pqs) 1133 { 1134 u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; 1135 u32 pq_mask = 0, last_pq, pq_id; 1136 1137 last_pq = start_pq + num_pqs - 1; 1138 1139 /* Set command's PQ type */ 1140 QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); 1141 1142 /* Go over requested PQs */ 1143 for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { 1144 /* Set PQ bit in mask (stop command only) */ 1145 if (!is_release_cmd) 1146 pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH)); 1147 1148 /* If last PQ or end of PQ mask, write command */ 1149 if ((pq_id == last_pq) || 1150 (pq_id % QM_STOP_PQ_MASK_WIDTH == 1151 (QM_STOP_PQ_MASK_WIDTH - 1))) { 1152 QM_CMD_SET_FIELD(cmd_arr, 1153 QM_STOP_CMD, PAUSE_MASK, pq_mask); 1154 QM_CMD_SET_FIELD(cmd_arr, 1155 QM_STOP_CMD, 1156 GROUP_ID, 1157 pq_id / QM_STOP_PQ_MASK_WIDTH); 1158 if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, 1159 cmd_arr[0], cmd_arr[1])) 1160 return false; 1161 pq_mask = 0; 1162 } 1163 } 1164 1165 return true; 1166 } 1167 1168 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ 1169 do { \ 1170 typeof(var) *__p_var = &(var); \ 1171 typeof(offset) __offset = offset; \ 1172 *__p_var = (*__p_var & ~BIT(__offset)) | \ 1173 ((enable) ? 
BIT(__offset) : 0); \
1174 } while (0)
1175
1176 #define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
1177 #define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
1178
1179 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
1180 do { \
1181 u32 i; \
1182 \
1183 for (i = 0; i < (arr_size); i++) \
1184 qed_wr(dev, ptt, \
1185 ((addr) + (4 * i)), \
1186 ((u32 *)&(arr))[i]); \
1187 } while (0)
1188
1189 /**
1190 * qed_dmae_to_grc() - Internal function for writing from host to
1191 * wide-bus registers (split registers are not supported yet).
1192 *
1193 * @p_hwfn: HW device data.
1194 * @p_ptt: PTT window used for writing the registers.
1195 * @p_data: Pointer to source data.
1196 * @addr: Destination register address.
1197 * @len_in_dwords: Data length in dwords (u32).
1198 *
1199 * Return: Length of the written data in dwords (u32) or -1 on invalid
1200 * input.
1201 */
1202 static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1203 __le32 *p_data, u32 addr, u32 len_in_dwords)
1204 {
1205 struct qed_dmae_params params = { 0 };
1206 u32 *data_cpu;
1207 int rc;
1208
1209 if (!p_data)
1210 return -1;
1211
1212 /* Set DMAE params */
1213 SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
1214
1215 /* Execute DMAE command */
1216 rc = qed_dmae_host2grc(p_hwfn, p_ptt,
1217 (u64)(uintptr_t)(p_data),
1218 addr, len_in_dwords, &params);
1219
1220 /* If the write using DMAE failed, write using GRC instead */
1221 if (rc) {
1222 DP_VERBOSE(p_hwfn,
1223 QED_MSG_DEBUG,
1224 "Failed writing to chip using DMAE, using GRC instead\n");
1225
1226 /* Swap to CPU byteorder and write to registers using GRC */
1227 data_cpu = (__force u32 *)p_data;
1228 le32_to_cpu_array(data_cpu, len_in_dwords);
1229
1230 ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
1231 cpu_to_le32_array(data_cpu, len_in_dwords);
1232 }
1233
1234 return len_in_dwords;
1235 }
1236
1237 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
1238 struct qed_ptt *p_ptt, u16 dest_port)
1239 {
1240 /* Update PRS register */
1241 qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1242
1243 /* Update NIG register */
1244 qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1245
1246 /* Update PBF register */
1247 qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1248 }
1249
1250 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
1251 struct qed_ptt *p_ptt, bool vxlan_enable)
1252 {
1253 u32 reg_val;
1254 u8 shift;
1255
1256 /* Update PRS register */
1257 reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1258 SET_FIELD(reg_val,
1259 PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
1260 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1261 if (reg_val) {
1262 reg_val =
1263 qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
1264
1265 /* Update output only if tunnel blocks not included. */
1266 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
1267 qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
1268 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
1269 }
1270
1271 /* Update NIG register */
1272 reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1273 shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
1274 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
1275 qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1276
1277 /* Update DORQ register */
1278 qed_wr(p_hwfn,
1279 p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ?
1 : 0); 1280 } 1281 1282 void qed_set_gre_enable(struct qed_hwfn *p_hwfn, 1283 struct qed_ptt *p_ptt, 1284 bool eth_gre_enable, bool ip_gre_enable) 1285 { 1286 u32 reg_val; 1287 u8 shift; 1288 1289 /* Update PRS register */ 1290 reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); 1291 SET_FIELD(reg_val, 1292 PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, 1293 eth_gre_enable); 1294 SET_FIELD(reg_val, 1295 PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, 1296 ip_gre_enable); 1297 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); 1298 if (reg_val) { 1299 reg_val = 1300 qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); 1301 1302 /* Update output only if tunnel blocks not included. */ 1303 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) 1304 qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, 1305 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); 1306 } 1307 1308 /* Update NIG register */ 1309 reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); 1310 shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; 1311 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); 1312 shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; 1313 SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); 1314 qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); 1315 1316 /* Update DORQ registers */ 1317 qed_wr(p_hwfn, 1318 p_ptt, 1319 DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0); 1320 qed_wr(p_hwfn, 1321 p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0); 1322 } 1323 1324 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, 1325 struct qed_ptt *p_ptt, u16 dest_port) 1326 { 1327 /* Update PRS register */ 1328 qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); 1329 1330 /* Update NIG register */ 1331 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); 1332 1333 /* Update PBF register */ 1334 qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); 1335 } 1336 1337 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, 1338 struct qed_ptt *p_ptt, 1339 bool eth_geneve_enable, bool ip_geneve_enable) 1340 { 1341 u32 reg_val; 1342 1343 /* Update PRS register */ 1344 reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); 1345 SET_FIELD(reg_val, 1346 PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, 1347 eth_geneve_enable); 1348 SET_FIELD(reg_val, 1349 PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, 1350 ip_geneve_enable); 1351 qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); 1352 if (reg_val) { 1353 reg_val = 1354 qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); 1355 1356 /* Update output only if tunnel blocks not included. */ 1357 if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) 1358 qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, 1359 (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); 1360 } 1361 1362 /* Update NIG register */ 1363 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, 1364 eth_geneve_enable ? 1 : 0); 1365 qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); 1366 1367 /* EDPM with geneve tunnel not supported in BB */ 1368 if (QED_IS_BB_B0(p_hwfn->cdev)) 1369 return; 1370 1371 /* Update DORQ registers */ 1372 qed_wr(p_hwfn, 1373 p_ptt, 1374 DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, 1375 eth_geneve_enable ? 1 : 0); 1376 qed_wr(p_hwfn, 1377 p_ptt, 1378 DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, 1379 ip_geneve_enable ? 
1 : 0);
1380 }
1381
1382 #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
1383 #define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
1384
1385 void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
1386 struct qed_ptt *p_ptt, bool enable)
1387 {
1388 u32 reg_val, cfg_mask;
1389
1390 /* read PRS config register */
1391 reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
1392
1393 /* set VXLAN_NO_L2_ENABLE mask */
1394 cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
1395
1396 if (enable) {
1397 /* set VXLAN_NO_L2_ENABLE flag */
1398 reg_val |= cfg_mask;
1399
1400 /* update PRS FIC register */
1401 qed_wr(p_hwfn,
1402 p_ptt,
1403 PRS_REG_OUTPUT_FORMAT_4_0,
1404 (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
1405 } else {
1406 /* clear VXLAN_NO_L2_ENABLE flag */
1407 reg_val &= ~cfg_mask;
1408 }
1409
1410 /* write PRS config register */
1411 qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
1412 }
1413
1414 #define T_ETH_PACKET_ACTION_GFT_EVENTID 23
1415 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
1416 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1417 #define PARSER_ETH_CONN_CM_HDR 0
1418 #define CAM_LINE_SIZE sizeof(u32)
1419 #define RAM_LINE_SIZE sizeof(u64)
1420 #define REG_SIZE sizeof(u32)
1421
1422 void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
1423 {
1424 struct regpair ram_line = { 0 };
1425
1426 /* Disable gft search for PF */
1427 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1428
1429 /* Clean ram & cam for next gft session */
1430
1431 /* Zero camline */
1432 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1433
1434 /* Zero ramline */
1435 qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
1436 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1437 sizeof(ram_line) / REG_SIZE);
1438 }
1439
1440 void qed_gft_config(struct qed_hwfn *p_hwfn,
1441 struct qed_ptt *p_ptt,
1442 u16 pf_id,
1443 bool tcp,
1444 bool udp,
1445 bool ipv4, bool ipv6, enum gft_profile_type profile_type)
1446 {
1447 struct regpair ram_line;
1448 u32 search_non_ip_as_gft;
1449 u32 reg_val, cam_line;
1450 u32 lo = 0, hi = 0;
1451
1452 if (!ipv6 && !ipv4)
1453 DP_NOTICE(p_hwfn,
1454 "gft_config: must accept at least one of - ipv4 or ipv6\n");
1455 if (!tcp && !udp)
1456 DP_NOTICE(p_hwfn,
1457 "gft_config: must accept at least one of - udp or tcp\n");
1458 if (profile_type >= MAX_GFT_PROFILE_TYPE)
1459 DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
1460
1461 /* Set RFS event ID to be awakened in Tstorm by PRS */
1462 reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1463 PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1464 reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1465 qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1466
1467 /* Do not load context, only cid, in PRS on match. */
1468 qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1469
1470 /* Do not use tenant ID exist bit for gft search */
1471 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1472
1473 /* Set Cam */
1474 cam_line = 0;
1475 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1476
1477 /* Filters are per PF!!
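 * Each PF owns a single GFT CAM line and RAM profile line, selected by
 * pf_id (see the PRS_REG_GFT_CAM and PRS_REG_GFT_PROFILE_MASK_RAM writes
 * below).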
*/ 1478 SET_FIELD(cam_line, 1479 GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1480 GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); 1481 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); 1482 1483 if (!(tcp && udp)) { 1484 SET_FIELD(cam_line, 1485 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1486 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); 1487 if (tcp) 1488 SET_FIELD(cam_line, 1489 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, 1490 GFT_PROFILE_TCP_PROTOCOL); 1491 else 1492 SET_FIELD(cam_line, 1493 GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, 1494 GFT_PROFILE_UDP_PROTOCOL); 1495 } 1496 1497 if (!(ipv4 && ipv6)) { 1498 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); 1499 if (ipv4) 1500 SET_FIELD(cam_line, 1501 GFT_CAM_LINE_MAPPED_IP_VERSION, 1502 GFT_PROFILE_IPV4); 1503 else 1504 SET_FIELD(cam_line, 1505 GFT_CAM_LINE_MAPPED_IP_VERSION, 1506 GFT_PROFILE_IPV6); 1507 } 1508 1509 /* Write characteristics to cam */ 1510 qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 1511 cam_line); 1512 cam_line = 1513 qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); 1514 1515 /* Write line to RAM - compare to filter 4 tuple */ 1516 1517 /* Search no IP as GFT */ 1518 search_non_ip_as_gft = 0; 1519 1520 /* Tunnel type */ 1521 SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); 1522 SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); 1523 1524 if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { 1525 SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); 1526 SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); 1527 SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); 1528 SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); 1529 SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); 1530 SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); 1531 } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { 1532 SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); 1533 SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); 1534 SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); 1535 } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { 1536 SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); 1537 SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); 1538 } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { 1539 SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); 1540 SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); 1541 } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { 1542 SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); 1543 1544 /* Allow tunneled traffic without inner IP */ 1545 search_non_ip_as_gft = 1; 1546 } 1547 1548 ram_line.lo = cpu_to_le32(lo); 1549 ram_line.hi = cpu_to_le32(hi); 1550 1551 qed_wr(p_hwfn, 1552 p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); 1553 qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, 1554 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 1555 sizeof(ram_line) / REG_SIZE); 1556 1557 /* Set default profile so that no filter match will happen */ 1558 ram_line.lo = cpu_to_le32(0xffffffff); 1559 ram_line.hi = cpu_to_le32(0x3ff); 1560 qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, 1561 PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * 1562 PRS_GFT_CAM_LINES_NO_MATCH, 1563 sizeof(ram_line) / REG_SIZE); 1564 1565 /* Enable gft search */ 1566 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); 1567 } 1568 1569 /* Enable and configure context validation */ 1570 void qed_enable_context_validation(struct qed_hwfn *p_hwfn, 1571 struct qed_ptt *p_ptt) 1572 { 1573 u32 ctx_validation; 1574 1575 /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ 1576 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24; 1577 qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation); 1578 1579 
/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ 1580 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; 1581 qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); 1582 1583 /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ 1584 ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; 1585 qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); 1586 } 1587 1588 const char *qed_get_protocol_type_str(u32 protocol_type) 1589 { 1590 if (protocol_type >= ARRAY_SIZE(s_protocol_types)) 1591 return "Invalid protocol type"; 1592 1593 return s_protocol_types[protocol_type]; 1594 } 1595 1596 const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id) 1597 { 1598 const char *ramrod_cmd_id_str; 1599 1600 if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids)) 1601 return "Invalid protocol type"; 1602 1603 if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0])) 1604 return "Invalid Ramrod command ID"; 1605 1606 ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id]; 1607 1608 if (!ramrod_cmd_id_str) 1609 return "Invalid Ramrod command ID"; 1610 1611 return ramrod_cmd_id_str; 1612 } 1613 1614 static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) 1615 { 1616 switch (storm_id) { 1617 case 0: 1618 return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1619 TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1620 case 1: 1621 return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1622 MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1623 case 2: 1624 return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1625 USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1626 case 3: 1627 return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1628 XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1629 case 4: 1630 return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1631 YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1632 case 5: 1633 return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1634 PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); 1635 1636 default: 1637 return 0; 1638 } 1639 } 1640 1641 void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, 1642 struct qed_ptt *p_ptt, 1643 u8 assert_level[NUM_STORMS]) 1644 { 1645 u8 storm_id; 1646 1647 for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { 1648 u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); 1649 1650 qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); 1651 } 1652 } 1653 1654 #define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) 1655 #define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) 1656 1657 static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) 1658 { 1659 switch (storm_id) { 1660 case 0: 1661 return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1662 TSTORM_OVERLAY_BUF_ADDR_OFFSET; 1663 case 1: 1664 return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1665 MSTORM_OVERLAY_BUF_ADDR_OFFSET; 1666 case 2: 1667 return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1668 USTORM_OVERLAY_BUF_ADDR_OFFSET; 1669 case 3: 1670 return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1671 XSTORM_OVERLAY_BUF_ADDR_OFFSET; 1672 case 4: 1673 return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1674 YSTORM_OVERLAY_BUF_ADDR_OFFSET; 1675 case 5: 1676 return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 1677 PSTORM_OVERLAY_BUF_ADDR_OFFSET; 1678 1679 default: 1680 return 0; 1681 } 1682 } 1683 1684 struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, 1685 const u32 * const 1686 
fw_overlay_in_buf, 1687 u32 buf_size_in_bytes) 1688 { 1689 u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; 1690 struct phys_mem_desc *allocated_mem; 1691 1692 if (!buf_size) 1693 return NULL; 1694 1695 allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc), 1696 GFP_KERNEL); 1697 if (!allocated_mem) 1698 return NULL; 1699 1700 /* For each Storm, set physical address in RAM */ 1701 while (buf_offset < buf_size) { 1702 struct phys_mem_desc *storm_mem_desc; 1703 struct fw_overlay_buf_hdr *hdr; 1704 u32 storm_buf_size; 1705 u8 storm_id; 1706 1707 hdr = 1708 (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset]; 1709 storm_buf_size = GET_FIELD(hdr->data, 1710 FW_OVERLAY_BUF_HDR_BUF_SIZE); 1711 storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); 1712 if (storm_id >= NUM_STORMS) 1713 break; 1714 storm_mem_desc = allocated_mem + storm_id; 1715 storm_mem_desc->size = storm_buf_size * sizeof(u32); 1716 1717 /* Allocate physical memory for Storm's overlays buffer */ 1718 storm_mem_desc->virt_addr = 1719 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, 1720 storm_mem_desc->size, 1721 &storm_mem_desc->phys_addr, GFP_KERNEL); 1722 if (!storm_mem_desc->virt_addr) 1723 break; 1724 1725 /* Skip overlays buffer header */ 1726 buf_offset += OVERLAY_HDR_SIZE_DWORDS; 1727 1728 /* Copy Storm's overlays buffer to allocated memory */ 1729 memcpy(storm_mem_desc->virt_addr, 1730 &fw_overlay_in_buf[buf_offset], storm_mem_desc->size); 1731 1732 /* Advance to next Storm */ 1733 buf_offset += storm_buf_size; 1734 } 1735 1736 /* If memory allocation has failed, free all allocated memory */ 1737 if (buf_offset < buf_size) { 1738 qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); 1739 return NULL; 1740 } 1741 1742 return allocated_mem; 1743 } 1744 1745 void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, 1746 struct qed_ptt *p_ptt, 1747 struct phys_mem_desc *fw_overlay_mem) 1748 { 1749 u8 storm_id; 1750 1751 for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { 1752 struct phys_mem_desc *storm_mem_desc = 1753 (struct phys_mem_desc *)fw_overlay_mem + storm_id; 1754 u32 ram_addr, i; 1755 1756 /* Skip Storms with no FW overlays */ 1757 if (!storm_mem_desc->virt_addr) 1758 continue; 1759 1760 /* Calculate overlay RAM GRC address of current PF */ 1761 ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) + 1762 sizeof(dma_addr_t) * p_hwfn->rel_pf_id; 1763 1764 /* Write Storm's overlay physical address to RAM */ 1765 for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) 1766 qed_wr(p_hwfn, p_ptt, ram_addr, 1767 ((u32 *)&storm_mem_desc->phys_addr)[i]); 1768 } 1769 } 1770 1771 void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, 1772 struct phys_mem_desc **fw_overlay_mem) 1773 { 1774 u8 storm_id; 1775 1776 if (!fw_overlay_mem || !(*fw_overlay_mem)) 1777 return; 1778 1779 for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { 1780 struct phys_mem_desc *storm_mem_desc = 1781 (struct phys_mem_desc *)*fw_overlay_mem + storm_id; 1782 1783 /* Free Storm's physical memory */ 1784 if (storm_mem_desc->virt_addr) 1785 dma_free_coherent(&p_hwfn->cdev->pdev->dev, 1786 storm_mem_desc->size, 1787 storm_mem_desc->virt_addr, 1788 storm_mem_desc->phys_addr); 1789 } 1790 1791 /* Free allocated virtual memory */ 1792 kfree(*fw_overlay_mem); 1793 *fw_overlay_mem = NULL; 1794 } 1795
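
/* Usage sketch (illustrative only; actual call sites live elsewhere in the
 * driver, and the variable names below are placeholders): the FW overlay
 * helpers above are meant to be used together, with the overlay buffer and
 * its byte size taken from the firmware image:
 *
 *	struct phys_mem_desc *mem;
 *
 *	mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_in_buf, buf_size);
 *	if (mem)
 *		qed_fw_overlay_init_ram(p_hwfn, p_ptt, mem);
 *	...
 *	qed_fw_overlay_mem_free(p_hwfn, &mem);
 */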