1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018-2019 Hisilicon Limited. */ 3 4 #include <linux/device.h> 5 6 #include "hclge_debugfs.h" 7 #include "hclge_main.h" 8 #include "hclge_tm.h" 9 #include "hnae3.h" 10 11 static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = { 12 { .reg_type = "bios common", 13 .dfx_msg = &hclge_dbg_bios_common_reg[0], 14 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg), 15 .offset = HCLGE_DBG_DFX_BIOS_OFFSET, 16 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } }, 17 { .reg_type = "ssu", 18 .dfx_msg = &hclge_dbg_ssu_reg_0[0], 19 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0), 20 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET, 21 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } }, 22 { .reg_type = "ssu", 23 .dfx_msg = &hclge_dbg_ssu_reg_1[0], 24 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1), 25 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET, 26 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } }, 27 { .reg_type = "ssu", 28 .dfx_msg = &hclge_dbg_ssu_reg_2[0], 29 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2), 30 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET, 31 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } }, 32 { .reg_type = "igu egu", 33 .dfx_msg = &hclge_dbg_igu_egu_reg[0], 34 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg), 35 .offset = HCLGE_DBG_DFX_IGU_OFFSET, 36 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } }, 37 { .reg_type = "rpu", 38 .dfx_msg = &hclge_dbg_rpu_reg_0[0], 39 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0), 40 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET, 41 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } }, 42 { .reg_type = "rpu", 43 .dfx_msg = &hclge_dbg_rpu_reg_1[0], 44 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1), 45 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET, 46 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } }, 47 { .reg_type = "ncsi", 48 .dfx_msg = &hclge_dbg_ncsi_reg[0], 49 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg), 50 .offset = HCLGE_DBG_DFX_NCSI_OFFSET, 51 .cmd = HCLGE_OPC_DFX_NCSI_REG } }, 52 { .reg_type = "rtc", 53 .dfx_msg = &hclge_dbg_rtc_reg[0], 54 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg), 55 .offset = HCLGE_DBG_DFX_RTC_OFFSET, 56 .cmd = HCLGE_OPC_DFX_RTC_REG } }, 57 { .reg_type = "ppp", 58 .dfx_msg = &hclge_dbg_ppp_reg[0], 59 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg), 60 .offset = HCLGE_DBG_DFX_PPP_OFFSET, 61 .cmd = HCLGE_OPC_DFX_PPP_REG } }, 62 { .reg_type = "rcb", 63 .dfx_msg = &hclge_dbg_rcb_reg[0], 64 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg), 65 .offset = HCLGE_DBG_DFX_RCB_OFFSET, 66 .cmd = HCLGE_OPC_DFX_RCB_REG } }, 67 { .reg_type = "tqp", 68 .dfx_msg = &hclge_dbg_tqp_reg[0], 69 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg), 70 .offset = HCLGE_DBG_DFX_TQP_OFFSET, 71 .cmd = HCLGE_OPC_DFX_TQP_REG } }, 72 }; 73 74 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) 75 { 76 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT]; 77 int entries_per_desc; 78 int index; 79 int ret; 80 81 ret = hclge_query_bd_num_cmd_send(hdev, desc); 82 if (ret) { 83 dev_err(&hdev->pdev->dev, 84 "get dfx bdnum fail, ret = %d\n", ret); 85 return ret; 86 } 87 88 entries_per_desc = ARRAY_SIZE(desc[0].data); 89 index = offset % entries_per_desc; 90 return le32_to_cpu(desc[offset / entries_per_desc].data[index]); 91 } 92 93 static int hclge_dbg_cmd_send(struct hclge_dev *hdev, 94 struct hclge_desc *desc_src, 95 int index, int bd_num, 96 enum hclge_opcode_type cmd) 97 { 98 struct hclge_desc *desc = desc_src; 99 int ret, i; 100 101 hclge_cmd_setup_basic_desc(desc, cmd, true); 102 desc->data[0] = cpu_to_le32(index); 103 104 for (i = 1; i < bd_num; i++) { 105 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 106 desc++; 107 hclge_cmd_setup_basic_desc(desc, cmd, true); 108 } 109 110 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); 111 if (ret) 112 dev_err(&hdev->pdev->dev, 113 "cmd(0x%x) send fail, ret = %d\n", cmd, ret); 114 return ret; 115 } 116 117 static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, 118 struct hclge_dbg_reg_type_info *reg_info, 119 const char *cmd_buf) 120 { 121 #define IDX_OFFSET 1 122 123 const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET]; 124 struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg; 125 struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg; 126 struct hclge_desc *desc_src; 127 struct hclge_desc *desc; 128 int entries_per_desc; 129 int bd_num, buf_len; 130 int index = 0; 131 int min_num; 132 int ret, i; 133 134 if (*s) { 135 ret = kstrtouint(s, 0, &index); 136 index = (ret != 0) ? 0 : index; 137 } 138 139 bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset); 140 if (bd_num <= 0) { 141 dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n", 142 reg_msg->offset, bd_num); 143 return; 144 } 145 146 buf_len = sizeof(struct hclge_desc) * bd_num; 147 desc_src = kzalloc(buf_len, GFP_KERNEL); 148 if (!desc_src) 149 return; 150 151 desc = desc_src; 152 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); 153 if (ret) { 154 kfree(desc_src); 155 return; 156 } 157 158 entries_per_desc = ARRAY_SIZE(desc->data); 159 min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num); 160 161 desc = desc_src; 162 for (i = 0; i < min_num; i++) { 163 if (i > 0 && (i % entries_per_desc) == 0) 164 desc++; 165 if (dfx_message->flag) 166 dev_info(&hdev->pdev->dev, "%s: 0x%x\n", 167 dfx_message->message, 168 le32_to_cpu(desc->data[i % entries_per_desc])); 169 170 dfx_message++; 171 } 172 173 kfree(desc_src); 174 } 175 176 static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) 177 { 178 struct device *dev = &hdev->pdev->dev; 179 struct hclge_dbg_bitmap_cmd *bitmap; 180 enum hclge_opcode_type cmd; 181 int rq_id, pri_id, qset_id; 182 int port_id, nq_id, pg_id; 183 struct hclge_desc desc[2]; 184 185 int cnt, ret; 186 187 cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", 188 &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); 189 if (cnt != 6) { 190 dev_err(&hdev->pdev->dev, 191 "dump dcb: bad command parameter, cnt=%d\n", cnt); 192 return; 193 } 194 195 cmd = HCLGE_OPC_QSET_DFX_STS; 196 ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd); 197 if (ret) 198 goto err_dcb_cmd_send; 199 200 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 201 dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); 202 dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); 203 dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); 204 dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); 205 206 cmd = HCLGE_OPC_PRI_DFX_STS; 207 ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd); 208 if (ret) 209 goto err_dcb_cmd_send; 210 211 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 212 dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); 213 dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); 214 dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); 215 216 cmd = HCLGE_OPC_PG_DFX_STS; 217 ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd); 218 if (ret) 219 goto err_dcb_cmd_send; 220 221 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 222 dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); 223 dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); 224 dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); 225 226 cmd = HCLGE_OPC_PORT_DFX_STS; 227 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 228 if (ret) 229 goto err_dcb_cmd_send; 230 231 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; 232 dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); 233 dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); 234 235 cmd = HCLGE_OPC_SCH_NQ_CNT; 236 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 237 if (ret) 238 goto err_dcb_cmd_send; 239 240 dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 241 242 cmd = HCLGE_OPC_SCH_RQ_CNT; 243 ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd); 244 if (ret) 245 goto err_dcb_cmd_send; 246 247 dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); 248 249 cmd = HCLGE_OPC_TM_INTERNAL_STS; 250 ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd); 251 if (ret) 252 goto err_dcb_cmd_send; 253 254 dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1])); 255 dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2])); 256 dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", 257 le32_to_cpu(desc[0].data[3])); 258 dev_info(dev, "tx_private_waterline: 0x%x\n", 259 le32_to_cpu(desc[0].data[4])); 260 dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5])); 261 dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0])); 262 dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1])); 263 264 cmd = HCLGE_OPC_TM_INTERNAL_CNT; 265 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 266 if (ret) 267 goto err_dcb_cmd_send; 268 269 dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1])); 270 dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2])); 271 272 cmd = HCLGE_OPC_TM_INTERNAL_STS_1; 273 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd); 274 if (ret) 275 goto err_dcb_cmd_send; 276 277 dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1])); 278 dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2])); 279 dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3])); 280 dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", 281 le32_to_cpu(desc[0].data[4])); 282 dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", 283 le32_to_cpu(desc[0].data[5])); 284 return; 285 286 err_dcb_cmd_send: 287 dev_err(&hdev->pdev->dev, 288 "failed to dump dcb dfx, cmd = %#x, ret = %d\n", 289 cmd, ret); 290 } 291 292 static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) 293 { 294 struct hclge_dbg_reg_type_info *reg_info; 295 bool has_dump = false; 296 int i; 297 298 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) { 299 reg_info = &hclge_dbg_reg_info[i]; 300 if (!strncmp(cmd_buf, reg_info->reg_type, 301 strlen(reg_info->reg_type))) { 302 hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf); 303 has_dump = true; 304 } 305 } 306 307 if (strncmp(cmd_buf, "dcb", 3) == 0) { 308 hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]); 309 has_dump = true; 310 } 311 312 if (!has_dump) { 313 dev_info(&hdev->pdev->dev, "unknown command\n"); 314 return; 315 } 316 } 317 318 static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, 319 char *title_buf, char *true_buf, 320 char *false_buf) 321 { 322 if (flag) 323 dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n", 324 title_buf, index, true_buf, 325 hdev->tm_info.pg_info[0].tc_dwrr[index]); 326 else 327 dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, 328 false_buf); 329 } 330 331 static void hclge_dbg_dump_tc(struct hclge_dev *hdev) 332 { 333 struct hclge_ets_tc_weight_cmd *ets_weight; 334 struct hclge_desc desc; 335 int i, ret; 336 337 if (!hnae3_dev_dcb_supported(hdev)) { 338 dev_info(&hdev->pdev->dev, 339 "Only DCB-supported dev supports tc\n"); 340 return; 341 } 342 343 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); 344 345 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 346 if (ret) { 347 dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret); 348 return; 349 } 350 351 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; 352 353 dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n", 354 hdev->tm_info.num_tc); 355 dev_info(&hdev->pdev->dev, "weight_offset: %u\n", 356 ets_weight->weight_offset); 357 358 for (i = 0; i < HNAE3_MAX_TC; i++) 359 hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i, 360 "tc", "no sp mode", "sp mode"); 361 } 362 363 static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) 364 { 365 struct hclge_port_shapping_cmd *port_shap_cfg_cmd; 366 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 367 struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; 368 enum hclge_opcode_type cmd; 369 struct hclge_desc desc; 370 int ret; 371 372 cmd = HCLGE_OPC_TM_PG_C_SHAPPING; 373 hclge_cmd_setup_basic_desc(&desc, cmd, true); 374 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 375 if (ret) 376 goto err_tm_pg_cmd_send; 377 378 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 379 dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 380 dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", 381 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 382 383 cmd = HCLGE_OPC_TM_PG_P_SHAPPING; 384 hclge_cmd_setup_basic_desc(&desc, cmd, true); 385 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 386 if (ret) 387 goto err_tm_pg_cmd_send; 388 389 pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; 390 dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); 391 dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", 392 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); 393 394 cmd = HCLGE_OPC_TM_PORT_SHAPPING; 395 hclge_cmd_setup_basic_desc(&desc, cmd, true); 396 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 397 if (ret) 398 goto err_tm_pg_cmd_send; 399 400 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 401 dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", 402 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); 403 404 cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; 405 hclge_cmd_setup_basic_desc(&desc, cmd, true); 406 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 407 if (ret) 408 goto err_tm_pg_cmd_send; 409 410 dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", 411 le32_to_cpu(desc.data[0])); 412 413 cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; 414 hclge_cmd_setup_basic_desc(&desc, cmd, true); 415 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 416 if (ret) 417 goto err_tm_pg_cmd_send; 418 419 dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", 420 le32_to_cpu(desc.data[0])); 421 422 cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; 423 hclge_cmd_setup_basic_desc(&desc, cmd, true); 424 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 425 if (ret) 426 goto err_tm_pg_cmd_send; 427 428 dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", 429 le32_to_cpu(desc.data[0])); 430 431 if (!hnae3_dev_dcb_supported(hdev)) { 432 dev_info(&hdev->pdev->dev, 433 "Only DCB-supported dev supports tm mapping\n"); 434 return; 435 } 436 437 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 438 hclge_cmd_setup_basic_desc(&desc, cmd, true); 439 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 440 if (ret) 441 goto err_tm_pg_cmd_send; 442 443 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 444 dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n", 445 bp_to_qs_map_cmd->tc_id); 446 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", 447 bp_to_qs_map_cmd->qs_group_id); 448 dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", 449 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map)); 450 return; 451 452 err_tm_pg_cmd_send: 453 dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n", 454 cmd, ret); 455 } 456 457 static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 458 { 459 struct hclge_priority_weight_cmd *priority_weight; 460 struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; 461 struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; 462 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 463 struct hclge_pri_shapping_cmd *shap_cfg_cmd; 464 struct hclge_pg_weight_cmd *pg_weight; 465 struct hclge_qs_weight_cmd *qs_weight; 466 enum hclge_opcode_type cmd; 467 struct hclge_desc desc; 468 int ret; 469 470 cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; 471 hclge_cmd_setup_basic_desc(&desc, cmd, true); 472 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 473 if (ret) 474 goto err_tm_cmd_send; 475 476 pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; 477 dev_info(&hdev->pdev->dev, "dump tm\n"); 478 dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", 479 pg_to_pri_map->pg_id); 480 dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", 481 pg_to_pri_map->pri_bit_map); 482 483 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 484 hclge_cmd_setup_basic_desc(&desc, cmd, true); 485 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 486 if (ret) 487 goto err_tm_cmd_send; 488 489 qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 490 dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", 491 le16_to_cpu(qs_to_pri_map->qs_id)); 492 dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", 493 qs_to_pri_map->priority); 494 dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", 495 qs_to_pri_map->link_vld); 496 497 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 498 hclge_cmd_setup_basic_desc(&desc, cmd, true); 499 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 500 if (ret) 501 goto err_tm_cmd_send; 502 503 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 504 dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", 505 le16_to_cpu(nq_to_qs_map->nq_id)); 506 dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", 507 le16_to_cpu(nq_to_qs_map->qset_id)); 508 509 cmd = HCLGE_OPC_TM_PG_WEIGHT; 510 hclge_cmd_setup_basic_desc(&desc, cmd, true); 511 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 512 if (ret) 513 goto err_tm_cmd_send; 514 515 pg_weight = (struct hclge_pg_weight_cmd *)desc.data; 516 dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); 517 dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); 518 519 cmd = HCLGE_OPC_TM_QS_WEIGHT; 520 hclge_cmd_setup_basic_desc(&desc, cmd, true); 521 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 522 if (ret) 523 goto err_tm_cmd_send; 524 525 qs_weight = (struct hclge_qs_weight_cmd *)desc.data; 526 dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", 527 le16_to_cpu(qs_weight->qs_id)); 528 dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); 529 530 cmd = HCLGE_OPC_TM_PRI_WEIGHT; 531 hclge_cmd_setup_basic_desc(&desc, cmd, true); 532 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 533 if (ret) 534 goto err_tm_cmd_send; 535 536 priority_weight = (struct hclge_priority_weight_cmd *)desc.data; 537 dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); 538 dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); 539 540 cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; 541 hclge_cmd_setup_basic_desc(&desc, cmd, true); 542 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 543 if (ret) 544 goto err_tm_cmd_send; 545 546 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 547 dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); 548 dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", 549 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 550 551 cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; 552 hclge_cmd_setup_basic_desc(&desc, cmd, true); 553 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 554 if (ret) 555 goto err_tm_cmd_send; 556 557 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; 558 dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); 559 dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", 560 le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); 561 562 hclge_dbg_dump_tm_pg(hdev); 563 564 return; 565 566 err_tm_cmd_send: 567 dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n", 568 cmd, ret); 569 } 570 571 static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, 572 const char *cmd_buf) 573 { 574 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; 575 struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; 576 struct hclge_qs_to_pri_link_cmd *map; 577 struct hclge_tqp_tx_queue_tc_cmd *tc; 578 enum hclge_opcode_type cmd; 579 struct hclge_desc desc; 580 int queue_id, group_id; 581 u32 qset_maping[32]; 582 int tc_id, qset_id; 583 int pri_id, ret; 584 u32 i; 585 586 ret = kstrtouint(cmd_buf, 0, &queue_id); 587 queue_id = (ret != 0) ? 0 : queue_id; 588 589 cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; 590 nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; 591 hclge_cmd_setup_basic_desc(&desc, cmd, true); 592 nq_to_qs_map->nq_id = cpu_to_le16(queue_id); 593 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 594 if (ret) 595 goto err_tm_map_cmd_send; 596 qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF; 597 598 cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; 599 map = (struct hclge_qs_to_pri_link_cmd *)desc.data; 600 hclge_cmd_setup_basic_desc(&desc, cmd, true); 601 map->qs_id = cpu_to_le16(qset_id); 602 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 603 if (ret) 604 goto err_tm_map_cmd_send; 605 pri_id = map->priority; 606 607 cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; 608 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; 609 hclge_cmd_setup_basic_desc(&desc, cmd, true); 610 tc->queue_id = cpu_to_le16(queue_id); 611 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 612 if (ret) 613 goto err_tm_map_cmd_send; 614 tc_id = tc->tc_id & 0x7; 615 616 dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); 617 dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", 618 queue_id, qset_id, pri_id, tc_id); 619 620 if (!hnae3_dev_dcb_supported(hdev)) { 621 dev_info(&hdev->pdev->dev, 622 "Only DCB-supported dev supports tm mapping\n"); 623 return; 624 } 625 626 cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; 627 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; 628 for (group_id = 0; group_id < 32; group_id++) { 629 hclge_cmd_setup_basic_desc(&desc, cmd, true); 630 bp_to_qs_map_cmd->tc_id = tc_id; 631 bp_to_qs_map_cmd->qs_group_id = group_id; 632 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 633 if (ret) 634 goto err_tm_map_cmd_send; 635 636 qset_maping[group_id] = 637 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map); 638 } 639 640 dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); 641 642 i = 0; 643 for (group_id = 0; group_id < 4; group_id++) { 644 dev_info(&hdev->pdev->dev, 645 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", 646 group_id * 256, qset_maping[(u32)(i + 7)], 647 qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)], 648 qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)], 649 qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)], 650 qset_maping[i]); 651 i += 8; 652 } 653 654 return; 655 656 err_tm_map_cmd_send: 657 dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n", 658 cmd, ret); 659 } 660 661 static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) 662 { 663 struct hclge_cfg_pause_param_cmd *pause_param; 664 struct hclge_desc desc; 665 int ret; 666 667 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); 668 669 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 670 if (ret) { 671 dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n", 672 ret); 673 return; 674 } 675 676 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; 677 dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); 678 dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", 679 pause_param->pause_trans_gap); 680 dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", 681 le16_to_cpu(pause_param->pause_trans_time)); 682 } 683 684 static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) 685 { 686 struct hclge_qos_pri_map_cmd *pri_map; 687 struct hclge_desc desc; 688 int ret; 689 690 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); 691 692 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 693 if (ret) { 694 dev_err(&hdev->pdev->dev, 695 "dump qos pri map fail, ret = %d\n", ret); 696 return; 697 } 698 699 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; 700 dev_info(&hdev->pdev->dev, "dump qos pri map\n"); 701 dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); 702 dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); 703 dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); 704 dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); 705 dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); 706 dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); 707 dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); 708 dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); 709 dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); 710 } 711 712 static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) 713 { 714 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; 715 struct hclge_rx_priv_buff_cmd *rx_buf_cmd; 716 struct hclge_rx_priv_wl_buf *rx_priv_wl; 717 struct hclge_rx_com_wl *rx_packet_cnt; 718 struct hclge_rx_com_thrd *rx_com_thrd; 719 struct hclge_rx_com_wl *rx_com_wl; 720 enum hclge_opcode_type cmd; 721 struct hclge_desc desc[2]; 722 int i, ret; 723 724 cmd = HCLGE_OPC_TX_BUFF_ALLOC; 725 hclge_cmd_setup_basic_desc(desc, cmd, true); 726 ret = hclge_cmd_send(&hdev->hw, desc, 1); 727 if (ret) 728 goto err_qos_cmd_send; 729 730 dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); 731 732 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; 733 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 734 dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, 735 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); 736 737 cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; 738 hclge_cmd_setup_basic_desc(desc, cmd, true); 739 ret = hclge_cmd_send(&hdev->hw, desc, 1); 740 if (ret) 741 goto err_qos_cmd_send; 742 743 dev_info(&hdev->pdev->dev, "\n"); 744 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; 745 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) 746 dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, 747 le16_to_cpu(rx_buf_cmd->buf_num[i])); 748 749 dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", 750 le16_to_cpu(rx_buf_cmd->shared_buf)); 751 752 cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 753 hclge_cmd_setup_basic_desc(desc, cmd, true); 754 ret = hclge_cmd_send(&hdev->hw, desc, 1); 755 if (ret) 756 goto err_qos_cmd_send; 757 758 rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; 759 dev_info(&hdev->pdev->dev, "\n"); 760 dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", 761 le16_to_cpu(rx_com_wl->com_wl.high), 762 le16_to_cpu(rx_com_wl->com_wl.low)); 763 764 cmd = HCLGE_OPC_RX_GBL_PKT_CNT; 765 hclge_cmd_setup_basic_desc(desc, cmd, true); 766 ret = hclge_cmd_send(&hdev->hw, desc, 1); 767 if (ret) 768 goto err_qos_cmd_send; 769 770 rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; 771 dev_info(&hdev->pdev->dev, 772 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", 773 le16_to_cpu(rx_packet_cnt->com_wl.high), 774 le16_to_cpu(rx_packet_cnt->com_wl.low)); 775 dev_info(&hdev->pdev->dev, "\n"); 776 777 if (!hnae3_dev_dcb_supported(hdev)) { 778 dev_info(&hdev->pdev->dev, 779 "Only DCB-supported dev supports rx priv wl\n"); 780 return; 781 } 782 cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; 783 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 784 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 785 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 786 ret = hclge_cmd_send(&hdev->hw, desc, 2); 787 if (ret) 788 goto err_qos_cmd_send; 789 790 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; 791 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 792 dev_info(&hdev->pdev->dev, 793 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, 794 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 795 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 796 797 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; 798 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 799 dev_info(&hdev->pdev->dev, 800 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", 801 i + HCLGE_TC_NUM_ONE_DESC, 802 le16_to_cpu(rx_priv_wl->tc_wl[i].high), 803 le16_to_cpu(rx_priv_wl->tc_wl[i].low)); 804 805 cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; 806 hclge_cmd_setup_basic_desc(&desc[0], cmd, true); 807 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 808 hclge_cmd_setup_basic_desc(&desc[1], cmd, true); 809 ret = hclge_cmd_send(&hdev->hw, desc, 2); 810 if (ret) 811 goto err_qos_cmd_send; 812 813 dev_info(&hdev->pdev->dev, "\n"); 814 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; 815 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 816 dev_info(&hdev->pdev->dev, 817 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, 818 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 819 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 820 821 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; 822 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) 823 dev_info(&hdev->pdev->dev, 824 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", 825 i + HCLGE_TC_NUM_ONE_DESC, 826 le16_to_cpu(rx_com_thrd->com_thrd[i].high), 827 le16_to_cpu(rx_com_thrd->com_thrd[i].low)); 828 return; 829 830 err_qos_cmd_send: 831 dev_err(&hdev->pdev->dev, 832 "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret); 833 } 834 835 static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) 836 { 837 struct hclge_mac_ethertype_idx_rd_cmd *req0; 838 char printf_buf[HCLGE_DBG_BUF_LEN]; 839 struct hclge_desc desc; 840 u32 msg_egress_port; 841 int ret, i; 842 843 dev_info(&hdev->pdev->dev, "mng tab:\n"); 844 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 845 strncat(printf_buf, 846 "entry|mac_addr |mask|ether|mask|vlan|mask", 847 HCLGE_DBG_BUF_LEN - 1); 848 strncat(printf_buf + strlen(printf_buf), 849 "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", 850 HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); 851 852 dev_info(&hdev->pdev->dev, "%s", printf_buf); 853 854 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { 855 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, 856 true); 857 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; 858 req0->index = cpu_to_le16(i); 859 860 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 861 if (ret) { 862 dev_err(&hdev->pdev->dev, 863 "call hclge_cmd_send fail, ret = %d\n", ret); 864 return; 865 } 866 867 if (!req0->resp_code) 868 continue; 869 870 memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); 871 snprintf(printf_buf, HCLGE_DBG_BUF_LEN, 872 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", 873 le16_to_cpu(req0->index), 874 req0->mac_addr[0], req0->mac_addr[1], 875 req0->mac_addr[2], req0->mac_addr[3], 876 req0->mac_addr[4], req0->mac_addr[5]); 877 878 snprintf(printf_buf + strlen(printf_buf), 879 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 880 "%x |%04x |%x |%04x|%x |%02x |%02x |", 881 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), 882 le16_to_cpu(req0->ethter_type), 883 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), 884 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG, 885 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), 886 req0->i_port_bitmap, req0->i_port_direction); 887 888 msg_egress_port = le16_to_cpu(req0->egress_port); 889 snprintf(printf_buf + strlen(printf_buf), 890 HCLGE_DBG_BUF_LEN - strlen(printf_buf), 891 "%x |%x |%02x |%04x|%x\n", 892 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B), 893 msg_egress_port & HCLGE_DBG_MNG_PF_ID, 894 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, 895 le16_to_cpu(req0->egress_queue), 896 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B)); 897 898 dev_info(&hdev->pdev->dev, "%s", printf_buf); 899 } 900 } 901 902 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, 903 bool sel_x, u32 loc) 904 { 905 struct hclge_fd_tcam_config_1_cmd *req1; 906 struct hclge_fd_tcam_config_2_cmd *req2; 907 struct hclge_fd_tcam_config_3_cmd *req3; 908 struct hclge_desc desc[3]; 909 int ret, i; 910 u32 *req; 911 912 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); 913 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 914 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); 915 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 916 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); 917 918 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; 919 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; 920 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; 921 922 req1->stage = stage; 923 req1->xy_sel = sel_x ? 1 : 0; 924 req1->index = cpu_to_le32(loc); 925 926 ret = hclge_cmd_send(&hdev->hw, desc, 3); 927 if (ret) 928 return ret; 929 930 dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", 931 sel_x ? "x" : "y", loc); 932 933 /* tcam_data0 ~ tcam_data1 */ 934 req = (u32 *)req1->tcam_data; 935 for (i = 0; i < 2; i++) 936 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 937 938 /* tcam_data2 ~ tcam_data7 */ 939 req = (u32 *)req2->tcam_data; 940 for (i = 0; i < 6; i++) 941 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 942 943 /* tcam_data8 ~ tcam_data12 */ 944 req = (u32 *)req3->tcam_data; 945 for (i = 0; i < 5; i++) 946 dev_info(&hdev->pdev->dev, "%08x\n", *req++); 947 948 return ret; 949 } 950 951 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) 952 { 953 struct hclge_fd_rule *rule; 954 struct hlist_node *node; 955 int cnt = 0; 956 957 spin_lock_bh(&hdev->fd_rule_lock); 958 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { 959 rule_locs[cnt] = rule->location; 960 cnt++; 961 } 962 spin_unlock_bh(&hdev->fd_rule_lock); 963 964 if (cnt != hdev->hclge_fd_rule_num) 965 return -EINVAL; 966 967 return cnt; 968 } 969 970 static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) 971 { 972 int i, ret, rule_cnt; 973 u16 *rule_locs; 974 975 if (!hnae3_dev_fd_supported(hdev)) { 976 dev_err(&hdev->pdev->dev, 977 "Only FD-supported dev supports dump fd tcam\n"); 978 return; 979 } 980 981 if (!hdev->hclge_fd_rule_num || 982 !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) 983 return; 984 985 rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], 986 sizeof(u16), GFP_KERNEL); 987 if (!rule_locs) 988 return; 989 990 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs); 991 if (rule_cnt <= 0) { 992 dev_err(&hdev->pdev->dev, 993 "failed to get rule number, ret = %d\n", rule_cnt); 994 kfree(rule_locs); 995 return; 996 } 997 998 for (i = 0; i < rule_cnt; i++) { 999 ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]); 1000 if (ret) { 1001 dev_err(&hdev->pdev->dev, 1002 "failed to get fd tcam key x, ret = %d\n", ret); 1003 kfree(rule_locs); 1004 return; 1005 } 1006 1007 ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]); 1008 if (ret) { 1009 dev_err(&hdev->pdev->dev, 1010 "failed to get fd tcam key y, ret = %d\n", ret); 1011 kfree(rule_locs); 1012 return; 1013 } 1014 } 1015 1016 kfree(rule_locs); 1017 } 1018 1019 void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) 1020 { 1021 dev_info(&hdev->pdev->dev, "PF reset count: %u\n", 1022 hdev->rst_stats.pf_rst_cnt); 1023 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1024 hdev->rst_stats.flr_rst_cnt); 1025 dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n", 1026 hdev->rst_stats.global_rst_cnt); 1027 dev_info(&hdev->pdev->dev, "IMP reset count: %u\n", 1028 hdev->rst_stats.imp_rst_cnt); 1029 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1030 hdev->rst_stats.reset_done_cnt); 1031 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1032 hdev->rst_stats.hw_reset_done_cnt); 1033 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1034 hdev->rst_stats.reset_cnt); 1035 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1036 hdev->rst_stats.reset_fail_cnt); 1037 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1038 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); 1039 dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n", 1040 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); 1041 dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n", 1042 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); 1043 dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n", 1044 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); 1045 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1046 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); 1047 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1048 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); 1049 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1050 } 1051 1052 static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev) 1053 { 1054 dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n", 1055 hdev->last_serv_processed); 1056 dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n", 1057 hdev->serv_processed_cnt); 1058 } 1059 1060 static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) 1061 { 1062 struct hclge_desc *desc_src, *desc_tmp; 1063 struct hclge_get_m7_bd_cmd *req; 1064 struct hclge_desc desc; 1065 u32 bd_num, buf_len; 1066 int ret, i; 1067 1068 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true); 1069 1070 req = (struct hclge_get_m7_bd_cmd *)desc.data; 1071 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1072 if (ret) { 1073 dev_err(&hdev->pdev->dev, 1074 "get firmware statistics bd number failed, ret = %d\n", 1075 ret); 1076 return; 1077 } 1078 1079 bd_num = le32_to_cpu(req->bd_num); 1080 1081 buf_len = sizeof(struct hclge_desc) * bd_num; 1082 desc_src = kzalloc(buf_len, GFP_KERNEL); 1083 if (!desc_src) 1084 return; 1085 1086 desc_tmp = desc_src; 1087 ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num, 1088 HCLGE_OPC_M7_STATS_INFO); 1089 if (ret) { 1090 kfree(desc_src); 1091 dev_err(&hdev->pdev->dev, 1092 "get firmware statistics failed, ret = %d\n", ret); 1093 return; 1094 } 1095 1096 for (i = 0; i < bd_num; i++) { 1097 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1098 le32_to_cpu(desc_tmp->data[0]), 1099 le32_to_cpu(desc_tmp->data[1]), 1100 le32_to_cpu(desc_tmp->data[2])); 1101 dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n", 1102 le32_to_cpu(desc_tmp->data[3]), 1103 le32_to_cpu(desc_tmp->data[4]), 1104 le32_to_cpu(desc_tmp->data[5])); 1105 1106 desc_tmp++; 1107 } 1108 1109 kfree(desc_src); 1110 } 1111 1112 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5 1113 1114 static void hclge_ncl_config_data_print(struct hclge_dev *hdev, 1115 struct hclge_desc *desc, int *offset, 1116 int *length) 1117 { 1118 #define HCLGE_CMD_DATA_NUM 6 1119 1120 int i; 1121 int j; 1122 1123 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) { 1124 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { 1125 if (i == 0 && j == 0) 1126 continue; 1127 1128 dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", 1129 *offset, 1130 le32_to_cpu(desc[i].data[j])); 1131 *offset += sizeof(u32); 1132 *length -= sizeof(u32); 1133 if (*length <= 0) 1134 return; 1135 } 1136 } 1137 } 1138 1139 /* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file 1140 * @hdev: pointer to struct hclge_dev 1141 * @cmd_buf: string that contains offset and length 1142 */ 1143 static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, 1144 const char *cmd_buf) 1145 { 1146 #define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 1147 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4) 1148 1149 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM]; 1150 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM; 1151 int offset; 1152 int length; 1153 int data0; 1154 int ret; 1155 1156 ret = sscanf(cmd_buf, "%x %x", &offset, &length); 1157 if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || 1158 length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { 1159 dev_err(&hdev->pdev->dev, "Invalid offset or length.\n"); 1160 return; 1161 } 1162 if (offset < 0 || length <= 0) { 1163 dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n"); 1164 return; 1165 } 1166 1167 dev_info(&hdev->pdev->dev, "offset | data\n"); 1168 1169 while (length > 0) { 1170 data0 = offset; 1171 if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD) 1172 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16; 1173 else 1174 data0 |= length << 16; 1175 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, 1176 HCLGE_OPC_QUERY_NCL_CONFIG); 1177 if (ret) 1178 return; 1179 1180 hclge_ncl_config_data_print(hdev, desc, &offset, &length); 1181 } 1182 } 1183 1184 static void hclge_dbg_dump_loopback(struct hclge_dev *hdev, 1185 const char *cmd_buf) 1186 { 1187 struct phy_device *phydev = hdev->hw.mac.phydev; 1188 struct hclge_config_mac_mode_cmd *req_app; 1189 struct hclge_serdes_lb_cmd *req_serdes; 1190 struct hclge_desc desc; 1191 u8 loopback_en; 1192 int ret; 1193 1194 req_app = (struct hclge_config_mac_mode_cmd *)desc.data; 1195 req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; 1196 1197 dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id); 1198 1199 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); 1200 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1201 if (ret) { 1202 dev_err(&hdev->pdev->dev, 1203 "failed to dump app loopback status, ret = %d\n", ret); 1204 return; 1205 } 1206 1207 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), 1208 HCLGE_MAC_APP_LP_B); 1209 dev_info(&hdev->pdev->dev, "app loopback: %s\n", 1210 loopback_en ? "on" : "off"); 1211 1212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true); 1213 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1214 if (ret) { 1215 dev_err(&hdev->pdev->dev, 1216 "failed to dump serdes loopback status, ret = %d\n", 1217 ret); 1218 return; 1219 } 1220 1221 loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; 1222 dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n", 1223 loopback_en ? "on" : "off"); 1224 1225 loopback_en = req_serdes->enable & 1226 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B; 1227 dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n", 1228 loopback_en ? "on" : "off"); 1229 1230 if (phydev) 1231 dev_info(&hdev->pdev->dev, "phy loopback: %s\n", 1232 phydev->loopback_enabled ? "on" : "off"); 1233 } 1234 1235 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt 1236 * @hdev: pointer to struct hclge_dev 1237 */ 1238 static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) 1239 { 1240 #define HCLGE_BILLION_NANO_SECONDS 1000000000 1241 1242 struct hclge_mac_tnl_stats stats; 1243 unsigned long rem_nsec; 1244 1245 dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); 1246 1247 while (kfifo_get(&hdev->mac_tnl_log, &stats)) { 1248 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); 1249 dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n", 1250 (unsigned long)stats.time, rem_nsec / 1000, 1251 stats.status); 1252 } 1253 } 1254 1255 static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) 1256 { 1257 struct hclge_qs_shapping_cmd *shap_cfg_cmd; 1258 u8 ir_u, ir_b, ir_s, bs_b, bs_s; 1259 struct hclge_desc desc; 1260 u32 shapping_para; 1261 int ret; 1262 1263 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); 1264 1265 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; 1266 shap_cfg_cmd->qs_id = cpu_to_le16(qsid); 1267 1268 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1269 if (ret) { 1270 dev_err(&hdev->pdev->dev, 1271 "qs%u failed to get tx_rate, ret=%d\n", 1272 qsid, ret); 1273 return; 1274 } 1275 1276 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); 1277 ir_b = hclge_tm_get_field(shapping_para, IR_B); 1278 ir_u = hclge_tm_get_field(shapping_para, IR_U); 1279 ir_s = hclge_tm_get_field(shapping_para, IR_S); 1280 bs_b = hclge_tm_get_field(shapping_para, BS_B); 1281 bs_s = hclge_tm_get_field(shapping_para, BS_S); 1282 1283 dev_info(&hdev->pdev->dev, 1284 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n", 1285 qsid, ir_b, ir_u, ir_s, bs_b, bs_s); 1286 } 1287 1288 static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) 1289 { 1290 struct hnae3_knic_private_info *kinfo; 1291 struct hclge_vport *vport; 1292 int vport_id, i; 1293 1294 for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) { 1295 vport = &hdev->vport[vport_id]; 1296 kinfo = &vport->nic.kinfo; 1297 1298 dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); 1299 1300 for (i = 0; i < kinfo->num_tc; i++) { 1301 u16 qsid = vport->qs_offset + i; 1302 1303 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1304 } 1305 } 1306 } 1307 1308 static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, 1309 const char *cmd_buf) 1310 { 1311 #define HCLGE_MAX_QSET_NUM 1024 1312 1313 u16 qsid; 1314 int ret; 1315 1316 ret = kstrtou16(cmd_buf, 0, &qsid); 1317 if (ret) { 1318 hclge_dbg_dump_qs_shaper_all(hdev); 1319 return; 1320 } 1321 1322 if (qsid >= HCLGE_MAX_QSET_NUM) { 1323 dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n", 1324 qsid); 1325 return; 1326 } 1327 1328 hclge_dbg_dump_qs_shaper_single(hdev, qsid); 1329 } 1330 1331 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) 1332 { 1333 #define DUMP_REG "dump reg" 1334 #define DUMP_TM_MAP "dump tm map" 1335 #define DUMP_LOOPBACK "dump loopback" 1336 1337 struct hclge_vport *vport = hclge_get_vport(handle); 1338 struct hclge_dev *hdev = vport->back; 1339 1340 if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { 1341 hclge_dbg_fd_tcam(hdev); 1342 } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { 1343 hclge_dbg_dump_tc(hdev); 1344 } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) { 1345 hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]); 1346 } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { 1347 hclge_dbg_dump_tm(hdev); 1348 } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { 1349 hclge_dbg_dump_qos_pause_cfg(hdev); 1350 } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { 1351 hclge_dbg_dump_qos_pri_map(hdev); 1352 } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { 1353 hclge_dbg_dump_qos_buf_cfg(hdev); 1354 } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { 1355 hclge_dbg_dump_mng_table(hdev); 1356 } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) { 1357 hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]); 1358 } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { 1359 hclge_dbg_dump_rst_info(hdev); 1360 } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) { 1361 hclge_dbg_dump_serv_info(hdev); 1362 } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) { 1363 hclge_dbg_get_m7_stats_info(hdev); 1364 } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { 1365 hclge_dbg_dump_ncl_config(hdev, 1366 &cmd_buf[sizeof("dump ncl_config")]); 1367 } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { 1368 hclge_dbg_dump_mac_tnl_status(hdev); 1369 } else if (strncmp(cmd_buf, DUMP_LOOPBACK, 1370 strlen(DUMP_LOOPBACK)) == 0) { 1371 hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]); 1372 } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { 1373 hclge_dbg_dump_qs_shaper(hdev, 1374 &cmd_buf[sizeof("dump qs shaper")]); 1375 } else { 1376 dev_info(&hdev->pdev->dev, "unknown command\n"); 1377 return -EINVAL; 1378 } 1379 1380 return 0; 1381 } 1382