// SPDX-License-Identifier: GPL-2.0+
/*
 * The Huawei Cache Coherence System (HCCS) is a multi-chip interconnection
 * bus protocol.
 *
 * Copyright (c) 2023 Hisilicon Limited.
 * Author: Huisong Li <lihuisong@huawei.com>
 *
 * The HCCS driver for Kunpeng SoC provides the following features:
 * - Retrieve the following information about each port:
 *    - port type
 *    - lane mode
 *    - enable
 *    - current lane mode
 *    - link finite state machine
 *    - lane mask
 *    - CRC error count
 *
 * - Retrieve the following information about all the ports on the chip or
 *   the die:
 *    - whether all enabled ports are linked
 *    - whether all linked ports are in full lane mode
 *    - CRC error count sum
 *
 * - Retrieve all HCCS types used on the platform.
 *
 * - Support the low power feature for all ports of a specified HCCS type,
 *   and provide the following interfaces:
 *    - query the HCCS types that support increasing and decreasing the
 *      lane number.
 *    - decrease the lane number of all ports of a specified HCCS type when
 *      they are idle.
 *    - increase the lane number of all ports of a specified HCCS type.
 */
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/stringify.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include <acpi/pcc.h>

#include "kunpeng_hccs.h"

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands.
 */
#define HCCS_PCC_CMD_WAIT_RETRIES_NUM		500ULL
#define HCCS_POLL_STATUS_TIME_INTERVAL_US	3

static struct hccs_port_info *kobj_to_port_info(struct kobject *k)
{
	return container_of(k, struct hccs_port_info, kobj);
}

static struct hccs_die_info *kobj_to_die_info(struct kobject *k)
{
	return container_of(k, struct hccs_die_info, kobj);
}

static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
{
	return container_of(k, struct hccs_chip_info, kobj);
}

static struct hccs_dev *device_kobj_to_hccs_dev(struct kobject *k)
{
	struct device *dev = container_of(k, struct device, kobj);
	struct platform_device *pdev =
			container_of(dev, struct platform_device, dev);

	return platform_get_drvdata(pdev);
}

static char *hccs_port_type_to_name(struct hccs_dev *hdev, u8 type)
{
	u16 i;

	for (i = 0; i < hdev->used_type_num; i++) {
		if (hdev->type_name_maps[i].type == type)
			return hdev->type_name_maps[i].name;
	}

	return NULL;
}

static int hccs_name_to_port_type(struct hccs_dev *hdev,
				  const char *name, u8 *type)
{
	u16 i;

	for (i = 0; i < hdev->used_type_num; i++) {
		if (strcmp(hdev->type_name_maps[i].name, name) == 0) {
			*type = hdev->type_name_maps[i].type;
			return 0;
		}
	}

	return -EINVAL;
}

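/*
 * The PCC channel (subspace) ID used by this device is conveyed through the
 * ACPI _CRS method: firmware encodes it in the access_size field of a
 * Generic Register resource in the PCC address space, which is what the
 * resource-walk callback below extracts.
 */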
struct hccs_register_ctx {
	struct device *dev;
	u8 chan_id;
	int err;
};

static acpi_status hccs_get_register_cb(struct acpi_resource *ares,
					void *context)
{
	struct acpi_resource_generic_register *reg;
	struct hccs_register_ctx *ctx = context;

	if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
		return AE_OK;

	reg = &ares->data.generic_reg;
	if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) {
		dev_err(ctx->dev, "Bad register resource.\n");
		ctx->err = -EINVAL;
		return AE_ERROR;
	}
	ctx->chan_id = reg->access_size;

	return AE_OK;
}

static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
{
	acpi_handle handle = ACPI_HANDLE(hdev->dev);
	struct hccs_register_ctx ctx = {0};
	acpi_status status;

	if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
		dev_err(hdev->dev, "No _CRS method.\n");
		return -ENODEV;
	}

	ctx.dev = hdev->dev;
	status = acpi_walk_resources(handle, METHOD_NAME__CRS,
				     hccs_get_register_cb, &ctx);
	if (ACPI_FAILURE(status))
		return ctx.err;
	hdev->chan_id = ctx.chan_id;

	return 0;
}

static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:0x%x, ret:%d\n",
			 *(u8 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:0x%x, ret:%d\n",
			 *(u8 *)msg, ret);
}

static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct hccs_mbox_client_info *cl_info =
			container_of(cl, struct hccs_mbox_client_info, client);

	complete(&cl_info->done);
}

static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
{
	struct hccs_mbox_client_info *cl_info = &hdev->cl_info;

	if (cl_info->pcc_comm_addr)
		iounmap(cl_info->pcc_comm_addr);
	pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
}

/*
 * Request the PCC channel identified by hdev->chan_id, check that the PCCT
 * interrupt configuration matches what this hardware version expects
 * (polling vs. platform interrupt), then validate and map the shared memory
 * region advertised by firmware.
 */
static int hccs_register_pcc_channel(struct hccs_dev *hdev)
{
	struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
	struct mbox_client *cl = &cl_info->client;
	struct pcc_mbox_chan *pcc_chan;
	struct device *dev = hdev->dev;
	int rc;

	cl->dev = dev;
	cl->tx_block = false;
	cl->knows_txdone = true;
	cl->tx_done = hccs_chan_tx_done;
	cl->rx_callback = hdev->verspec_data->rx_callback;
	init_completion(&cl_info->done);

	pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
	if (IS_ERR(pcc_chan)) {
		dev_err(dev, "PCC channel request failed.\n");
		rc = -ENODEV;
		goto out;
	}
	cl_info->pcc_chan = pcc_chan;
	cl_info->mbox_chan = pcc_chan->mchan;

	/*
	 * pcc_chan->latency is just a nominal value. In reality the remote
	 * processor could be much slower to reply. So add an arbitrary amount
	 * of wait on top of nominal.
	 */
	cl_info->deadline_us =
			HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
	if (!hdev->verspec_data->has_txdone_irq &&
	    cl_info->mbox_chan->mbox->txdone_irq) {
		dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
		rc = -EINVAL;
		goto err_mbx_channel_free;
	} else if (hdev->verspec_data->has_txdone_irq &&
		   !cl_info->mbox_chan->mbox->txdone_irq) {
		dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
		rc = -EINVAL;
		goto err_mbx_channel_free;
	}

	if (!pcc_chan->shmem_base_addr ||
	    pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
		dev_err(dev, "The base address or size (%llu) of PCC communication region is invalid.\n",
			pcc_chan->shmem_size);
		rc = -EINVAL;
		goto err_mbx_channel_free;
	}

	cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
					 pcc_chan->shmem_size);
	if (!cl_info->pcc_comm_addr) {
		dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
			hdev->chan_id);
		rc = -ENOMEM;
		goto err_mbx_channel_free;
	}

	return 0;

err_mbx_channel_free:
	pcc_mbox_free_channel(cl_info->pcc_chan);
out:
	return rc;
}

static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
{
	struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
	struct acpi_pcct_shared_memory __iomem *comm_base =
			cl_info->pcc_comm_addr;
	u16 status;
	int ret;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_poll_timeout(&comm_base->status, status,
				 status & PCC_STATUS_CMD_COMPLETE,
				 HCCS_POLL_STATUS_TIME_INTERVAL_US,
				 cl_info->deadline_us);
	if (unlikely(ret))
		dev_err(hdev->dev, "poll PCC status failed, ret = %d.\n", ret);

	return ret;
}

static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
{
	struct hccs_mbox_client_info *cl_info = &hdev->cl_info;

	if (!wait_for_completion_timeout(&cl_info->done,
					 usecs_to_jiffies(cl_info->deadline_us))) {
		dev_err(hdev->dev, "PCC command execution timed out!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
						   u8 cmd,
						   struct hccs_desc *desc,
						   void __iomem *comm_space,
						   u16 space_size)
{
	struct acpi_pcct_shared_memory tmp = {
		.signature = PCC_SIGNATURE | hdev->chan_id,
		.command = cmd,
		.status = 0,
	};

	memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
		    sizeof(struct acpi_pcct_shared_memory));

	/* Copy the message to the PCC comm space */
	memcpy_toio(comm_space, (void *)desc, space_size);
}

static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
						       u8 cmd,
						       struct hccs_desc *desc,
						       void __iomem *comm_space,
						       u16 space_size)
{
	struct acpi_pcct_ext_pcc_shared_memory tmp = {
		.signature = PCC_SIGNATURE | hdev->chan_id,
		.flags = PCC_CMD_COMPLETION_NOTIFY,
		.length = HCCS_PCC_SHARE_MEM_BYTES,
		.command = cmd,
	};

	memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
		    sizeof(struct acpi_pcct_ext_pcc_shared_memory));

	/* Copy the message to the PCC comm space */
	memcpy_toio(comm_space, (void *)desc, space_size);
}

/*
 * One PCC transaction: fill the version-specific header and the request
 * descriptor into the shared memory region, ring the doorbell through the
 * mailbox framework, wait for completion (by polling or by interrupt,
 * depending on the hardware version), then copy back the response
 * descriptor and check the firmware return status.
 */
static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
			     struct hccs_desc *desc)
{
	const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
	struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
	struct hccs_fw_inner_head *fw_inner_head;
	void __iomem *comm_space;
	u16 space_size;
	int ret;

	comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
	space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
	verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
					  comm_space, space_size);
	if (verspec_data->has_txdone_irq)
		reinit_completion(&cl_info->done);

	/* Ring doorbell */
	ret = mbox_send_message(cl_info->mbox_chan, &cmd);
	if (ret < 0) {
		dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
			ret);
		goto end;
	}

	ret = verspec_data->wait_cmd_complete(hdev);
	if (ret)
		goto end;

	/* Copy response data */
	memcpy_fromio((void *)desc, comm_space, space_size);
	fw_inner_head = &desc->rsp.fw_inner_head;
	if (fw_inner_head->retStatus) {
		dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
			fw_inner_head->retStatus);
		ret = -EIO;
	}

end:
	if (verspec_data->has_txdone_irq)
		mbox_chan_txdone(cl_info->mbox_chan, ret);
	else
		mbox_client_txdone(cl_info->mbox_chan, ret);
	return ret;
}

static void hccs_init_req_desc(struct hccs_desc *desc)
{
	struct hccs_req_desc *req = &desc->req;

	memset(desc, 0, sizeof(*desc));
	req->req_head.module_code = HCCS_SERDES_MODULE_CODE;
}

static int hccs_get_dev_caps(struct hccs_dev *hdev)
{
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DEV_CAP, &desc);
	if (ret) {
		dev_err(hdev->dev, "Get device capabilities failed, ret = %d.\n",
			ret);
		return ret;
	}
	memcpy(&hdev->caps, desc.rsp.data, sizeof(hdev->caps));

	return 0;
}

static int hccs_query_chip_num_on_platform(struct hccs_dev *hdev)
{
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_CHIP_NUM, &desc);
	if (ret) {
		dev_err(hdev->dev, "query system chip number failed, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->chip_num = *((u8 *)&desc.rsp.data);
	if (!hdev->chip_num) {
		dev_err(hdev->dev, "chip num obtained from firmware is zero.\n");
		return -EINVAL;
	}

	return 0;
}

static int hccs_get_chip_info(struct hccs_dev *hdev,
			      struct hccs_chip_info *chip)
{
	struct hccs_die_num_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_die_num_req_param *)desc.req.data;
	req_param->chip_id = chip->chip_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_NUM, &desc);
	if (ret)
		return ret;

	chip->die_num = *((u8 *)&desc.rsp.data);

	return 0;
}

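/*
 * Topology enumeration is top down: first query the number of chips on the
 * platform, then the die count of every chip; die details and port details
 * are filled in by the follow-up queries further below.
 */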
static int hccs_query_chip_info_on_platform(struct hccs_dev *hdev)
{
	struct hccs_chip_info *chip;
	int ret;
	u8 idx;

	ret = hccs_query_chip_num_on_platform(hdev);
	if (ret) {
		dev_err(hdev->dev, "query chip number on platform failed, ret = %d.\n",
			ret);
		return ret;
	}

	hdev->chips = devm_kzalloc(hdev->dev,
				hdev->chip_num * sizeof(struct hccs_chip_info),
				GFP_KERNEL);
	if (!hdev->chips) {
		dev_err(hdev->dev, "allocate all chips memory failed.\n");
		return -ENOMEM;
	}

	for (idx = 0; idx < hdev->chip_num; idx++) {
		chip = &hdev->chips[idx];
		chip->chip_id = idx;
		ret = hccs_get_chip_info(hdev, chip);
		if (ret) {
			dev_err(hdev->dev, "get chip%u info failed, ret = %d.\n",
				idx, ret);
			return ret;
		}
		chip->hdev = hdev;
	}

	return 0;
}

static int hccs_query_die_info_on_chip(struct hccs_dev *hdev, u8 chip_id,
				       u8 die_idx, struct hccs_die_info *die)
{
	struct hccs_die_info_req_param *req_param;
	struct hccs_die_info_rsp_data *rsp_data;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_die_info_req_param *)desc.req.data;
	req_param->chip_id = chip_id;
	req_param->die_idx = die_idx;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_INFO, &desc);
	if (ret)
		return ret;

	rsp_data = (struct hccs_die_info_rsp_data *)desc.rsp.data;
	die->die_id = rsp_data->die_id;
	die->port_num = rsp_data->port_num;
	die->min_port_id = rsp_data->min_port_id;
	die->max_port_id = rsp_data->max_port_id;
	if (die->min_port_id > die->max_port_id) {
		dev_err(hdev->dev, "min port id(%u) > max port id(%u) on die_idx(%u).\n",
			die->min_port_id, die->max_port_id, die_idx);
		return -EINVAL;
	}
	if (die->max_port_id > HCCS_DIE_MAX_PORT_ID) {
		dev_err(hdev->dev, "max port id(%u) on die_idx(%u) is too big.\n",
			die->max_port_id, die_idx);
		return -EINVAL;
	}

	return 0;
}

static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
{
	struct device *dev = hdev->dev;
	struct hccs_chip_info *chip;
	struct hccs_die_info *die;
	bool has_die_info = false;
	u8 i, j;
	int ret;

	for (i = 0; i < hdev->chip_num; i++) {
		chip = &hdev->chips[i];
		if (!chip->die_num)
			continue;

		has_die_info = true;
		chip->dies = devm_kzalloc(hdev->dev,
				chip->die_num * sizeof(struct hccs_die_info),
				GFP_KERNEL);
		if (!chip->dies) {
			dev_err(dev, "allocate all dies memory on chip%u failed.\n",
				i);
			return -ENOMEM;
		}

		for (j = 0; j < chip->die_num; j++) {
			die = &chip->dies[j];
			ret = hccs_query_die_info_on_chip(hdev, i, j, die);
			if (ret) {
				dev_err(dev, "get die idx (%u) info on chip%u failed, ret = %d.\n",
					j, i, ret);
				return ret;
			}
			die->chip = chip;
		}
	}

	return has_die_info ? 0 : -EINVAL;
}

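/*
 * Firmware returns bulk data in batches: the response head reports how many
 * bytes were written (data_len) and the ID to use as the start of the next
 * request (next_id). hccs_get_bd_info() fetches one batch; callers loop
 * until the whole requested range has been covered.
 */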
static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
			    struct hccs_desc *desc,
			    void *buf, size_t buf_len,
			    struct hccs_rsp_head *rsp_head)
{
	struct hccs_rsp_head *head;
	struct hccs_rsp_desc *rsp;
	int ret;

	ret = hccs_pcc_cmd_send(hdev, opcode, desc);
	if (ret)
		return ret;

	rsp = &desc->rsp;
	head = &rsp->rsp_head;
	if (head->data_len > buf_len) {
		dev_err(hdev->dev,
			"buffer overflow (buf_len = %zu, data_len = %u)!\n",
			buf_len, head->data_len);
		return -ENOMEM;
	}

	memcpy(buf, rsp->data, head->data_len);
	*rsp_head = *head;

	return 0;
}

static int hccs_get_all_port_attr(struct hccs_dev *hdev,
				  struct hccs_die_info *die,
				  struct hccs_port_attr *attrs, u16 size)
{
	struct hccs_die_comm_req_param *req_param;
	struct hccs_req_head *req_head;
	struct hccs_rsp_head rsp_head;
	struct hccs_desc desc;
	size_t left_buf_len;
	u32 data_len = 0;
	u8 start_id;
	u8 *buf;
	int ret;

	buf = (u8 *)attrs;
	left_buf_len = sizeof(struct hccs_port_attr) * size;
	start_id = die->min_port_id;
	while (start_id <= die->max_port_id) {
		hccs_init_req_desc(&desc);
		req_head = &desc.req.req_head;
		req_head->start_id = start_id;
		req_param = (struct hccs_die_comm_req_param *)desc.req.data;
		req_param->chip_id = die->chip->chip_id;
		req_param->die_id = die->die_id;

		ret = hccs_get_bd_info(hdev, HCCS_GET_DIE_PORT_INFO, &desc,
				       buf + data_len, left_buf_len, &rsp_head);
		if (ret) {
			dev_err(hdev->dev,
				"get the information of port%u on die%u failed, ret = %d.\n",
				start_id, die->die_id, ret);
			return ret;
		}

		data_len += rsp_head.data_len;
		left_buf_len -= rsp_head.data_len;
		if (unlikely(rsp_head.next_id <= start_id)) {
			dev_err(hdev->dev,
				"next port id (%u) is not greater than last start id (%u) on die%u.\n",
				rsp_head.next_id, start_id, die->die_id);
			return -EINVAL;
		}
		start_id = rsp_head.next_id;
	}

	if (left_buf_len != 0) {
		dev_err(hdev->dev, "failed to get the expected port number(%u) attribute.\n",
			size);
		return -EINVAL;
	}

	return 0;
}

static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
					 struct hccs_die_info *die)
{
	struct hccs_port_attr *attrs;
	struct hccs_port_info *port;
	int ret;
	u8 i;

	attrs = kcalloc(die->port_num, sizeof(struct hccs_port_attr),
			GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	ret = hccs_get_all_port_attr(hdev, die, attrs, die->port_num);
	if (ret)
		goto out;

	for (i = 0; i < die->port_num; i++) {
		port = &die->ports[i];
		port->port_id = attrs[i].port_id;
		port->port_type = attrs[i].port_type;
		port->max_lane_num = attrs[i].max_lane_num;
		port->enable = attrs[i].enable;
		port->die = die;
	}

out:
	kfree(attrs);
	return ret;
}

static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
{
	struct device *dev = hdev->dev;
	struct hccs_chip_info *chip;
	struct hccs_die_info *die;
	bool has_port_info = false;
	u8 i, j;
	int ret;

	for (i = 0; i < hdev->chip_num; i++) {
		chip = &hdev->chips[i];
		for (j = 0; j < chip->die_num; j++) {
			die = &chip->dies[j];
			if (!die->port_num)
				continue;

			has_port_info = true;
			die->ports = devm_kzalloc(dev,
				die->port_num * sizeof(struct hccs_port_info),
				GFP_KERNEL);
			if (!die->ports) {
				dev_err(dev, "allocate ports memory on chip%u/die%u failed.\n",
					i, die->die_id);
				return -ENOMEM;
			}

			ret = hccs_get_all_port_info_on_die(hdev, die);
			if (ret) {
				dev_err(dev, "get all port info on chip%u/die%u failed, ret = %d.\n",
					i, die->die_id, ret);
				return ret;
			}
		}
	}

	return has_port_info ? 0 : -EINVAL;
}

static int hccs_get_hw_info(struct hccs_dev *hdev)
{
	int ret;

	ret = hccs_query_chip_info_on_platform(hdev);
	if (ret) {
		dev_err(hdev->dev, "query chip info on platform failed, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hccs_query_all_die_info_on_platform(hdev);
	if (ret) {
		dev_err(hdev->dev, "query all die info on platform failed, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hccs_query_all_port_info_on_platform(hdev);
	if (ret) {
		dev_err(hdev->dev, "query all port info on platform failed, ret = %d.\n",
			ret);
		return ret;
	}

	return 0;
}

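/*
 * Build the port type <-> name map. Every port type found in the topology
 * sets one bit in a bitmap; each set bit then gets a name composed of
 * HCCS_IP_PREFIX and the type number, which is the form the sysfs
 * interfaces report and accept.
 */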
static u16 hccs_calc_used_type_num(struct hccs_dev *hdev,
				   unsigned long *hccs_ver)
{
	struct hccs_chip_info *chip;
	struct hccs_port_info *port;
	struct hccs_die_info *die;
	u16 used_type_num = 0;
	u16 i, j, k;

	for (i = 0; i < hdev->chip_num; i++) {
		chip = &hdev->chips[i];
		for (j = 0; j < chip->die_num; j++) {
			die = &chip->dies[j];
			for (k = 0; k < die->port_num; k++) {
				port = &die->ports[k];
				set_bit(port->port_type, hccs_ver);
			}
		}
	}

	for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1)
		used_type_num++;

	return used_type_num;
}

static int hccs_init_type_name_maps(struct hccs_dev *hdev)
{
	DECLARE_BITMAP(hccs_ver, HCCS_IP_MAX + 1) = {};
	unsigned int i;
	u16 idx = 0;

	hdev->used_type_num = hccs_calc_used_type_num(hdev, hccs_ver);
	hdev->type_name_maps = devm_kcalloc(hdev->dev, hdev->used_type_num,
					    sizeof(struct hccs_type_name_map),
					    GFP_KERNEL);
	if (!hdev->type_name_maps)
		return -ENOMEM;

	for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1) {
		hdev->type_name_maps[idx].type = i;
		sprintf(hdev->type_name_maps[idx].name,
			"%s%u", HCCS_IP_PREFIX, i);
		idx++;
	}

	return 0;
}

static int hccs_query_port_link_status(struct hccs_dev *hdev,
				       const struct hccs_port_info *port,
				       struct hccs_link_status *link_status)
{
	const struct hccs_die_info *die = port->die;
	const struct hccs_chip_info *chip = die->chip;
	struct hccs_port_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_port_comm_req_param *)desc.req.data;
	req_param->chip_id = chip->chip_id;
	req_param->die_id = die->die_id;
	req_param->port_id = port->port_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_LINK_STATUS, &desc);
	if (ret) {
		dev_err(hdev->dev,
			"get port link status info failed, ret = %d.\n", ret);
		return ret;
	}

	*link_status = *((struct hccs_link_status *)desc.rsp.data);

	return 0;
}

static int hccs_query_port_crc_err_cnt(struct hccs_dev *hdev,
				       const struct hccs_port_info *port,
				       u64 *crc_err_cnt)
{
	const struct hccs_die_info *die = port->die;
	const struct hccs_chip_info *chip = die->chip;
	struct hccs_port_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_port_comm_req_param *)desc.req.data;
	req_param->chip_id = chip->chip_id;
	req_param->die_id = die->die_id;
	req_param->port_id = port->port_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_CRC_ERR_CNT, &desc);
	if (ret) {
		dev_err(hdev->dev,
			"get port crc error count failed, ret = %d.\n", ret);
		return ret;
	}

	memcpy(crc_err_cnt, &desc.rsp.data, sizeof(u64));

	return 0;
}

static int hccs_get_die_all_link_status(struct hccs_dev *hdev,
					const struct hccs_die_info *die,
					u8 *all_linked)
{
	struct hccs_die_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	if (die->port_num == 0) {
		*all_linked = 1;
		return 0;
	}

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_die_comm_req_param *)desc.req.data;
	req_param->chip_id = die->chip->chip_id;
	req_param->die_id = die->die_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LINK_STA, &desc);
	if (ret) {
		dev_err(hdev->dev,
			"get link status of all ports failed on die%u, ret = %d.\n",
			die->die_id, ret);
		return ret;
	}

	*all_linked = *((u8 *)&desc.rsp.data);

	return 0;
}

static int hccs_get_die_all_port_lane_status(struct hccs_dev *hdev,
					     const struct hccs_die_info *die,
					     u8 *full_lane)
{
	struct hccs_die_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	if (die->port_num == 0) {
		*full_lane = 1;
		return 0;
	}

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_die_comm_req_param *)desc.req.data;
	req_param->chip_id = die->chip->chip_id;
	req_param->die_id = die->die_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LANE_STA, &desc);
	if (ret) {
		dev_err(hdev->dev, "get lane status of all ports failed on die%u, ret = %d.\n",
			die->die_id, ret);
		return ret;
	}

	*full_lane = *((u8 *)&desc.rsp.data);

	return 0;
}

static int hccs_get_die_total_crc_err_cnt(struct hccs_dev *hdev,
					  const struct hccs_die_info *die,
					  u64 *total_crc_err_cnt)
{
	struct hccs_die_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	if (die->port_num == 0) {
		*total_crc_err_cnt = 0;
		return 0;
	}

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_die_comm_req_param *)desc.req.data;
	req_param->chip_id = die->chip->chip_id;
	req_param->die_id = die->die_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_CRC_ERR_CNT, &desc);
	if (ret) {
		dev_err(hdev->dev, "get crc error count sum failed on die%u, ret = %d.\n",
			die->die_id, ret);
		return ret;
	}

	memcpy(total_crc_err_cnt, &desc.rsp.data, sizeof(u64));

	return 0;
}

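/*
 * Read-only sysfs attributes for the topology. Per-port attributes (type,
 * lane_mode, enable, cur_lane_num, link_fsm, lane_mask, crc_err_cnt) live
 * under chip<X>/die<Y>/hccs<N>/ below the platform device; die-wide and
 * chip-wide summaries (all_linked, linked_full_lane, crc_err_cnt) live in
 * the die and chip directories respectively. The path below is only
 * illustrative, the actual device name depends on the ACPI ID, e.g.:
 *
 *   /sys/devices/platform/HISI04B1:00/chip0/die0/hccs0/cur_lane_num
 */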
static ssize_t hccs_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static const struct sysfs_ops hccs_comm_ops = {
	.show = hccs_show,
};

static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);

	return sysfs_emit(buf, "%s%u\n", HCCS_IP_PREFIX, port->port_type);
}
static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);

static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
			      char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);

	return sysfs_emit(buf, "x%u\n", port->max_lane_num);
}
static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);

static ssize_t enable_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);

	return sysfs_emit(buf, "%u\n", port->enable);
}
static struct kobj_attribute port_enable_attr = __ATTR_RO(enable);

static ssize_t cur_lane_num_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);
	struct hccs_dev *hdev = port->die->chip->hdev;
	struct hccs_link_status link_status = {0};
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_query_port_link_status(hdev, port, &link_status);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", link_status.lane_num);
}
static struct kobj_attribute cur_lane_num_attr = __ATTR_RO(cur_lane_num);

static ssize_t link_fsm_show(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);
	struct hccs_dev *hdev = port->die->chip->hdev;
	struct hccs_link_status link_status = {0};
	const struct {
		u8 link_fsm;
		char *str;
	} link_fsm_map[] = {
		{HCCS_PORT_RESET, "reset"},
		{HCCS_PORT_SETUP, "setup"},
		{HCCS_PORT_CONFIG, "config"},
		{HCCS_PORT_READY, "link-up"},
	};
	const char *link_fsm_str = "unknown";
	size_t i;
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_query_port_link_status(hdev, port, &link_status);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(link_fsm_map); i++) {
		if (link_fsm_map[i].link_fsm == link_status.link_fsm) {
			link_fsm_str = link_fsm_map[i].str;
			break;
		}
	}

	return sysfs_emit(buf, "%s\n", link_fsm_str);
}
static struct kobj_attribute link_fsm_attr = __ATTR_RO(link_fsm);

static ssize_t lane_mask_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);
	struct hccs_dev *hdev = port->die->chip->hdev;
	struct hccs_link_status link_status = {0};
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_query_port_link_status(hdev, port, &link_status);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "0x%x\n", link_status.lane_mask);
}
static struct kobj_attribute lane_mask_attr = __ATTR_RO(lane_mask);

static ssize_t crc_err_cnt_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	const struct hccs_port_info *port = kobj_to_port_info(kobj);
	struct hccs_dev *hdev = port->die->chip->hdev;
	u64 crc_err_cnt;
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_query_port_crc_err_cnt(hdev, port, &crc_err_cnt);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", crc_err_cnt);
}
static struct kobj_attribute crc_err_cnt_attr = __ATTR_RO(crc_err_cnt);

static struct attribute *hccs_port_default_attrs[] = {
	&hccs_type_attr.attr,
	&lane_mode_attr.attr,
	&port_enable_attr.attr,
	&cur_lane_num_attr.attr,
	&link_fsm_attr.attr,
	&lane_mask_attr.attr,
	&crc_err_cnt_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hccs_port_default);

static const struct kobj_type hccs_port_type = {
	.sysfs_ops = &hccs_comm_ops,
	.default_groups = hccs_port_default_groups,
};

static ssize_t all_linked_on_die_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	const struct hccs_die_info *die = kobj_to_die_info(kobj);
	struct hccs_dev *hdev = die->chip->hdev;
	u8 all_linked;
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_get_die_all_link_status(hdev, die, &all_linked);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", all_linked);
}
static struct kobj_attribute all_linked_on_die_attr =
	__ATTR(all_linked, 0444, all_linked_on_die_show, NULL);

static ssize_t linked_full_lane_on_die_show(struct kobject *kobj,
					    struct kobj_attribute *attr,
					    char *buf)
{
	const struct hccs_die_info *die = kobj_to_die_info(kobj);
	struct hccs_dev *hdev = die->chip->hdev;
	u8 full_lane;
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_get_die_all_port_lane_status(hdev, die, &full_lane);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%u\n", full_lane);
}
static struct kobj_attribute linked_full_lane_on_die_attr =
	__ATTR(linked_full_lane, 0444, linked_full_lane_on_die_show, NULL);

static ssize_t crc_err_cnt_sum_on_die_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	const struct hccs_die_info *die = kobj_to_die_info(kobj);
	struct hccs_dev *hdev = die->chip->hdev;
	u64 total_crc_err_cnt;
	int ret;

	mutex_lock(&hdev->lock);
	ret = hccs_get_die_total_crc_err_cnt(hdev, die, &total_crc_err_cnt);
	mutex_unlock(&hdev->lock);
	if (ret)
		return ret;

	return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
}
static struct kobj_attribute crc_err_cnt_sum_on_die_attr =
	__ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_die_show, NULL);

static struct attribute *hccs_die_default_attrs[] = {
	&all_linked_on_die_attr.attr,
	&linked_full_lane_on_die_attr.attr,
	&crc_err_cnt_sum_on_die_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hccs_die_default);

static const struct kobj_type hccs_die_type = {
	.sysfs_ops = &hccs_comm_ops,
	.default_groups = hccs_die_default_groups,
};

static ssize_t all_linked_on_chip_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
	struct hccs_dev *hdev = chip->hdev;
	const struct hccs_die_info *die;
	u8 all_linked = 1;
	u8 i, tmp;
	int ret;

	mutex_lock(&hdev->lock);
	for (i = 0; i < chip->die_num; i++) {
		die = &chip->dies[i];
		ret = hccs_get_die_all_link_status(hdev, die, &tmp);
		if (ret) {
			mutex_unlock(&hdev->lock);
			return ret;
		}
		if (tmp != all_linked) {
			all_linked = 0;
			break;
		}
	}
	mutex_unlock(&hdev->lock);

	return sysfs_emit(buf, "%u\n", all_linked);
}
static struct kobj_attribute all_linked_on_chip_attr =
	__ATTR(all_linked, 0444, all_linked_on_chip_show, NULL);

static ssize_t linked_full_lane_on_chip_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
	struct hccs_dev *hdev = chip->hdev;
	const struct hccs_die_info *die;
	u8 full_lane = 1;
	u8 i, tmp;
	int ret;

	mutex_lock(&hdev->lock);
	for (i = 0; i < chip->die_num; i++) {
		die = &chip->dies[i];
		ret = hccs_get_die_all_port_lane_status(hdev, die, &tmp);
		if (ret) {
			mutex_unlock(&hdev->lock);
			return ret;
		}
		if (tmp != full_lane) {
			full_lane = 0;
			break;
		}
	}
	mutex_unlock(&hdev->lock);

	return sysfs_emit(buf, "%u\n", full_lane);
}
static struct kobj_attribute linked_full_lane_on_chip_attr =
	__ATTR(linked_full_lane, 0444, linked_full_lane_on_chip_show, NULL);

static ssize_t crc_err_cnt_sum_on_chip_show(struct kobject *kobj,
					    struct kobj_attribute *attr,
					    char *buf)
{
	const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
	u64 crc_err_cnt, total_crc_err_cnt = 0;
	struct hccs_dev *hdev = chip->hdev;
	const struct hccs_die_info *die;
	int ret;
	u16 i;

	mutex_lock(&hdev->lock);
	for (i = 0; i < chip->die_num; i++) {
		die = &chip->dies[i];
		ret = hccs_get_die_total_crc_err_cnt(hdev, die, &crc_err_cnt);
		if (ret) {
			mutex_unlock(&hdev->lock);
			return ret;
		}

		total_crc_err_cnt += crc_err_cnt;
	}
	mutex_unlock(&hdev->lock);

	return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
}
static struct kobj_attribute crc_err_cnt_sum_on_chip_attr =
	__ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_chip_show, NULL);

static struct attribute *hccs_chip_default_attrs[] = {
	&all_linked_on_chip_attr.attr,
	&linked_full_lane_on_chip_attr.attr,
	&crc_err_cnt_sum_on_chip_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hccs_chip_default);

static const struct kobj_type hccs_chip_type = {
	.sysfs_ops = &hccs_comm_ops,
	.default_groups = hccs_chip_default_groups,
};

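/*
 * Low power (lane number) management. Writing an HCCS type name to
 * dec_lane_of_type reduces the lane number of all ports of that type, but
 * only after every such port has been confirmed idle; writing to
 * inc_lane_of_type brings ports that are not in full lane mode back up.
 * Only the types reported by available_inc_dec_lane_types (currently
 * HCCS_V2, when the HCCS_CAPS_HCCS_V2_PM capability is set) are accepted.
 */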
static int hccs_parse_pm_port_type(struct hccs_dev *hdev, const char *buf,
				   u8 *port_type)
{
	char hccs_name[HCCS_NAME_MAX_LEN + 1] = "";
	u8 type;
	int ret;

	ret = sscanf(buf, "%" __stringify(HCCS_NAME_MAX_LEN) "s", hccs_name);
	if (ret != 1)
		return -EINVAL;

	ret = hccs_name_to_port_type(hdev, hccs_name, &type);
	if (ret) {
		dev_dbg(hdev->dev, "input invalid, please get the available types from 'used_types'.\n");
		return ret;
	}

	if (type == HCCS_V2 && hdev->caps & HCCS_CAPS_HCCS_V2_PM) {
		*port_type = type;
		return 0;
	}

	dev_dbg(hdev->dev, "%s doesn't support increasing and decreasing lane number.\n",
		hccs_name);

	return -EOPNOTSUPP;
}

static int hccs_query_port_idle_status(struct hccs_dev *hdev,
				       struct hccs_port_info *port, u8 *idle)
{
	const struct hccs_die_info *die = port->die;
	const struct hccs_chip_info *chip = die->chip;
	struct hccs_port_comm_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_port_comm_req_param *)desc.req.data;
	req_param->chip_id = chip->chip_id;
	req_param->die_id = die->die_id;
	req_param->port_id = port->port_id;
	ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_IDLE_STATUS, &desc);
	if (ret) {
		dev_err(hdev->dev,
			"get port idle status failed, ret = %d.\n", ret);
		return ret;
	}

	*idle = *((u8 *)desc.rsp.data);
	return 0;
}

static int hccs_get_all_spec_port_idle_sta(struct hccs_dev *hdev, u8 port_type,
					   bool *all_idle)
{
	struct hccs_chip_info *chip;
	struct hccs_port_info *port;
	struct hccs_die_info *die;
	int ret = 0;
	u8 i, j, k;
	u8 idle;

	*all_idle = false;
	for (i = 0; i < hdev->chip_num; i++) {
		chip = &hdev->chips[i];
		for (j = 0; j < chip->die_num; j++) {
			die = &chip->dies[j];
			for (k = 0; k < die->port_num; k++) {
				port = &die->ports[k];
				if (port->port_type != port_type)
					continue;
				ret = hccs_query_port_idle_status(hdev, port,
								  &idle);
				if (ret) {
					dev_err(hdev->dev,
						"hccs%u on chip%u/die%u get idle status failed, ret = %d.\n",
						k, i, j, ret);
					return ret;
				} else if (idle == 0) {
					dev_info(hdev->dev, "hccs%u on chip%u/die%u is busy.\n",
						 k, i, j);
					return 0;
				}
			}
		}
	}
	*all_idle = true;

	return 0;
}

static int hccs_get_all_spec_port_full_lane_sta(struct hccs_dev *hdev,
						u8 port_type, bool *full_lane)
{
	struct hccs_link_status status = {0};
	struct hccs_chip_info *chip;
	struct hccs_port_info *port;
	struct hccs_die_info *die;
	u8 i, j, k;
	int ret;

	*full_lane = false;
	for (i = 0; i < hdev->chip_num; i++) {
		chip = &hdev->chips[i];
		for (j = 0; j < chip->die_num; j++) {
			die = &chip->dies[j];
			for (k = 0; k < die->port_num; k++) {
				port = &die->ports[k];
				if (port->port_type != port_type)
					continue;
				ret = hccs_query_port_link_status(hdev, port,
								  &status);
				if (ret)
					return ret;
				if (status.lane_num != port->max_lane_num)
					return 0;
			}
		}
	}
	*full_lane = true;

	return 0;
}

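/*
 * Increasing the lane number is a three step handshake with firmware, all
 * carried by the HCCS_PM_INC_LANE command with different opt_type values:
 * prepare the ports, poll until SerDes adaptation reports success, then
 * kick off HPCS retraining. hccs_start_inc_lane() runs the full sequence.
 */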
static int hccs_prepare_inc_lane(struct hccs_dev *hdev, u8 type)
{
	struct hccs_inc_lane_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
	req_param->port_type = type;
	req_param->opt_type = HCCS_PREPARE_INC_LANE;
	ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
	if (ret)
		dev_err(hdev->dev, "prepare for increasing lane failed, ret = %d.\n",
			ret);

	return ret;
}

static int hccs_wait_serdes_adapt_completed(struct hccs_dev *hdev, u8 type)
{
#define HCCS_MAX_WAIT_CNT_FOR_ADAPT	10
#define HCCS_QUERY_ADAPT_RES_DELAY_MS	100
#define HCCS_SERDES_ADAPT_OK		0

	struct hccs_inc_lane_req_param *req_param;
	u8 wait_cnt = HCCS_MAX_WAIT_CNT_FOR_ADAPT;
	struct hccs_desc desc;
	u8 adapt_res;
	int ret;

	do {
		hccs_init_req_desc(&desc);
		req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
		req_param->port_type = type;
		req_param->opt_type = HCCS_GET_ADAPT_RES;
		ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
		if (ret) {
			dev_err(hdev->dev, "query adapting result failed, ret = %d.\n",
				ret);
			return ret;
		}
		adapt_res = *((u8 *)&desc.rsp.data);
		if (adapt_res == HCCS_SERDES_ADAPT_OK)
			return 0;

		msleep(HCCS_QUERY_ADAPT_RES_DELAY_MS);
	} while (--wait_cnt);

	dev_err(hdev->dev, "waiting for adaptation to complete timed out.\n");

	return -ETIMEDOUT;
}

static int hccs_start_hpcs_retraining(struct hccs_dev *hdev, u8 type)
{
	struct hccs_inc_lane_req_param *req_param;
	struct hccs_desc desc;
	int ret;

	hccs_init_req_desc(&desc);
	req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
	req_param->port_type = type;
	req_param->opt_type = HCCS_START_RETRAINING;
	ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
	if (ret)
		dev_err(hdev->dev, "start hpcs retraining failed, ret = %d.\n",
			ret);

	return ret;
}

static int hccs_start_inc_lane(struct hccs_dev *hdev, u8 type)
{
	int ret;

	ret = hccs_prepare_inc_lane(hdev, type);
	if (ret)
		return ret;

	ret = hccs_wait_serdes_adapt_completed(hdev, type);
	if (ret)
		return ret;

	return hccs_start_hpcs_retraining(hdev, type);
}

static int hccs_start_dec_lane(struct hccs_dev *hdev, u8 type)
{
	struct hccs_desc desc;
	u8 *port_type;
	int ret;

	hccs_init_req_desc(&desc);
	port_type = (u8 *)desc.req.data;
	*port_type = type;
	ret = hccs_pcc_cmd_send(hdev, HCCS_PM_DEC_LANE, &desc);
	if (ret)
		dev_err(hdev->dev, "start to decrease lane failed, ret = %d.\n",
			ret);

	return ret;
}

static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
	bool all_in_idle;
	u8 port_type;
	int ret;

	ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
	if (ret)
		return ret;

	mutex_lock(&hdev->lock);
	ret = hccs_get_all_spec_port_idle_sta(hdev, port_type, &all_in_idle);
	if (ret)
		goto out;
	if (!all_in_idle) {
		ret = -EBUSY;
		dev_err(hdev->dev, "please don't decrease lanes on %s under high load, ret = %d.\n",
			hccs_port_type_to_name(hdev, port_type), ret);
		goto out;
	}

	ret = hccs_start_dec_lane(hdev, port_type);
out:
	mutex_unlock(&hdev->lock);

	return ret == 0 ? count : ret;
}
static struct kobj_attribute dec_lane_of_type_attr =
	__ATTR(dec_lane_of_type, 0200, NULL, dec_lane_of_type_store);

static ssize_t inc_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
	bool full_lane;
	u8 port_type;
	int ret;

	ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
	if (ret)
		return ret;

	mutex_lock(&hdev->lock);
	ret = hccs_get_all_spec_port_full_lane_sta(hdev, port_type, &full_lane);
	if (ret || full_lane)
		goto out;

	ret = hccs_start_inc_lane(hdev, port_type);
out:
	mutex_unlock(&hdev->lock);
	return ret == 0 ? count : ret;
}
static struct kobj_attribute inc_lane_of_type_attr =
	__ATTR(inc_lane_of_type, 0200, NULL, inc_lane_of_type_store);

static ssize_t available_inc_dec_lane_types_show(struct kobject *kobj,
						 struct kobj_attribute *attr,
						 char *buf)
{
	struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);

	if (hdev->caps & HCCS_CAPS_HCCS_V2_PM)
		return sysfs_emit(buf, "%s\n",
				  hccs_port_type_to_name(hdev, HCCS_V2));

	return -EINVAL;
}
static struct kobj_attribute available_inc_dec_lane_types_attr =
	__ATTR(available_inc_dec_lane_types, 0444,
	       available_inc_dec_lane_types_show, NULL);

static ssize_t used_types_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
	int len = 0;
	u16 i;

	for (i = 0; i < hdev->used_type_num - 1; i++)
		len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
	len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);

	return len;
}
static struct kobj_attribute used_types_attr =
	__ATTR(used_types, 0444, used_types_show, NULL);

static void hccs_remove_misc_sysfs(struct hccs_dev *hdev)
{
	sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);

	if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
		return;

	sysfs_remove_file(&hdev->dev->kobj,
			  &available_inc_dec_lane_types_attr.attr);
	sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
	sysfs_remove_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
}

static int hccs_add_misc_sysfs(struct hccs_dev *hdev)
{
	int ret;

	ret = sysfs_create_file(&hdev->dev->kobj, &used_types_attr.attr);
	if (ret)
		return ret;

	if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
		return 0;

	ret = sysfs_create_file(&hdev->dev->kobj,
				&available_inc_dec_lane_types_attr.attr);
	if (ret)
		goto used_types_remove;

	ret = sysfs_create_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
	if (ret)
		goto inc_dec_lane_types_remove;

	ret = sysfs_create_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
	if (ret)
		goto dec_lane_of_type_remove;

	return 0;

dec_lane_of_type_remove:
	sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
inc_dec_lane_types_remove:
	sysfs_remove_file(&hdev->dev->kobj,
			  &available_inc_dec_lane_types_attr.attr);
used_types_remove:
	sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
	return ret;
}

static void hccs_remove_die_dir(struct hccs_die_info *die)
{
	struct hccs_port_info *port;
	u8 i;

	for (i = 0; i < die->port_num; i++) {
		port = &die->ports[i];
		if (port->dir_created)
			kobject_put(&port->kobj);
	}

	kobject_put(&die->kobj);
}

static void hccs_remove_chip_dir(struct hccs_chip_info *chip)
{
	struct hccs_die_info *die;
	u8 i;

	for (i = 0; i < chip->die_num; i++) {
		die = &chip->dies[i];
		if (die->dir_created)
			hccs_remove_die_dir(die);
	}

	kobject_put(&chip->kobj);
}

static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->chip_num; i++)
		hccs_remove_chip_dir(&hdev->chips[i]);

	hccs_remove_misc_sysfs(hdev);
}

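/*
 * Build the sysfs topology: one "chip<N>" kobject per chip under the
 * platform device, one "die<N>" kobject per die under its chip, and one
 * "hccs<N>" kobject per port under its die. On any failure the partially
 * created hierarchy is torn down with the matching remove helpers above.
 */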
static int hccs_create_hccs_dir(struct hccs_dev *hdev,
				struct hccs_die_info *die,
				struct hccs_port_info *port)
{
	int ret;

	ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
				   &die->kobj, "hccs%u", port->port_id);
	if (ret) {
		kobject_put(&port->kobj);
		return ret;
	}

	return 0;
}

static int hccs_create_die_dir(struct hccs_dev *hdev,
			       struct hccs_chip_info *chip,
			       struct hccs_die_info *die)
{
	struct hccs_port_info *port;
	int ret;
	u16 i;

	ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
				   &chip->kobj, "die%u", die->die_id);
	if (ret) {
		kobject_put(&die->kobj);
		return ret;
	}

	for (i = 0; i < die->port_num; i++) {
		port = &die->ports[i];
		ret = hccs_create_hccs_dir(hdev, die, port);
		if (ret) {
			dev_err(hdev->dev, "create hccs%u dir failed.\n",
				port->port_id);
			goto err;
		}
		port->dir_created = true;
	}

	return 0;
err:
	hccs_remove_die_dir(die);

	return ret;
}

static int hccs_create_chip_dir(struct hccs_dev *hdev,
				struct hccs_chip_info *chip)
{
	struct hccs_die_info *die;
	int ret;
	u16 id;

	ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
				   &hdev->dev->kobj, "chip%u", chip->chip_id);
	if (ret) {
		kobject_put(&chip->kobj);
		return ret;
	}

	for (id = 0; id < chip->die_num; id++) {
		die = &chip->dies[id];
		ret = hccs_create_die_dir(hdev, chip, die);
		if (ret)
			goto err;
		die->dir_created = true;
	}

	return 0;
err:
	hccs_remove_chip_dir(chip);

	return ret;
}

static int hccs_create_topo_dirs(struct hccs_dev *hdev)
{
	struct hccs_chip_info *chip;
	u8 id, k;
	int ret;

	for (id = 0; id < hdev->chip_num; id++) {
		chip = &hdev->chips[id];
		ret = hccs_create_chip_dir(hdev, chip);
		if (ret) {
			dev_err(hdev->dev, "init chip%u dir failed!\n", id);
			goto err;
		}
	}

	ret = hccs_add_misc_sysfs(hdev);
	if (ret) {
		dev_err(hdev->dev, "create misc sysfs interface failed, ret = %d\n", ret);
		goto err;
	}

	return 0;
err:
	for (k = 0; k < id; k++)
		hccs_remove_chip_dir(&hdev->chips[k]);

	return ret;
}

static int hccs_probe(struct platform_device *pdev)
{
	struct acpi_device *acpi_dev;
	struct hccs_dev *hdev;
	int rc;

	if (acpi_disabled) {
		dev_err(&pdev->dev, "ACPI is disabled.\n");
		return -ENODEV;
	}
	acpi_dev = ACPI_COMPANION(&pdev->dev);
	if (!acpi_dev)
		return -ENODEV;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;
	hdev->acpi_dev = acpi_dev;
	hdev->dev = &pdev->dev;
	platform_set_drvdata(pdev, hdev);

	/*
	 * This can never fail because the driver and the device have
	 * already been matched.
	 */
	hdev->verspec_data = acpi_device_get_match_data(hdev->dev);

	mutex_init(&hdev->lock);
	rc = hccs_get_pcc_chan_id(hdev);
	if (rc)
		return rc;
	rc = hccs_register_pcc_channel(hdev);
	if (rc)
		return rc;

	rc = hccs_get_dev_caps(hdev);
	if (rc)
		goto unregister_pcc_chan;

	rc = hccs_get_hw_info(hdev);
	if (rc)
		goto unregister_pcc_chan;

	rc = hccs_init_type_name_maps(hdev);
	if (rc)
		goto unregister_pcc_chan;

	rc = hccs_create_topo_dirs(hdev);
	if (rc)
		goto unregister_pcc_chan;

	return 0;

unregister_pcc_chan:
	hccs_unregister_pcc_channel(hdev);

	return rc;
}

static void hccs_remove(struct platform_device *pdev)
{
	struct hccs_dev *hdev = platform_get_drvdata(pdev);

	hccs_remove_topo_dirs(hdev);
	hccs_unregister_pcc_channel(hdev);
}

static const struct hccs_verspecific_data hisi04b1_verspec_data = {
	.rx_callback = NULL,
	.wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
	.fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
	.shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
	.has_txdone_irq = false,
};

static const struct hccs_verspecific_data hisi04b2_verspec_data = {
	.rx_callback = hccs_pcc_rx_callback,
	.wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
	.fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
	.shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
	.has_txdone_irq = true,
};

static const struct acpi_device_id hccs_acpi_match[] = {
	{ "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
	{ "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
	{ }
};
MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);

static struct platform_driver hccs_driver = {
	.probe = hccs_probe,
	.remove = hccs_remove,
	.driver = {
		.name = "kunpeng_hccs",
		.acpi_match_table = hccs_acpi_match,
	},
};

module_platform_driver(hccs_driver);

MODULE_DESCRIPTION("Kunpeng SoC HCCS driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huisong Li <lihuisong@huawei.com>");