1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * The Huawei Cache Coherence System (HCCS) is a multi-chip interconnection
4 * bus protocol.
5 *
6 * Copyright (c) 2023 Hisilicon Limited.
7 * Author: Huisong Li <lihuisong@huawei.com>
8 *
9 * HCCS driver for Kunpeng SoC provides the following features:
10 * - Retrieve the following information about each port:
11 * - port type
12 * - lane mode
13 * - enable
14 * - current lane mode
15 * - link finite state machine
16 * - lane mask
17 * - CRC error count
18 *
19 * - Retrieve the following information about all the ports on the chip or
20 * the die:
21 * - whether all enabled ports are linked
22 * - whether all linked ports are running at full lane
23 * - CRC error count sum
24 *
25 * - Retrieve all HCCS types used on the platform.
26 *
27 * - Support the low power feature for all ports of a specified HCCS type,
28 * and provide the following interfaces:
29 * - query which HCCS types support increasing and decreasing the lane number.
30 * - decrease the lane number of all ports of a specified HCCS type when idle.
31 * - increase the lane number of all ports of a specified HCCS type.
32 */
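/*
 * A rough sketch of the sysfs hierarchy built by this driver from the kobject
 * and attribute code below (the parent directory is the ACPI platform device,
 * and the exact type name strings depend on HCCS_IP_PREFIX, so both are
 * illustrative assumptions here, not definitions from this file):
 *
 *   <platform device>/used_types
 *   <platform device>/available_inc_dec_lane_types  (only with HCCS v2 PM caps)
 *   <platform device>/dec_lane_of_type              (write-only, with PM caps)
 *   <platform device>/inc_lane_of_type              (write-only, with PM caps)
 *   <platform device>/chip<X>/{all_linked,linked_full_lane,crc_err_cnt}
 *   <platform device>/chip<X>/die<Y>/{all_linked,linked_full_lane,crc_err_cnt}
 *   <platform device>/chip<X>/die<Y>/hccs<N>/{type,lane_mode,enable,
 *                       cur_lane_num,link_fsm,lane_mask,crc_err_cnt}
 *
 * For example, writing one of the names listed in 'used_types' to
 * 'dec_lane_of_type' requests a lane decrease for all idle ports of that type.
 */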
33 #include <linux/acpi.h>
34 #include <linux/delay.h>
35 #include <linux/iopoll.h>
36 #include <linux/platform_device.h>
37 #include <linux/stringify.h>
38 #include <linux/sysfs.h>
39 #include <linux/types.h>
40
41 #include <acpi/pcc.h>
42
43 #include "kunpeng_hccs.h"
44
45 /*
46 * Arbitrary retries in case the remote processor is slow to respond
47 * to PCC commands
48 */
49 #define HCCS_PCC_CMD_WAIT_RETRIES_NUM 500ULL
50 #define HCCS_POLL_STATUS_TIME_INTERVAL_US 3
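/*
 * These values feed the command deadline computed in
 * hccs_register_pcc_channel(): deadline_us = HCCS_PCC_CMD_WAIT_RETRIES_NUM *
 * pcc_chan->latency. As a hypothetical example, a nominal PCCT latency of
 * 1000 us would give a deadline of roughly 500 ms before a command is
 * reported as timed out.
 */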
51
52 static struct hccs_port_info *kobj_to_port_info(struct kobject *k)
53 {
54 return container_of(k, struct hccs_port_info, kobj);
55 }
56
57 static struct hccs_die_info *kobj_to_die_info(struct kobject *k)
58 {
59 return container_of(k, struct hccs_die_info, kobj);
60 }
61
62 static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
63 {
64 return container_of(k, struct hccs_chip_info, kobj);
65 }
66
67 static struct hccs_dev *device_kobj_to_hccs_dev(struct kobject *k)
68 {
69 struct device *dev = container_of(k, struct device, kobj);
70 struct platform_device *pdev =
71 container_of(dev, struct platform_device, dev);
72
73 return platform_get_drvdata(pdev);
74 }
75
76 static char *hccs_port_type_to_name(struct hccs_dev *hdev, u8 type)
77 {
78 u16 i;
79
80 for (i = 0; i < hdev->used_type_num; i++) {
81 if (hdev->type_name_maps[i].type == type)
82 return hdev->type_name_maps[i].name;
83 }
84
85 return NULL;
86 }
87
88 static int hccs_name_to_port_type(struct hccs_dev *hdev,
89 const char *name, u8 *type)
90 {
91 u16 i;
92
93 for (i = 0; i < hdev->used_type_num; i++) {
94 if (strcmp(hdev->type_name_maps[i].name, name) == 0) {
95 *type = hdev->type_name_maps[i].type;
96 return 0;
97 }
98 }
99
100 return -EINVAL;
101 }
102
103 struct hccs_register_ctx {
104 struct device *dev;
105 u8 chan_id;
106 int err;
107 };
108
109 static acpi_status hccs_get_register_cb(struct acpi_resource *ares,
110 void *context)
111 {
112 struct acpi_resource_generic_register *reg;
113 struct hccs_register_ctx *ctx = context;
114
115 if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
116 return AE_OK;
117
118 reg = &ares->data.generic_reg;
119 if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) {
120 dev_err(ctx->dev, "Bad register resource.\n");
121 ctx->err = -EINVAL;
122 return AE_ERROR;
123 }
124 ctx->chan_id = reg->access_size;
125
126 return AE_OK;
127 }
128
129 static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
130 {
131 acpi_handle handle = ACPI_HANDLE(hdev->dev);
132 struct hccs_register_ctx ctx = {0};
133 acpi_status status;
134
135 if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
136 dev_err(hdev->dev, "No _CRS method.\n");
137 return -ENODEV;
138 }
139
140 ctx.dev = hdev->dev;
141 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
142 hccs_get_register_cb, &ctx);
143 if (ACPI_FAILURE(status))
144 return ctx.err ? ctx.err : -EINVAL;
145 hdev->chan_id = ctx.chan_id;
146
147 return 0;
148 }
149
150 static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
151 {
152 if (ret < 0)
153 pr_debug("TX did not complete: CMD sent:0x%x, ret:%d\n",
154 *(u8 *)msg, ret);
155 else
156 pr_debug("TX completed. CMD sent:0x%x, ret:%d\n",
157 *(u8 *)msg, ret);
158 }
159
160 static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
161 {
162 struct hccs_mbox_client_info *cl_info =
163 container_of(cl, struct hccs_mbox_client_info, client);
164
165 complete(&cl_info->done);
166 }
167
168 static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
169 {
170 pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
171 }
172
173 static int hccs_register_pcc_channel(struct hccs_dev *hdev)
174 {
175 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
176 struct mbox_client *cl = &cl_info->client;
177 struct pcc_mbox_chan *pcc_chan;
178 struct mbox_chan *mbox_chan;
179 struct device *dev = hdev->dev;
180 int rc;
181
182 cl->dev = dev;
183 cl->tx_block = false;
184 cl->knows_txdone = true;
185 cl->tx_done = hccs_chan_tx_done;
186 cl->rx_callback = hdev->verspec_data->rx_callback;
187 init_completion(&cl_info->done);
188
189 pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
190 if (IS_ERR(pcc_chan)) {
191 dev_err(dev, "PCC channel request failed.\n");
192 rc = -ENODEV;
193 goto out;
194 }
195 cl_info->pcc_chan = pcc_chan;
196 mbox_chan = pcc_chan->mchan;
197
198 /*
199 * pcc_chan->latency is just a nominal value. In reality the remote
200 * processor could be much slower to reply. So add an arbitrary amount
201 * of wait on top of nominal.
202 */
203 cl_info->deadline_us =
204 HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
205 if (!hdev->verspec_data->has_txdone_irq &&
206 mbox_chan->mbox->txdone_irq) {
207 dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
208 rc = -EINVAL;
209 goto err_mbx_channel_free;
210 } else if (hdev->verspec_data->has_txdone_irq &&
211 !mbox_chan->mbox->txdone_irq) {
212 dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
213 rc = -EINVAL;
214 goto err_mbx_channel_free;
215 }
216
217 if (pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
218 dev_err(dev, "Base size (%llu) of PCC communication region must be %d bytes.\n",
219 pcc_chan->shmem_size, HCCS_PCC_SHARE_MEM_BYTES);
220 rc = -EINVAL;
221 goto err_mbx_channel_free;
222 }
223
224 return 0;
225
226 err_mbx_channel_free:
227 pcc_mbox_free_channel(cl_info->pcc_chan);
228 out:
229 return rc;
230 }
231
232 static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
233 {
234 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
235 struct acpi_pcct_shared_memory __iomem *comm_base =
236 cl_info->pcc_chan->shmem;
237 u16 status;
238 int ret;
239
240 /*
241 * Poll the PCC status register every 3 us (delay_us) for a maximum of
242 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
243 */
244 ret = readw_poll_timeout(&comm_base->status, status,
245 status & PCC_STATUS_CMD_COMPLETE,
246 HCCS_POLL_STATUS_TIME_INTERVAL_US,
247 cl_info->deadline_us);
248 if (unlikely(ret))
249 dev_err(hdev->dev, "poll PCC status failed, ret = %d.\n", ret);
250
251 return ret;
252 }
253
254 static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
255 {
256 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
257
258 if (!wait_for_completion_timeout(&cl_info->done,
259 usecs_to_jiffies(cl_info->deadline_us))) {
260 dev_err(hdev->dev, "PCC command executed timeout!\n");
261 return -ETIMEDOUT;
262 }
263
264 return 0;
265 }
266
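/*
 * Two PCC shared-memory layouts are used below: the generic header
 * (struct acpi_pcct_shared_memory, selected by hisi04b1_verspec_data with
 * polled completion) and the extended PCC header
 * (struct acpi_pcct_ext_pcc_shared_memory, selected by hisi04b2_verspec_data
 * with interrupt-driven completion); see the version-specific data at the
 * end of this file.
 */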
267 static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
268 u8 cmd,
269 struct hccs_desc *desc,
270 void __iomem *comm_space,
271 u16 space_size)
272 {
273 struct acpi_pcct_shared_memory tmp = {
274 .signature = PCC_SIGNATURE | hdev->chan_id,
275 .command = cmd,
276 .status = 0,
277 };
278
279 memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
280 sizeof(struct acpi_pcct_shared_memory));
281
282 /* Copy the message to the PCC comm space */
283 memcpy_toio(comm_space, (void *)desc, space_size);
284 }
285
286 static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
287 u8 cmd,
288 struct hccs_desc *desc,
289 void __iomem *comm_space,
290 u16 space_size)
291 {
292 struct acpi_pcct_ext_pcc_shared_memory tmp = {
293 .signature = PCC_SIGNATURE | hdev->chan_id,
294 .flags = PCC_CMD_COMPLETION_NOTIFY,
295 .length = HCCS_PCC_SHARE_MEM_BYTES,
296 .command = cmd,
297 };
298
299 memcpy_toio(hdev->cl_info.pcc_chan->shmem, (void *)&tmp,
300 sizeof(struct acpi_pcct_ext_pcc_shared_memory));
301
302 /* Copy the message to the PCC comm space */
303 memcpy_toio(comm_space, (void *)desc, space_size);
304 }
305
306 static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
307 struct hccs_desc *desc)
308 {
309 const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
310 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
311 struct mbox_chan *mbox_chan = cl_info->pcc_chan->mchan;
312 struct hccs_fw_inner_head *fw_inner_head;
313 void __iomem *comm_space;
314 u16 space_size;
315 int ret;
316
317 comm_space = cl_info->pcc_chan->shmem + verspec_data->shared_mem_size;
318 space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
319 verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
320 comm_space, space_size);
321 if (verspec_data->has_txdone_irq)
322 reinit_completion(&cl_info->done);
323
324 /* Ring doorbell */
325 ret = mbox_send_message(mbox_chan, &cmd);
326 if (ret < 0) {
327 dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
328 ret);
329 goto end;
330 }
331
332 ret = verspec_data->wait_cmd_complete(hdev);
333 if (ret)
334 goto end;
335
336 /* Copy response data */
337 memcpy_fromio((void *)desc, comm_space, space_size);
338 fw_inner_head = &desc->rsp.fw_inner_head;
339 if (fw_inner_head->retStatus) {
340 dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
341 fw_inner_head->retStatus);
342 ret = -EIO;
343 }
344
345 end:
346 if (verspec_data->has_txdone_irq)
347 mbox_chan_txdone(mbox_chan, ret);
348 else
349 mbox_client_txdone(mbox_chan, ret);
350 return ret;
351 }
352
353 static void hccs_init_req_desc(struct hccs_desc *desc)
354 {
355 struct hccs_req_desc *req = &desc->req;
356
357 memset(desc, 0, sizeof(*desc));
358 req->req_head.module_code = HCCS_SERDES_MODULE_CODE;
359 }
360
361 static int hccs_get_dev_caps(struct hccs_dev *hdev)
362 {
363 struct hccs_desc desc;
364 int ret;
365
366 hccs_init_req_desc(&desc);
367 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DEV_CAP, &desc);
368 if (ret) {
369 dev_err(hdev->dev, "Get device capabilities failed, ret = %d.\n",
370 ret);
371 return ret;
372 }
373 memcpy(&hdev->caps, desc.rsp.data, sizeof(hdev->caps));
374
375 return 0;
376 }
377
378 static int hccs_query_chip_num_on_platform(struct hccs_dev *hdev)
379 {
380 struct hccs_desc desc;
381 int ret;
382
383 hccs_init_req_desc(&desc);
384 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_CHIP_NUM, &desc);
385 if (ret) {
386 dev_err(hdev->dev, "query system chip number failed, ret = %d.\n",
387 ret);
388 return ret;
389 }
390
391 hdev->chip_num = *((u8 *)&desc.rsp.data);
392 if (!hdev->chip_num) {
393 dev_err(hdev->dev, "chip num obtained from firmware is zero.\n");
394 return -EINVAL;
395 }
396
397 return 0;
398 }
399
400 static int hccs_get_chip_info(struct hccs_dev *hdev,
401 struct hccs_chip_info *chip)
402 {
403 struct hccs_die_num_req_param *req_param;
404 struct hccs_desc desc;
405 int ret;
406
407 hccs_init_req_desc(&desc);
408 req_param = (struct hccs_die_num_req_param *)desc.req.data;
409 req_param->chip_id = chip->chip_id;
410 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_NUM, &desc);
411 if (ret)
412 return ret;
413
414 chip->die_num = *((u8 *)&desc.rsp.data);
415
416 return 0;
417 }
418
419 static int hccs_query_chip_info_on_platform(struct hccs_dev *hdev)
420 {
421 struct hccs_chip_info *chip;
422 int ret;
423 u8 idx;
424
425 ret = hccs_query_chip_num_on_platform(hdev);
426 if (ret) {
427 dev_err(hdev->dev, "query chip number on platform failed, ret = %d.\n",
428 ret);
429 return ret;
430 }
431
432 hdev->chips = devm_kzalloc(hdev->dev,
433 hdev->chip_num * sizeof(struct hccs_chip_info),
434 GFP_KERNEL);
435 if (!hdev->chips) {
436 dev_err(hdev->dev, "allocate all chips memory failed.\n");
437 return -ENOMEM;
438 }
439
440 for (idx = 0; idx < hdev->chip_num; idx++) {
441 chip = &hdev->chips[idx];
442 chip->chip_id = idx;
443 ret = hccs_get_chip_info(hdev, chip);
444 if (ret) {
445 dev_err(hdev->dev, "get chip%u info failed, ret = %d.\n",
446 idx, ret);
447 return ret;
448 }
449 chip->hdev = hdev;
450 }
451
452 return 0;
453 }
454
455 static int hccs_query_die_info_on_chip(struct hccs_dev *hdev, u8 chip_id,
456 u8 die_idx, struct hccs_die_info *die)
457 {
458 struct hccs_die_info_req_param *req_param;
459 struct hccs_die_info_rsp_data *rsp_data;
460 struct hccs_desc desc;
461 int ret;
462
463 hccs_init_req_desc(&desc);
464 req_param = (struct hccs_die_info_req_param *)desc.req.data;
465 req_param->chip_id = chip_id;
466 req_param->die_idx = die_idx;
467 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_INFO, &desc);
468 if (ret)
469 return ret;
470
471 rsp_data = (struct hccs_die_info_rsp_data *)desc.rsp.data;
472 die->die_id = rsp_data->die_id;
473 die->port_num = rsp_data->port_num;
474 die->min_port_id = rsp_data->min_port_id;
475 die->max_port_id = rsp_data->max_port_id;
476 if (die->min_port_id > die->max_port_id) {
477 dev_err(hdev->dev, "min port id(%u) > max port id(%u) on die_idx(%u).\n",
478 die->min_port_id, die->max_port_id, die_idx);
479 return -EINVAL;
480 }
481 if (die->max_port_id > HCCS_DIE_MAX_PORT_ID) {
482 dev_err(hdev->dev, "max port id(%u) on die_idx(%u) is too big.\n",
483 die->max_port_id, die_idx);
484 return -EINVAL;
485 }
486
487 return 0;
488 }
489
490 static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
491 {
492 struct device *dev = hdev->dev;
493 struct hccs_chip_info *chip;
494 struct hccs_die_info *die;
495 bool has_die_info = false;
496 u8 i, j;
497 int ret;
498
499 for (i = 0; i < hdev->chip_num; i++) {
500 chip = &hdev->chips[i];
501 if (!chip->die_num)
502 continue;
503
504 has_die_info = true;
505 chip->dies = devm_kzalloc(hdev->dev,
506 chip->die_num * sizeof(struct hccs_die_info),
507 GFP_KERNEL);
508 if (!chip->dies) {
509 dev_err(dev, "allocate all dies memory on chip%u failed.\n",
510 i);
511 return -ENOMEM;
512 }
513
514 for (j = 0; j < chip->die_num; j++) {
515 die = &chip->dies[j];
516 ret = hccs_query_die_info_on_chip(hdev, i, j, die);
517 if (ret) {
518 dev_err(dev, "get die idx (%u) info on chip%u failed, ret = %d.\n",
519 j, i, ret);
520 return ret;
521 }
522 die->chip = chip;
523 }
524 }
525
526 return has_die_info ? 0 : -EINVAL;
527 }
528
529 static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
530 struct hccs_desc *desc,
531 void *buf, size_t buf_len,
532 struct hccs_rsp_head *rsp_head)
533 {
534 struct hccs_rsp_head *head;
535 struct hccs_rsp_desc *rsp;
536 int ret;
537
538 ret = hccs_pcc_cmd_send(hdev, opcode, desc);
539 if (ret)
540 return ret;
541
542 rsp = &desc->rsp;
543 head = &rsp->rsp_head;
544 if (head->data_len > buf_len) {
545 dev_err(hdev->dev,
546 "buffer overflow (buf_len = %zu, data_len = %u)!\n",
547 buf_len, head->data_len);
548 return -ENOMEM;
549 }
550
551 memcpy(buf, rsp->data, head->data_len);
552 *rsp_head = *head;
553
554 return 0;
555 }
556
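/*
 * Port attributes are retrieved in chunks: each request starts at start_id,
 * the firmware fills rsp_head.data_len bytes and returns rsp_head.next_id,
 * and the loop continues until the die's max_port_id has been covered.
 */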
557 static int hccs_get_all_port_attr(struct hccs_dev *hdev,
558 struct hccs_die_info *die,
559 struct hccs_port_attr *attrs, u16 size)
560 {
561 struct hccs_die_comm_req_param *req_param;
562 struct hccs_req_head *req_head;
563 struct hccs_rsp_head rsp_head;
564 struct hccs_desc desc;
565 size_t left_buf_len;
566 u32 data_len = 0;
567 u8 start_id;
568 u8 *buf;
569 int ret;
570
571 buf = (u8 *)attrs;
572 left_buf_len = sizeof(struct hccs_port_attr) * size;
573 start_id = die->min_port_id;
574 while (start_id <= die->max_port_id) {
575 hccs_init_req_desc(&desc);
576 req_head = &desc.req.req_head;
577 req_head->start_id = start_id;
578 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
579 req_param->chip_id = die->chip->chip_id;
580 req_param->die_id = die->die_id;
581
582 ret = hccs_get_bd_info(hdev, HCCS_GET_DIE_PORT_INFO, &desc,
583 buf + data_len, left_buf_len, &rsp_head);
584 if (ret) {
585 dev_err(hdev->dev,
586 "get the information of port%u on die%u failed, ret = %d.\n",
587 start_id, die->die_id, ret);
588 return ret;
589 }
590
591 data_len += rsp_head.data_len;
592 left_buf_len -= rsp_head.data_len;
593 if (unlikely(rsp_head.next_id <= start_id)) {
594 dev_err(hdev->dev,
595 "next port id (%u) is not greater than last start id (%u) on die%u.\n",
596 rsp_head.next_id, start_id, die->die_id);
597 return -EINVAL;
598 }
599 start_id = rsp_head.next_id;
600 }
601
602 if (left_buf_len != 0) {
603 dev_err(hdev->dev, "failed to get the expected port number(%u) attribute.\n",
604 size);
605 return -EINVAL;
606 }
607
608 return 0;
609 }
610
611 static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
612 struct hccs_die_info *die)
613 {
614 struct hccs_port_attr *attrs;
615 struct hccs_port_info *port;
616 int ret;
617 u8 i;
618
619 attrs = kcalloc(die->port_num, sizeof(struct hccs_port_attr),
620 GFP_KERNEL);
621 if (!attrs)
622 return -ENOMEM;
623
624 ret = hccs_get_all_port_attr(hdev, die, attrs, die->port_num);
625 if (ret)
626 goto out;
627
628 for (i = 0; i < die->port_num; i++) {
629 port = &die->ports[i];
630 port->port_id = attrs[i].port_id;
631 port->port_type = attrs[i].port_type;
632 port->max_lane_num = attrs[i].max_lane_num;
633 port->enable = attrs[i].enable;
634 port->die = die;
635 }
636
637 out:
638 kfree(attrs);
639 return ret;
640 }
641
642 static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
643 {
644 struct device *dev = hdev->dev;
645 struct hccs_chip_info *chip;
646 struct hccs_die_info *die;
647 bool has_port_info = false;
648 u8 i, j;
649 int ret;
650
651 for (i = 0; i < hdev->chip_num; i++) {
652 chip = &hdev->chips[i];
653 for (j = 0; j < chip->die_num; j++) {
654 die = &chip->dies[j];
655 if (!die->port_num)
656 continue;
657
658 has_port_info = true;
659 die->ports = devm_kzalloc(dev,
660 die->port_num * sizeof(struct hccs_port_info),
661 GFP_KERNEL);
662 if (!die->ports) {
663 dev_err(dev, "allocate ports memory on chip%u/die%u failed.\n",
664 i, die->die_id);
665 return -ENOMEM;
666 }
667
668 ret = hccs_get_all_port_info_on_die(hdev, die);
669 if (ret) {
670 dev_err(dev, "get all port info on chip%u/die%u failed, ret = %d.\n",
671 i, die->die_id, ret);
672 return ret;
673 }
674 }
675 }
676
677 return has_port_info ? 0 : -EINVAL;
678 }
679
680 static int hccs_get_hw_info(struct hccs_dev *hdev)
681 {
682 int ret;
683
684 ret = hccs_query_chip_info_on_platform(hdev);
685 if (ret) {
686 dev_err(hdev->dev, "query chip info on platform failed, ret = %d.\n",
687 ret);
688 return ret;
689 }
690
691 ret = hccs_query_all_die_info_on_platform(hdev);
692 if (ret) {
693 dev_err(hdev->dev, "query all die info on platform failed, ret = %d.\n",
694 ret);
695 return ret;
696 }
697
698 ret = hccs_query_all_port_info_on_platform(hdev);
699 if (ret) {
700 dev_err(hdev->dev, "query all port info on platform failed, ret = %d.\n",
701 ret);
702 return ret;
703 }
704
705 return 0;
706 }
707
708 static u16 hccs_calc_used_type_num(struct hccs_dev *hdev,
709 unsigned long *hccs_ver)
710 {
711 struct hccs_chip_info *chip;
712 struct hccs_port_info *port;
713 struct hccs_die_info *die;
714 u16 used_type_num = 0;
715 u16 i, j, k;
716
717 for (i = 0; i < hdev->chip_num; i++) {
718 chip = &hdev->chips[i];
719 for (j = 0; j < chip->die_num; j++) {
720 die = &chip->dies[j];
721 for (k = 0; k < die->port_num; k++) {
722 port = &die->ports[k];
723 set_bit(port->port_type, hccs_ver);
724 }
725 }
726 }
727
728 for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1)
729 used_type_num++;
730
731 return used_type_num;
732 }
733
734 static int hccs_init_type_name_maps(struct hccs_dev *hdev)
735 {
736 DECLARE_BITMAP(hccs_ver, HCCS_IP_MAX + 1) = {};
737 unsigned int i;
738 u16 idx = 0;
739
740 hdev->used_type_num = hccs_calc_used_type_num(hdev, hccs_ver);
741 hdev->type_name_maps = devm_kcalloc(hdev->dev, hdev->used_type_num,
742 sizeof(struct hccs_type_name_map),
743 GFP_KERNEL);
744 if (!hdev->type_name_maps)
745 return -ENOMEM;
746
747 for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1) {
748 hdev->type_name_maps[idx].type = i;
749 sprintf(hdev->type_name_maps[idx].name,
750 "%s%u", HCCS_IP_PREFIX, i);
751 idx++;
752 }
753
754 return 0;
755 }
756
757 static int hccs_query_port_link_status(struct hccs_dev *hdev,
758 const struct hccs_port_info *port,
759 struct hccs_link_status *link_status)
760 {
761 const struct hccs_die_info *die = port->die;
762 const struct hccs_chip_info *chip = die->chip;
763 struct hccs_port_comm_req_param *req_param;
764 struct hccs_desc desc;
765 int ret;
766
767 hccs_init_req_desc(&desc);
768 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
769 req_param->chip_id = chip->chip_id;
770 req_param->die_id = die->die_id;
771 req_param->port_id = port->port_id;
772 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_LINK_STATUS, &desc);
773 if (ret) {
774 dev_err(hdev->dev,
775 "get port link status info failed, ret = %d.\n", ret);
776 return ret;
777 }
778
779 *link_status = *((struct hccs_link_status *)desc.rsp.data);
780
781 return 0;
782 }
783
784 static int hccs_query_port_crc_err_cnt(struct hccs_dev *hdev,
785 const struct hccs_port_info *port,
786 u64 *crc_err_cnt)
787 {
788 const struct hccs_die_info *die = port->die;
789 const struct hccs_chip_info *chip = die->chip;
790 struct hccs_port_comm_req_param *req_param;
791 struct hccs_desc desc;
792 int ret;
793
794 hccs_init_req_desc(&desc);
795 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
796 req_param->chip_id = chip->chip_id;
797 req_param->die_id = die->die_id;
798 req_param->port_id = port->port_id;
799 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_CRC_ERR_CNT, &desc);
800 if (ret) {
801 dev_err(hdev->dev,
802 "get port crc error count failed, ret = %d.\n", ret);
803 return ret;
804 }
805
806 memcpy(crc_err_cnt, &desc.rsp.data, sizeof(u64));
807
808 return 0;
809 }
810
811 static int hccs_get_die_all_link_status(struct hccs_dev *hdev,
812 const struct hccs_die_info *die,
813 u8 *all_linked)
814 {
815 struct hccs_die_comm_req_param *req_param;
816 struct hccs_desc desc;
817 int ret;
818
819 if (die->port_num == 0) {
820 *all_linked = 1;
821 return 0;
822 }
823
824 hccs_init_req_desc(&desc);
825 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
826 req_param->chip_id = die->chip->chip_id;
827 req_param->die_id = die->die_id;
828 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LINK_STA, &desc);
829 if (ret) {
830 dev_err(hdev->dev,
831 "get link status of all ports failed on die%u, ret = %d.\n",
832 die->die_id, ret);
833 return ret;
834 }
835
836 *all_linked = *((u8 *)&desc.rsp.data);
837
838 return 0;
839 }
840
841 static int hccs_get_die_all_port_lane_status(struct hccs_dev *hdev,
842 const struct hccs_die_info *die,
843 u8 *full_lane)
844 {
845 struct hccs_die_comm_req_param *req_param;
846 struct hccs_desc desc;
847 int ret;
848
849 if (die->port_num == 0) {
850 *full_lane = 1;
851 return 0;
852 }
853
854 hccs_init_req_desc(&desc);
855 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
856 req_param->chip_id = die->chip->chip_id;
857 req_param->die_id = die->die_id;
858 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LANE_STA, &desc);
859 if (ret) {
860 dev_err(hdev->dev, "get lane status of all ports failed on die%u, ret = %d.\n",
861 die->die_id, ret);
862 return ret;
863 }
864
865 *full_lane = *((u8 *)&desc.rsp.data);
866
867 return 0;
868 }
869
870 static int hccs_get_die_total_crc_err_cnt(struct hccs_dev *hdev,
871 const struct hccs_die_info *die,
872 u64 *total_crc_err_cnt)
873 {
874 struct hccs_die_comm_req_param *req_param;
875 struct hccs_desc desc;
876 int ret;
877
878 if (die->port_num == 0) {
879 *total_crc_err_cnt = 0;
880 return 0;
881 }
882
883 hccs_init_req_desc(&desc);
884 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
885 req_param->chip_id = die->chip->chip_id;
886 req_param->die_id = die->die_id;
887 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_CRC_ERR_CNT, &desc);
888 if (ret) {
889 dev_err(hdev->dev, "get crc error count sum failed on die%u, ret = %d.\n",
890 die->die_id, ret);
891 return ret;
892 }
893
894 memcpy(total_crc_err_cnt, &desc.rsp.data, sizeof(u64));
895
896 return 0;
897 }
898
899 static ssize_t hccs_show(struct kobject *k, struct attribute *attr, char *buf)
900 {
901 struct kobj_attribute *kobj_attr;
902
903 kobj_attr = container_of(attr, struct kobj_attribute, attr);
904
905 return kobj_attr->show(k, kobj_attr, buf);
906 }
907
908 static const struct sysfs_ops hccs_comm_ops = {
909 .show = hccs_show,
910 };
911
912 static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
913 char *buf)
914 {
915 const struct hccs_port_info *port = kobj_to_port_info(kobj);
916
917 return sysfs_emit(buf, "%s%u\n", HCCS_IP_PREFIX, port->port_type);
918 }
919 static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);
920
921 static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
922 char *buf)
923 {
924 const struct hccs_port_info *port = kobj_to_port_info(kobj);
925
926 return sysfs_emit(buf, "x%u\n", port->max_lane_num);
927 }
928 static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);
929
930 static ssize_t enable_show(struct kobject *kobj,
931 struct kobj_attribute *attr, char *buf)
932 {
933 const struct hccs_port_info *port = kobj_to_port_info(kobj);
934
935 return sysfs_emit(buf, "%u\n", port->enable);
936 }
937 static struct kobj_attribute port_enable_attr = __ATTR_RO(enable);
938
939 static ssize_t cur_lane_num_show(struct kobject *kobj,
940 struct kobj_attribute *attr, char *buf)
941 {
942 const struct hccs_port_info *port = kobj_to_port_info(kobj);
943 struct hccs_dev *hdev = port->die->chip->hdev;
944 struct hccs_link_status link_status = {0};
945 int ret;
946
947 mutex_lock(&hdev->lock);
948 ret = hccs_query_port_link_status(hdev, port, &link_status);
949 mutex_unlock(&hdev->lock);
950 if (ret)
951 return ret;
952
953 return sysfs_emit(buf, "%u\n", link_status.lane_num);
954 }
955 static struct kobj_attribute cur_lane_num_attr = __ATTR_RO(cur_lane_num);
956
957 static ssize_t link_fsm_show(struct kobject *kobj,
958 struct kobj_attribute *attr, char *buf)
959 {
960 const struct hccs_port_info *port = kobj_to_port_info(kobj);
961 struct hccs_dev *hdev = port->die->chip->hdev;
962 struct hccs_link_status link_status = {0};
963 const struct {
964 u8 link_fsm;
965 char *str;
966 } link_fsm_map[] = {
967 {HCCS_PORT_RESET, "reset"},
968 {HCCS_PORT_SETUP, "setup"},
969 {HCCS_PORT_CONFIG, "config"},
970 {HCCS_PORT_READY, "link-up"},
971 };
972 const char *link_fsm_str = "unknown";
973 size_t i;
974 int ret;
975
976 mutex_lock(&hdev->lock);
977 ret = hccs_query_port_link_status(hdev, port, &link_status);
978 mutex_unlock(&hdev->lock);
979 if (ret)
980 return ret;
981
982 for (i = 0; i < ARRAY_SIZE(link_fsm_map); i++) {
983 if (link_fsm_map[i].link_fsm == link_status.link_fsm) {
984 link_fsm_str = link_fsm_map[i].str;
985 break;
986 }
987 }
988
989 return sysfs_emit(buf, "%s\n", link_fsm_str);
990 }
991 static struct kobj_attribute link_fsm_attr = __ATTR_RO(link_fsm);
992
993 static ssize_t lane_mask_show(struct kobject *kobj,
994 struct kobj_attribute *attr, char *buf)
995 {
996 const struct hccs_port_info *port = kobj_to_port_info(kobj);
997 struct hccs_dev *hdev = port->die->chip->hdev;
998 struct hccs_link_status link_status = {0};
999 int ret;
1000
1001 mutex_lock(&hdev->lock);
1002 ret = hccs_query_port_link_status(hdev, port, &link_status);
1003 mutex_unlock(&hdev->lock);
1004 if (ret)
1005 return ret;
1006
1007 return sysfs_emit(buf, "0x%x\n", link_status.lane_mask);
1008 }
1009 static struct kobj_attribute lane_mask_attr = __ATTR_RO(lane_mask);
1010
1011 static ssize_t crc_err_cnt_show(struct kobject *kobj,
1012 struct kobj_attribute *attr, char *buf)
1013 {
1014 const struct hccs_port_info *port = kobj_to_port_info(kobj);
1015 struct hccs_dev *hdev = port->die->chip->hdev;
1016 u64 crc_err_cnt;
1017 int ret;
1018
1019 mutex_lock(&hdev->lock);
1020 ret = hccs_query_port_crc_err_cnt(hdev, port, &crc_err_cnt);
1021 mutex_unlock(&hdev->lock);
1022 if (ret)
1023 return ret;
1024
1025 return sysfs_emit(buf, "%llu\n", crc_err_cnt);
1026 }
1027 static struct kobj_attribute crc_err_cnt_attr = __ATTR_RO(crc_err_cnt);
1028
1029 static struct attribute *hccs_port_default_attrs[] = {
1030 &hccs_type_attr.attr,
1031 &lane_mode_attr.attr,
1032 &port_enable_attr.attr,
1033 &cur_lane_num_attr.attr,
1034 &link_fsm_attr.attr,
1035 &lane_mask_attr.attr,
1036 &crc_err_cnt_attr.attr,
1037 NULL,
1038 };
1039 ATTRIBUTE_GROUPS(hccs_port_default);
1040
1041 static const struct kobj_type hccs_port_type = {
1042 .sysfs_ops = &hccs_comm_ops,
1043 .default_groups = hccs_port_default_groups,
1044 };
1045
1046 static ssize_t all_linked_on_die_show(struct kobject *kobj,
1047 struct kobj_attribute *attr, char *buf)
1048 {
1049 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1050 struct hccs_dev *hdev = die->chip->hdev;
1051 u8 all_linked;
1052 int ret;
1053
1054 mutex_lock(&hdev->lock);
1055 ret = hccs_get_die_all_link_status(hdev, die, &all_linked);
1056 mutex_unlock(&hdev->lock);
1057 if (ret)
1058 return ret;
1059
1060 return sysfs_emit(buf, "%u\n", all_linked);
1061 }
1062 static struct kobj_attribute all_linked_on_die_attr =
1063 __ATTR(all_linked, 0444, all_linked_on_die_show, NULL);
1064
1065 static ssize_t linked_full_lane_on_die_show(struct kobject *kobj,
1066 struct kobj_attribute *attr,
1067 char *buf)
1068 {
1069 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1070 struct hccs_dev *hdev = die->chip->hdev;
1071 u8 full_lane;
1072 int ret;
1073
1074 mutex_lock(&hdev->lock);
1075 ret = hccs_get_die_all_port_lane_status(hdev, die, &full_lane);
1076 mutex_unlock(&hdev->lock);
1077 if (ret)
1078 return ret;
1079
1080 return sysfs_emit(buf, "%u\n", full_lane);
1081 }
1082 static struct kobj_attribute linked_full_lane_on_die_attr =
1083 __ATTR(linked_full_lane, 0444, linked_full_lane_on_die_show, NULL);
1084
1085 static ssize_t crc_err_cnt_sum_on_die_show(struct kobject *kobj,
1086 struct kobj_attribute *attr,
1087 char *buf)
1088 {
1089 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1090 struct hccs_dev *hdev = die->chip->hdev;
1091 u64 total_crc_err_cnt;
1092 int ret;
1093
1094 mutex_lock(&hdev->lock);
1095 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &total_crc_err_cnt);
1096 mutex_unlock(&hdev->lock);
1097 if (ret)
1098 return ret;
1099
1100 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1101 }
1102 static struct kobj_attribute crc_err_cnt_sum_on_die_attr =
1103 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_die_show, NULL);
1104
1105 static struct attribute *hccs_die_default_attrs[] = {
1106 &all_linked_on_die_attr.attr,
1107 &linked_full_lane_on_die_attr.attr,
1108 &crc_err_cnt_sum_on_die_attr.attr,
1109 NULL,
1110 };
1111 ATTRIBUTE_GROUPS(hccs_die_default);
1112
1113 static const struct kobj_type hccs_die_type = {
1114 .sysfs_ops = &hccs_comm_ops,
1115 .default_groups = hccs_die_default_groups,
1116 };
1117
1118 static ssize_t all_linked_on_chip_show(struct kobject *kobj,
1119 struct kobj_attribute *attr, char *buf)
1120 {
1121 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1122 struct hccs_dev *hdev = chip->hdev;
1123 const struct hccs_die_info *die;
1124 u8 all_linked = 1;
1125 u8 i, tmp;
1126 int ret;
1127
1128 mutex_lock(&hdev->lock);
1129 for (i = 0; i < chip->die_num; i++) {
1130 die = &chip->dies[i];
1131 ret = hccs_get_die_all_link_status(hdev, die, &tmp);
1132 if (ret) {
1133 mutex_unlock(&hdev->lock);
1134 return ret;
1135 }
1136 if (tmp != all_linked) {
1137 all_linked = 0;
1138 break;
1139 }
1140 }
1141 mutex_unlock(&hdev->lock);
1142
1143 return sysfs_emit(buf, "%u\n", all_linked);
1144 }
1145 static struct kobj_attribute all_linked_on_chip_attr =
1146 __ATTR(all_linked, 0444, all_linked_on_chip_show, NULL);
1147
1148 static ssize_t linked_full_lane_on_chip_show(struct kobject *kobj,
1149 struct kobj_attribute *attr,
1150 char *buf)
1151 {
1152 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1153 struct hccs_dev *hdev = chip->hdev;
1154 const struct hccs_die_info *die;
1155 u8 full_lane = 1;
1156 u8 i, tmp;
1157 int ret;
1158
1159 mutex_lock(&hdev->lock);
1160 for (i = 0; i < chip->die_num; i++) {
1161 die = &chip->dies[i];
1162 ret = hccs_get_die_all_port_lane_status(hdev, die, &tmp);
1163 if (ret) {
1164 mutex_unlock(&hdev->lock);
1165 return ret;
1166 }
1167 if (tmp != full_lane) {
1168 full_lane = 0;
1169 break;
1170 }
1171 }
1172 mutex_unlock(&hdev->lock);
1173
1174 return sysfs_emit(buf, "%u\n", full_lane);
1175 }
1176 static struct kobj_attribute linked_full_lane_on_chip_attr =
1177 __ATTR(linked_full_lane, 0444, linked_full_lane_on_chip_show, NULL);
1178
1179 static ssize_t crc_err_cnt_sum_on_chip_show(struct kobject *kobj,
1180 struct kobj_attribute *attr,
1181 char *buf)
1182 {
1183 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1184 u64 crc_err_cnt, total_crc_err_cnt = 0;
1185 struct hccs_dev *hdev = chip->hdev;
1186 const struct hccs_die_info *die;
1187 int ret;
1188 u16 i;
1189
1190 mutex_lock(&hdev->lock);
1191 for (i = 0; i < chip->die_num; i++) {
1192 die = &chip->dies[i];
1193 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &crc_err_cnt);
1194 if (ret) {
1195 mutex_unlock(&hdev->lock);
1196 return ret;
1197 }
1198
1199 total_crc_err_cnt += crc_err_cnt;
1200 }
1201 mutex_unlock(&hdev->lock);
1202
1203 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1204 }
1205 static struct kobj_attribute crc_err_cnt_sum_on_chip_attr =
1206 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_chip_show, NULL);
1207
1208 static struct attribute *hccs_chip_default_attrs[] = {
1209 &all_linked_on_chip_attr.attr,
1210 &linked_full_lane_on_chip_attr.attr,
1211 &crc_err_cnt_sum_on_chip_attr.attr,
1212 NULL,
1213 };
1214 ATTRIBUTE_GROUPS(hccs_chip_default);
1215
1216 static const struct kobj_type hccs_chip_type = {
1217 .sysfs_ops = &hccs_comm_ops,
1218 .default_groups = hccs_chip_default_groups,
1219 };
1220
1221 static int hccs_parse_pm_port_type(struct hccs_dev *hdev, const char *buf,
1222 u8 *port_type)
1223 {
1224 char hccs_name[HCCS_NAME_MAX_LEN + 1] = "";
1225 u8 type;
1226 int ret;
1227
1228 ret = sscanf(buf, "%" __stringify(HCCS_NAME_MAX_LEN) "s", hccs_name);
1229 if (ret != 1)
1230 return -EINVAL;
1231
1232 ret = hccs_name_to_port_type(hdev, hccs_name, &type);
1233 if (ret) {
1234 dev_dbg(hdev->dev, "input invalid, please get the available types from 'used_types'.\n");
1235 return ret;
1236 }
1237
1238 if (type == HCCS_V2 && hdev->caps & HCCS_CAPS_HCCS_V2_PM) {
1239 *port_type = type;
1240 return 0;
1241 }
1242
1243 dev_dbg(hdev->dev, "%s doesn't support for increasing and decreasing lane.\n",
1244 hccs_name);
1245
1246 return -EOPNOTSUPP;
1247 }
1248
1249 static int hccs_query_port_idle_status(struct hccs_dev *hdev,
1250 struct hccs_port_info *port, u8 *idle)
1251 {
1252 const struct hccs_die_info *die = port->die;
1253 const struct hccs_chip_info *chip = die->chip;
1254 struct hccs_port_comm_req_param *req_param;
1255 struct hccs_desc desc;
1256 int ret;
1257
1258 hccs_init_req_desc(&desc);
1259 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
1260 req_param->chip_id = chip->chip_id;
1261 req_param->die_id = die->die_id;
1262 req_param->port_id = port->port_id;
1263 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_IDLE_STATUS, &desc);
1264 if (ret) {
1265 dev_err(hdev->dev,
1266 "get port idle status failed, ret = %d.\n", ret);
1267 return ret;
1268 }
1269
1270 *idle = *((u8 *)desc.rsp.data);
1271 return 0;
1272 }
1273
1274 static int hccs_get_all_spec_port_idle_sta(struct hccs_dev *hdev, u8 port_type,
1275 bool *all_idle)
1276 {
1277 struct hccs_chip_info *chip;
1278 struct hccs_port_info *port;
1279 struct hccs_die_info *die;
1280 int ret = 0;
1281 u8 i, j, k;
1282 u8 idle;
1283
1284 *all_idle = false;
1285 for (i = 0; i < hdev->chip_num; i++) {
1286 chip = &hdev->chips[i];
1287 for (j = 0; j < chip->die_num; j++) {
1288 die = &chip->dies[j];
1289 for (k = 0; k < die->port_num; k++) {
1290 port = &die->ports[k];
1291 if (port->port_type != port_type)
1292 continue;
1293 ret = hccs_query_port_idle_status(hdev, port,
1294 &idle);
1295 if (ret) {
1296 dev_err(hdev->dev,
1297 "hccs%u on chip%u/die%u get idle status failed, ret = %d.\n",
1298 k, i, j, ret);
1299 return ret;
1300 } else if (idle == 0) {
1301 dev_info(hdev->dev, "hccs%u on chip%u/die%u is busy.\n",
1302 k, i, j);
1303 return 0;
1304 }
1305 }
1306 }
1307 }
1308 *all_idle = true;
1309
1310 return 0;
1311 }
1312
1313 static int hccs_get_all_spec_port_full_lane_sta(struct hccs_dev *hdev,
1314 u8 port_type, bool *full_lane)
1315 {
1316 struct hccs_link_status status = {0};
1317 struct hccs_chip_info *chip;
1318 struct hccs_port_info *port;
1319 struct hccs_die_info *die;
1320 u8 i, j, k;
1321 int ret;
1322
1323 *full_lane = false;
1324 for (i = 0; i < hdev->chip_num; i++) {
1325 chip = &hdev->chips[i];
1326 for (j = 0; j < chip->die_num; j++) {
1327 die = &chip->dies[j];
1328 for (k = 0; k < die->port_num; k++) {
1329 port = &die->ports[k];
1330 if (port->port_type != port_type)
1331 continue;
1332 ret = hccs_query_port_link_status(hdev, port,
1333 &status);
1334 if (ret)
1335 return ret;
1336 if (status.lane_num != port->max_lane_num)
1337 return 0;
1338 }
1339 }
1340 }
1341 *full_lane = true;
1342
1343 return 0;
1344 }
1345
1346 static int hccs_prepare_inc_lane(struct hccs_dev *hdev, u8 type)
1347 {
1348 struct hccs_inc_lane_req_param *req_param;
1349 struct hccs_desc desc;
1350 int ret;
1351
1352 hccs_init_req_desc(&desc);
1353 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1354 req_param->port_type = type;
1355 req_param->opt_type = HCCS_PREPARE_INC_LANE;
1356 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1357 if (ret)
1358 dev_err(hdev->dev, "prepare for increasing lane failed, ret = %d.\n",
1359 ret);
1360
1361 return ret;
1362 }
1363
1364 static int hccs_wait_serdes_adapt_completed(struct hccs_dev *hdev, u8 type)
1365 {
1366 #define HCCS_MAX_WAIT_CNT_FOR_ADAPT 10
1367 #define HCCS_QUERY_ADAPT_RES_DELAY_MS 100
1368 #define HCCS_SERDES_ADAPT_OK 0
1369
1370 struct hccs_inc_lane_req_param *req_param;
1371 u8 wait_cnt = HCCS_MAX_WAIT_CNT_FOR_ADAPT;
1372 struct hccs_desc desc;
1373 u8 adapt_res;
1374 int ret;
1375
1376 do {
1377 hccs_init_req_desc(&desc);
1378 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1379 req_param->port_type = type;
1380 req_param->opt_type = HCCS_GET_ADAPT_RES;
1381 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1382 if (ret) {
1383 dev_err(hdev->dev, "query adapting result failed, ret = %d.\n",
1384 ret);
1385 return ret;
1386 }
1387 adapt_res = *((u8 *)&desc.rsp.data);
1388 if (adapt_res == HCCS_SERDES_ADAPT_OK)
1389 return 0;
1390
1391 msleep(HCCS_QUERY_ADAPT_RES_DELAY_MS);
1392 } while (--wait_cnt);
1393
1394 dev_err(hdev->dev, "wait for adapting completed timeout.\n");
1395
1396 return -ETIMEDOUT;
1397 }
1398
1399 static int hccs_start_hpcs_retraining(struct hccs_dev *hdev, u8 type)
1400 {
1401 struct hccs_inc_lane_req_param *req_param;
1402 struct hccs_desc desc;
1403 int ret;
1404
1405 hccs_init_req_desc(&desc);
1406 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1407 req_param->port_type = type;
1408 req_param->opt_type = HCCS_START_RETRAINING;
1409 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1410 if (ret)
1411 dev_err(hdev->dev, "start hpcs retraining failed, ret = %d.\n",
1412 ret);
1413
1414 return ret;
1415 }
1416
1417 static int hccs_start_inc_lane(struct hccs_dev *hdev, u8 type)
1418 {
1419 int ret;
1420
1421 ret = hccs_prepare_inc_lane(hdev, type);
1422 if (ret)
1423 return ret;
1424
1425 ret = hccs_wait_serdes_adapt_completed(hdev, type);
1426 if (ret)
1427 return ret;
1428
1429 return hccs_start_hpcs_retraining(hdev, type);
1430 }
1431
1432 static int hccs_start_dec_lane(struct hccs_dev *hdev, u8 type)
1433 {
1434 struct hccs_desc desc;
1435 u8 *port_type;
1436 int ret;
1437
1438 hccs_init_req_desc(&desc);
1439 port_type = (u8 *)desc.req.data;
1440 *port_type = type;
1441 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_DEC_LANE, &desc);
1442 if (ret)
1443 dev_err(hdev->dev, "start to decrease lane failed, ret = %d.\n",
1444 ret);
1445
1446 return ret;
1447 }
1448
1449 static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
1450 const char *buf, size_t count)
1451 {
1452 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1453 bool all_in_idle;
1454 u8 port_type;
1455 int ret;
1456
1457 ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
1458 if (ret)
1459 return ret;
1460
1461 mutex_lock(&hdev->lock);
1462 ret = hccs_get_all_spec_port_idle_sta(hdev, port_type, &all_in_idle);
1463 if (ret)
1464 goto out;
1465 if (!all_in_idle) {
1466 ret = -EBUSY;
1467 dev_err(hdev->dev, "please don't decrese lanes on high load with %s, ret = %d.\n",
1468 hccs_port_type_to_name(hdev, port_type), ret);
1469 goto out;
1470 }
1471
1472 ret = hccs_start_dec_lane(hdev, port_type);
1473 out:
1474 mutex_unlock(&hdev->lock);
1475
1476 return ret == 0 ? count : ret;
1477 }
1478 static struct kobj_attribute dec_lane_of_type_attr =
1479 __ATTR(dec_lane_of_type, 0200, NULL, dec_lane_of_type_store);
1480
1481 static ssize_t inc_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
1482 const char *buf, size_t count)
1483 {
1484 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1485 bool full_lane;
1486 u8 port_type;
1487 int ret;
1488
1489 ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
1490 if (ret)
1491 return ret;
1492
1493 mutex_lock(&hdev->lock);
1494 ret = hccs_get_all_spec_port_full_lane_sta(hdev, port_type, &full_lane);
1495 if (ret || full_lane)
1496 goto out;
1497
1498 ret = hccs_start_inc_lane(hdev, port_type);
1499 out:
1500 mutex_unlock(&hdev->lock);
1501 return ret == 0 ? count : ret;
1502 }
1503 static struct kobj_attribute inc_lane_of_type_attr =
1504 __ATTR(inc_lane_of_type, 0200, NULL, inc_lane_of_type_store);
1505
1506 static ssize_t available_inc_dec_lane_types_show(struct kobject *kobj,
1507 struct kobj_attribute *attr,
1508 char *buf)
1509 {
1510 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1511
1512 if (hdev->caps & HCCS_CAPS_HCCS_V2_PM)
1513 return sysfs_emit(buf, "%s\n",
1514 hccs_port_type_to_name(hdev, HCCS_V2));
1515
1516 return -EINVAL;
1517 }
1518 static struct kobj_attribute available_inc_dec_lane_types_attr =
1519 __ATTR(available_inc_dec_lane_types, 0444,
1520 available_inc_dec_lane_types_show, NULL);
1521
1522 static ssize_t used_types_show(struct kobject *kobj,
1523 struct kobj_attribute *attr, char *buf)
1524 {
1525 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1526 int len = 0;
1527 u16 i;
1528
1529 for (i = 0; i < hdev->used_type_num - 1; i++)
1530 len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
1531 len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
1532
1533 return len;
1534 }
1535 static struct kobj_attribute used_types_attr =
1536 __ATTR(used_types, 0444, used_types_show, NULL);
1537
1538 static void hccs_remove_misc_sysfs(struct hccs_dev *hdev)
1539 {
1540 sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
1541
1542 if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
1543 return;
1544
1545 sysfs_remove_file(&hdev->dev->kobj,
1546 &available_inc_dec_lane_types_attr.attr);
1547 sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1548 sysfs_remove_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
1549 }
1550
1551 static int hccs_add_misc_sysfs(struct hccs_dev *hdev)
1552 {
1553 int ret;
1554
1555 ret = sysfs_create_file(&hdev->dev->kobj, &used_types_attr.attr);
1556 if (ret)
1557 return ret;
1558
1559 if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
1560 return 0;
1561
1562 ret = sysfs_create_file(&hdev->dev->kobj,
1563 &available_inc_dec_lane_types_attr.attr);
1564 if (ret)
1565 goto used_types_remove;
1566
1567 ret = sysfs_create_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1568 if (ret)
1569 goto inc_dec_lane_types_remove;
1570
1571 ret = sysfs_create_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
1572 if (ret)
1573 goto dec_lane_of_type_remove;
1574
1575 return 0;
1576
1577 dec_lane_of_type_remove:
1578 sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1579 inc_dec_lane_types_remove:
1580 sysfs_remove_file(&hdev->dev->kobj,
1581 &available_inc_dec_lane_types_attr.attr);
1582 used_types_remove:
1583 sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
1584 return ret;
1585 }
1586
1587 static void hccs_remove_die_dir(struct hccs_die_info *die)
1588 {
1589 struct hccs_port_info *port;
1590 u8 i;
1591
1592 for (i = 0; i < die->port_num; i++) {
1593 port = &die->ports[i];
1594 if (port->dir_created)
1595 kobject_put(&port->kobj);
1596 }
1597
1598 kobject_put(&die->kobj);
1599 }
1600
1601 static void hccs_remove_chip_dir(struct hccs_chip_info *chip)
1602 {
1603 struct hccs_die_info *die;
1604 u8 i;
1605
1606 for (i = 0; i < chip->die_num; i++) {
1607 die = &chip->dies[i];
1608 if (die->dir_created)
1609 hccs_remove_die_dir(die);
1610 }
1611
1612 kobject_put(&chip->kobj);
1613 }
1614
1615 static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
1616 {
1617 u8 i;
1618
1619 for (i = 0; i < hdev->chip_num; i++)
1620 hccs_remove_chip_dir(&hdev->chips[i]);
1621
1622 hccs_remove_misc_sysfs(hdev);
1623 }
1624
1625 static int hccs_create_hccs_dir(struct hccs_dev *hdev,
1626 struct hccs_die_info *die,
1627 struct hccs_port_info *port)
1628 {
1629 int ret;
1630
1631 ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
1632 &die->kobj, "hccs%u", port->port_id);
1633 if (ret) {
1634 kobject_put(&port->kobj);
1635 return ret;
1636 }
1637
1638 return 0;
1639 }
1640
1641 static int hccs_create_die_dir(struct hccs_dev *hdev,
1642 struct hccs_chip_info *chip,
1643 struct hccs_die_info *die)
1644 {
1645 struct hccs_port_info *port;
1646 int ret;
1647 u16 i;
1648
1649 ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
1650 &chip->kobj, "die%u", die->die_id);
1651 if (ret) {
1652 kobject_put(&die->kobj);
1653 return ret;
1654 }
1655
1656 for (i = 0; i < die->port_num; i++) {
1657 port = &die->ports[i];
1658 ret = hccs_create_hccs_dir(hdev, die, port);
1659 if (ret) {
1660 dev_err(hdev->dev, "create hccs%u dir failed.\n",
1661 port->port_id);
1662 goto err;
1663 }
1664 port->dir_created = true;
1665 }
1666
1667 return 0;
1668 err:
1669 hccs_remove_die_dir(die);
1670
1671 return ret;
1672 }
1673
1674 static int hccs_create_chip_dir(struct hccs_dev *hdev,
1675 struct hccs_chip_info *chip)
1676 {
1677 struct hccs_die_info *die;
1678 int ret;
1679 u16 id;
1680
1681 ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
1682 &hdev->dev->kobj, "chip%u", chip->chip_id);
1683 if (ret) {
1684 kobject_put(&chip->kobj);
1685 return ret;
1686 }
1687
1688 for (id = 0; id < chip->die_num; id++) {
1689 die = &chip->dies[id];
1690 ret = hccs_create_die_dir(hdev, chip, die);
1691 if (ret)
1692 goto err;
1693 die->dir_created = true;
1694 }
1695
1696 return 0;
1697 err:
1698 hccs_remove_chip_dir(chip);
1699
1700 return ret;
1701 }
1702
1703 static int hccs_create_topo_dirs(struct hccs_dev *hdev)
1704 {
1705 struct hccs_chip_info *chip;
1706 u8 id, k;
1707 int ret;
1708
1709 for (id = 0; id < hdev->chip_num; id++) {
1710 chip = &hdev->chips[id];
1711 ret = hccs_create_chip_dir(hdev, chip);
1712 if (ret) {
1713 dev_err(hdev->dev, "init chip%u dir failed!\n", id);
1714 goto err;
1715 }
1716 }
1717
1718 ret = hccs_add_misc_sysfs(hdev);
1719 if (ret) {
1720 dev_err(hdev->dev, "create misc sysfs interface failed, ret = %d\n", ret);
1721 goto err;
1722 }
1723
1724 return 0;
1725 err:
1726 for (k = 0; k < id; k++)
1727 hccs_remove_chip_dir(&hdev->chips[k]);
1728
1729 return ret;
1730 }
1731
1732 static int hccs_probe(struct platform_device *pdev)
1733 {
1734 struct acpi_device *acpi_dev;
1735 struct hccs_dev *hdev;
1736 int rc;
1737
1738 if (acpi_disabled) {
1739 dev_err(&pdev->dev, "acpi is disabled.\n");
1740 return -ENODEV;
1741 }
1742 acpi_dev = ACPI_COMPANION(&pdev->dev);
1743 if (!acpi_dev)
1744 return -ENODEV;
1745
1746 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1747 if (!hdev)
1748 return -ENOMEM;
1749 hdev->acpi_dev = acpi_dev;
1750 hdev->dev = &pdev->dev;
1751 platform_set_drvdata(pdev, hdev);
1752
1753 /*
1754 * This can never fail because the driver and the device have already been matched.
1755 */
1756 hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
1757
1758 mutex_init(&hdev->lock);
1759 rc = hccs_get_pcc_chan_id(hdev);
1760 if (rc)
1761 return rc;
1762 rc = hccs_register_pcc_channel(hdev);
1763 if (rc)
1764 return rc;
1765
1766 rc = hccs_get_dev_caps(hdev);
1767 if (rc)
1768 goto unregister_pcc_chan;
1769
1770 rc = hccs_get_hw_info(hdev);
1771 if (rc)
1772 goto unregister_pcc_chan;
1773
1774 rc = hccs_init_type_name_maps(hdev);
1775 if (rc)
1776 goto unregister_pcc_chan;
1777
1778 rc = hccs_create_topo_dirs(hdev);
1779 if (rc)
1780 goto unregister_pcc_chan;
1781
1782 return 0;
1783
1784 unregister_pcc_chan:
1785 hccs_unregister_pcc_channel(hdev);
1786
1787 return rc;
1788 }
1789
1790 static void hccs_remove(struct platform_device *pdev)
1791 {
1792 struct hccs_dev *hdev = platform_get_drvdata(pdev);
1793
1794 hccs_remove_topo_dirs(hdev);
1795 hccs_unregister_pcc_channel(hdev);
1796 }
1797
1798 static const struct hccs_verspecific_data hisi04b1_verspec_data = {
1799 .rx_callback = NULL,
1800 .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
1801 .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
1802 .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
1803 .has_txdone_irq = false,
1804 };
1805
1806 static const struct hccs_verspecific_data hisi04b2_verspec_data = {
1807 .rx_callback = hccs_pcc_rx_callback,
1808 .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
1809 .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
1810 .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
1811 .has_txdone_irq = true,
1812 };
1813
1814 static const struct acpi_device_id hccs_acpi_match[] = {
1815 { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
1816 { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
1817 { }
1818 };
1819 MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
1820
1821 static struct platform_driver hccs_driver = {
1822 .probe = hccs_probe,
1823 .remove = hccs_remove,
1824 .driver = {
1825 .name = "kunpeng_hccs",
1826 .acpi_match_table = hccs_acpi_match,
1827 },
1828 };
1829
1830 module_platform_driver(hccs_driver);
1831
1832 MODULE_DESCRIPTION("Kunpeng SoC HCCS driver");
1833 MODULE_LICENSE("GPL");
1834 MODULE_AUTHOR("Huisong Li <lihuisong@huawei.com>");
1835