// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP R5 Remote Processor driver
 *
 * Manages the Cortex-R5F cluster(s) on Xilinx ZynqMP / Versal class SoCs
 * through the remoteproc framework, the ZynqMP EEMI firmware interface
 * and the ZynqMP IPI mailbox.
 */

#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>

#include "remoteproc_internal.h"

/* IPI buffer MAX length */
#define IPI_BUF_LEN_MAX	32U

/* RX mailbox client buffer max length: payload plus the IPI message header */
#define MBOX_CLIENT_BUF_MAX	(IPI_BUF_LEN_MAX + \
				 sizeof(struct zynqmp_ipi_message))

/*
 * Settings for the RPU cluster mode, which reflects the possible values of
 * the "xlnx,cluster-mode" device-tree property.
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* When cores run as separate processor */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep,clk-for-clk */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};

/**
 * struct mem_bank_data - Memory Bank description
 *
 * @addr: Start address of memory bank
 * @da: device address
 * @size: Size of Memory bank
 * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
 * @bank_name: name of the bank for remoteproc framework
 */
struct mem_bank_data {
	phys_addr_t addr;
	u32 da;
	size_t size;
	u32 pm_domain_id;
	char *bank_name;
};

/**
 * struct mbox_info - mailbox channel data for one R5 core
 *
 * @rx_mc_buf: to copy data from mailbox rx channel
 * @tx_mc_buf: to copy data to mailbox tx channel
 * @r5_core: this mailbox's corresponding r5_core pointer
 * @mbox_work: schedule work after receiving data from mailbox
 * @mbox_cl: mailbox client
 * @tx_chan: mailbox tx channel
 * @rx_chan: mailbox rx channel
 */
struct mbox_info {
	unsigned char rx_mc_buf[MBOX_CLIENT_BUF_MAX];
	unsigned char tx_mc_buf[MBOX_CLIENT_BUF_MAX];
	struct zynqmp_r5_core *r5_core;
	struct work_struct mbox_work;
	struct mbox_client mbox_cl;
	struct mbox_chan *tx_chan;
	struct mbox_chan *rx_chan;
};

/*
 * Hardcoded TCM bank values. This will stay in driver to maintain backward
 * compatibility with device-tree that does not have TCM information.
 */
static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};

/* In lockstep mode cluster uses each 64KB TCM from second core as well */
static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe10000UL, 0x10000, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffe30000UL, 0x30000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};

/**
 * struct zynqmp_r5_core - per-core state for one RPU instance
 *
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rproc: rproc handle
 * @pm_domain_id: RPU CPU power domain id
 * @ipi: pointer to mailbox information
 */
struct zynqmp_r5_core {
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	struct rproc *rproc;
	u32 pm_domain_id;
	struct mbox_info *ipi;
};

/**
 * struct zynqmp_r5_cluster - state of the whole r5f subsystem
 *
 * @dev: r5f subsystem cluster device node
 * @mode: cluster mode of type zynqmp_r5_cluster_mode
 * @core_count: number of r5 cores used for this cluster mode
 * @r5_cores: Array of pointers pointing to r5 core
 */
struct zynqmp_r5_cluster {
	struct device *dev;
	enum zynqmp_r5_cluster_mode mode;
	int core_count;
	struct zynqmp_r5_core **r5_cores;
};

/**
 * event_notified_idr_cb() - callback for vq_interrupt per notifyid
 * @id: rproc->notify id
 * @ptr: pointer to idr private data
 * @data: data passed to idr_for_each callback
 *
 * Pass notification to remoteproc virtio
 *
 * Return: 0. having return is to satisfy the idr_for_each() function
 * pointer input argument requirement.
 **/
static int event_notified_idr_cb(int id, void *ptr, void *data)
{
	struct rproc *rproc = data;

	if (rproc_vq_interrupt(rproc, id) == IRQ_NONE)
		dev_dbg(&rproc->dev, "data not found for vqid=%d\n", id);

	return 0;
}

/**
 * handle_event_notified() - remoteproc notification work function
 * @work: pointer to the work structure
 *
 * It checks each registered remoteproc notify IDs.
 */
static void handle_event_notified(struct work_struct *work)
{
	struct mbox_info *ipi;
	struct rproc *rproc;

	ipi = container_of(work, struct mbox_info, mbox_work);
	rproc = ipi->r5_core->rproc;

	/*
	 * We only use IPI for interrupt. The RPU firmware side may or may
	 * not write the notifyid when it trigger IPI.
	 * And thus, we scan through all the registered notifyids and
	 * find which one is valid to get the message.
	 * Even if message from firmware is NULL, we attempt to get vqid
	 */
	idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
}
183 */ 184 static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg) 185 { 186 struct zynqmp_ipi_message *ipi_msg, *buf_msg; 187 struct mbox_info *ipi; 188 size_t len; 189 190 ipi = container_of(cl, struct mbox_info, mbox_cl); 191 192 /* copy data from ipi buffer to r5_core */ 193 ipi_msg = (struct zynqmp_ipi_message *)msg; 194 buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf; 195 len = ipi_msg->len; 196 if (len > IPI_BUF_LEN_MAX) { 197 dev_warn(cl->dev, "msg size exceeded than %d\n", 198 IPI_BUF_LEN_MAX); 199 len = IPI_BUF_LEN_MAX; 200 } 201 buf_msg->len = len; 202 memcpy(buf_msg->data, ipi_msg->data, len); 203 204 /* received and processed interrupt ack */ 205 if (mbox_send_message(ipi->rx_chan, NULL) < 0) 206 dev_err(cl->dev, "ack failed to mbox rx_chan\n"); 207 208 schedule_work(&ipi->mbox_work); 209 } 210 211 /** 212 * zynqmp_r5_setup_mbox() - Setup mailboxes related properties 213 * this is used for each individual R5 core 214 * 215 * @cdev: child node device 216 * 217 * Function to setup mailboxes related properties 218 * return : NULL if failed else pointer to mbox_info 219 */ 220 static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev) 221 { 222 struct mbox_client *mbox_cl; 223 struct mbox_info *ipi; 224 225 ipi = kzalloc(sizeof(*ipi), GFP_KERNEL); 226 if (!ipi) 227 return NULL; 228 229 mbox_cl = &ipi->mbox_cl; 230 mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb; 231 mbox_cl->tx_block = false; 232 mbox_cl->knows_txdone = false; 233 mbox_cl->tx_done = NULL; 234 mbox_cl->dev = cdev; 235 236 /* Request TX and RX channels */ 237 ipi->tx_chan = mbox_request_channel_byname(mbox_cl, "tx"); 238 if (IS_ERR(ipi->tx_chan)) { 239 ipi->tx_chan = NULL; 240 kfree(ipi); 241 dev_warn(cdev, "mbox tx channel request failed\n"); 242 return NULL; 243 } 244 245 ipi->rx_chan = mbox_request_channel_byname(mbox_cl, "rx"); 246 if (IS_ERR(ipi->rx_chan)) { 247 mbox_free_channel(ipi->tx_chan); 248 ipi->rx_chan = NULL; 249 ipi->tx_chan = NULL; 250 kfree(ipi); 251 
dev_warn(cdev, "mbox rx channel request failed\n"); 252 return NULL; 253 } 254 255 INIT_WORK(&ipi->mbox_work, handle_event_notified); 256 257 return ipi; 258 } 259 260 static void zynqmp_r5_free_mbox(struct mbox_info *ipi) 261 { 262 if (!ipi) 263 return; 264 265 if (ipi->tx_chan) { 266 mbox_free_channel(ipi->tx_chan); 267 ipi->tx_chan = NULL; 268 } 269 270 if (ipi->rx_chan) { 271 mbox_free_channel(ipi->rx_chan); 272 ipi->rx_chan = NULL; 273 } 274 275 kfree(ipi); 276 } 277 278 /* 279 * zynqmp_r5_core_kick() - kick a firmware if mbox is provided 280 * @rproc: r5 core's corresponding rproc structure 281 * @vqid: virtqueue ID 282 */ 283 static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid) 284 { 285 struct zynqmp_r5_core *r5_core = rproc->priv; 286 struct device *dev = r5_core->dev; 287 struct zynqmp_ipi_message *mb_msg; 288 struct mbox_info *ipi; 289 int ret; 290 291 ipi = r5_core->ipi; 292 if (!ipi) 293 return; 294 295 mb_msg = (struct zynqmp_ipi_message *)ipi->tx_mc_buf; 296 memcpy(mb_msg->data, &vqid, sizeof(vqid)); 297 mb_msg->len = sizeof(vqid); 298 ret = mbox_send_message(ipi->tx_chan, mb_msg); 299 if (ret < 0) 300 dev_warn(dev, "failed to send message\n"); 301 } 302 303 /* 304 * zynqmp_r5_rproc_start() 305 * @rproc: single R5 core's corresponding rproc instance 306 * 307 * Start R5 Core from designated boot address. 308 * 309 * return 0 on success, otherwise non-zero value on failure 310 */ 311 static int zynqmp_r5_rproc_start(struct rproc *rproc) 312 { 313 struct zynqmp_r5_core *r5_core = rproc->priv; 314 enum rpu_boot_mem bootmem; 315 int ret; 316 317 /* 318 * The exception vector pointers (EVP) refer to the base-address of 319 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector 320 * starts at the base-address and subsequent vectors are on 4-byte 321 * boundaries. 
322 * 323 * Exception vectors can start either from 0x0000_0000 (LOVEC) or 324 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory) 325 * 326 * Usually firmware will put Exception vectors at LOVEC. 327 * 328 * It is not recommend that you change the exception vector. 329 * Changing the EVP to HIVEC will result in increased interrupt latency 330 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor 331 * is non-secured, then the Cortex-R5F processor cannot access the 332 * HIVEC exception vectors in the OCM. 333 */ 334 bootmem = (rproc->bootaddr >= 0xFFFC0000) ? 335 PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC; 336 337 dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr, 338 bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM"); 339 340 ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1, 341 bootmem, ZYNQMP_PM_REQUEST_ACK_NO); 342 if (ret) 343 dev_err(r5_core->dev, 344 "failed to start RPU = 0x%x\n", r5_core->pm_domain_id); 345 return ret; 346 } 347 348 /* 349 * zynqmp_r5_rproc_stop() 350 * @rproc: single R5 core's corresponding rproc instance 351 * 352 * Power down R5 Core. 353 * 354 * return 0 on success, otherwise non-zero value on failure 355 */ 356 static int zynqmp_r5_rproc_stop(struct rproc *rproc) 357 { 358 struct zynqmp_r5_core *r5_core = rproc->priv; 359 int ret; 360 361 ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id, 362 ZYNQMP_PM_REQUEST_ACK_BLOCKING); 363 if (ret) 364 dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret); 365 366 return ret; 367 } 368 369 /* 370 * zynqmp_r5_mem_region_map() 371 * @rproc: single R5 core's corresponding rproc instance 372 * @mem: mem descriptor to map reserved memory-regions 373 * 374 * Callback to map va for memory-region's carveout. 
/*
 * zynqmp_r5_mem_region_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem descriptor to map reserved memory-regions
 *
 * Callback to map va for memory-region's carveout.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_mem_region_map(struct rproc *rproc,
				    struct rproc_mem_entry *mem)
{
	void __iomem *va;

	/* write-combined mapping; IS_ERR_OR_NULL also catches a NULL return */
	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	mem->va = (void *)va;

	return 0;
}

/*
 * zynqmp_r5_mem_region_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem entry to unmap
 *
 * Unmap memory-region carveout
 *
 * return: always returns 0
 */
static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
				      struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);
	return 0;
}

/*
 * add_mem_regions_carveout()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Construct rproc mem carveouts from memory-region property nodes
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_mem_regions_carveout(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	struct of_phandle_iterator it;
	struct reserved_mem *rmem;
	int i = 0;

	r5_core = rproc->priv;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);

	/*
	 * The iterator drops the previous node's reference on each step;
	 * of_node_put() is only needed when bailing out mid-iteration.
	 */
	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			of_node_put(it.node);
			dev_err(&rproc->dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		if (!strcmp(it.node->name, "vdev0buffer")) {
			/* Init reserved memory for vdev buffer */
			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
								 rmem->size,
								 rmem->base,
								 it.node->name);
		} else {
			/* Register associated reserved memory regions */
			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
							 (dma_addr_t)rmem->base,
							 rmem->size, rmem->base,
							 zynqmp_r5_mem_region_map,
							 zynqmp_r5_mem_region_unmap,
							 it.node->name);
		}

		if (!rproc_mem) {
			of_node_put(it.node);
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, rproc_mem);
		rproc_coredump_add_segment(rproc, rmem->base, rmem->size);

		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
			it.node->name, rmem->base, rmem->size);
		i++;
	}

	return 0;
}

/*
 * tcm_mem_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm mem entry to unmap
 *
 * Unmap TCM banks when powering down R5 core.
 *
 * return always 0
 */
static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);

	return 0;
}
/*
 * tcm_mem_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm memory entry descriptor
 *
 * Given TCM bank entry, this func setup virtual address for TCM bank
 * remoteproc carveout. It also takes care of va to da address translation
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int tcm_mem_map(struct rproc *rproc,
		       struct rproc_mem_entry *mem)
{
	void __iomem *va;

	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	/* Update memory entry va */
	mem->va = (void *)va;

	/* clear TCMs */
	memset_io(va, 0, mem->len);

	return 0;
}

/*
 * add_tcm_banks()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * allocate and add remoteproc carveout for TCM memory
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_banks(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	struct device *dev;
	u32 pm_domain_id;
	size_t bank_size;
	char *bank_name;
	u32 da;

	r5_core = rproc->priv;
	dev = r5_core->dev;
	num_banks = r5_core->tcm_bank_count;

	/*
	 * Power-on Each 64KB TCM,
	 * register its address space, map and unmap functions
	 * and add carveouts accordingly
	 */
	for (i = 0; i < num_banks; i++) {
		bank_addr = r5_core->tcm_banks[i]->addr;
		da = r5_core->tcm_banks[i]->da;
		bank_name = r5_core->tcm_banks[i]->bank_name;
		bank_size = r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			/* bank i was not powered on: the rollback loop below
			 * starts at i - 1, so it is correctly skipped.
			 */
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm;
		}

		dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
			bank_name, bank_addr, da, bank_size);

		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, da,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			/* bank i IS powered on here; release it before the
			 * rollback loop handles banks 0..i-1.
			 */
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm;
		}

		rproc_add_carveout(rproc, rproc_mem);
		rproc_coredump_add_segment(rproc, da, bank_size);
	}

	return 0;

release_tcm:
	/* If failed, Turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}

/*
 * zynqmp_r5_parse_fw()
 * @rproc: single R5 core's corresponding rproc instance
 * @fw: ptr to firmware to be loaded onto r5 core
 *
 * get resource table if available
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	int ret;

	ret = rproc_elf_load_rsc_table(rproc, fw);
	if (ret == -EINVAL) {
		/*
		 * resource table only required for IPC.
		 * if not present, this is not necessarily an error;
		 * for example, loading r5 hello world application
		 * so simply inform user and keep going.
		 */
		dev_info(&rproc->dev, "no resource table found.\n");
		ret = 0;
	}
	return ret;
}

/**
 * zynqmp_r5_rproc_prepare()
 * adds carveouts for TCM bank and reserved memory regions
 *
 * @rproc: Device node of each rproc
 *
 * Return: 0 for success else < 0 error code
 */
static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
	int ret;

	ret = add_tcm_banks(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
		return ret;
	}

	ret = add_mem_regions_carveout(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
		return ret;
	}

	return 0;
}

/**
 * zynqmp_r5_rproc_unprepare()
 * Turns off TCM banks using power-domain id
 *
 * @rproc: Device node of each rproc
 *
 * Return: always 0
 */
static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core;
	u32 pm_domain_id;
	int i;

	r5_core = rproc->priv;

	for (i = 0; i < r5_core->tcm_bank_count; i++) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		if (zynqmp_pm_release_node(pm_domain_id))
			dev_warn(r5_core->dev,
				 "can't turn off TCM bank 0x%x", pm_domain_id);
	}

	return 0;
}

/* remoteproc callbacks; ELF loading/parsing uses the framework helpers */
static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare	= zynqmp_r5_rproc_prepare,
	.unprepare	= zynqmp_r5_rproc_unprepare,
	.start		= zynqmp_r5_rproc_start,
	.stop		= zynqmp_r5_rproc_stop,
	.load		= rproc_elf_load_segments,
	.parse_fw	= zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
	.kick		= zynqmp_r5_rproc_kick,
};
/**
 * zynqmp_r5_add_rproc_core()
 * Allocate and add struct rproc object for each r5f core
 * This is called for each individual r5f core
 *
 * @cdev: Device node of each r5 core
 *
 * Return: zynqmp_r5_core object for success else error code pointer
 */
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
	struct zynqmp_r5_core *r5_core;
	struct rproc *r5_rproc;
	int ret;

	/* Set up DMA mask */
	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
	if (ret)
		return ERR_PTR(ret);

	/* Allocate remoteproc instance; priv is the zynqmp_r5_core */
	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
			       &zynqmp_r5_rproc_ops,
			       NULL, sizeof(struct zynqmp_r5_core));
	if (!r5_rproc) {
		dev_err(cdev, "failed to allocate memory for rproc instance\n");
		return ERR_PTR(-ENOMEM);
	}

	rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);

	/* firmware is loaded explicitly, never on probe */
	r5_rproc->auto_boot = false;
	r5_core = r5_rproc->priv;
	r5_core->dev = cdev;
	r5_core->np = dev_of_node(cdev);
	if (!r5_core->np) {
		dev_err(cdev, "can't get device node for r5 core\n");
		ret = -EINVAL;
		goto free_rproc;
	}

	/* Add R5 remoteproc core */
	ret = rproc_add(r5_rproc);
	if (ret) {
		dev_err(cdev, "failed to add r5 remoteproc\n");
		goto free_rproc;
	}

	r5_core->rproc = r5_rproc;
	return r5_core;

free_rproc:
	rproc_free(r5_rproc);
	return ERR_PTR(ret);
}

/*
 * zynqmp_r5_get_tcm_node_from_dt()
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Parse TCM banks for each core from the "reg" and "power-domains"
 * device-tree properties. Entry 0 of "power-domains" is the R5 core
 * itself; each following entry pairs with the core's "reg" entry of the
 * same TCM index.
 *
 * Return: 0 for success and < 0 error code for failure.
 */
static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
{
	int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
	struct of_phandle_args out_args;
	struct zynqmp_r5_core *r5_core;
	struct platform_device *cpdev;
	struct mem_bank_data *tcm;
	struct device_node *np;
	struct resource *res;
	u64 abs_addr, size;
	struct device *dev;

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		dev = r5_core->dev;
		np = r5_core->np;

		pd_count = of_count_phandle_with_args(np, "power-domains",
						      "#power-domain-cells");

		if (pd_count <= 0) {
			dev_err(dev, "invalid power-domains property, %d\n", pd_count);
			return -EINVAL;
		}

		/* First entry in power-domains list is for r5 core, rest for TCM. */
		tcm_bank_count = pd_count - 1;

		if (tcm_bank_count <= 0) {
			dev_err(dev, "invalid TCM count %d\n", tcm_bank_count);
			return -EINVAL;
		}

		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		r5_core->tcm_bank_count = tcm_bank_count;
		for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
			tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
					   GFP_KERNEL);
			if (!tcm)
				return -ENOMEM;

			r5_core->tcm_banks[j] = tcm;

			/* Get power-domains id of TCM. */
			ret = of_parse_phandle_with_args(np, "power-domains",
							 "#power-domain-cells",
							 tcm_pd_idx, &out_args);
			if (ret) {
				dev_err(r5_core->dev,
					"failed to get tcm %d pm domain, ret %d\n",
					tcm_pd_idx, ret);
				return ret;
			}
			tcm->pm_domain_id = out_args.args[0];
			of_node_put(out_args.np);

			/* Get TCM address without translation. */
			ret = of_property_read_reg(np, j, &abs_addr, &size);
			if (ret) {
				dev_err(dev, "failed to get reg property\n");
				return ret;
			}

			/*
			 * Remote processor can address only 32 bits
			 * so convert 64-bits into 32-bits. This will discard
			 * any unwanted upper 32-bits.
			 */
			tcm->da = (u32)abs_addr;
			tcm->size = (u32)size;

			cpdev = to_platform_device(dev);
			res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
			if (!res) {
				dev_err(dev, "failed to get tcm resource\n");
				return -EINVAL;
			}

			tcm->addr = (u32)res->start;
			tcm->bank_name = (char *)res->name;
			/* claim the bank; devm-managed, released on detach */
			res = devm_request_mem_region(dev, tcm->addr, tcm->size,
						      tcm->bank_name);
			if (!res) {
				dev_err(dev, "failed to request tcm resource\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/**
 * zynqmp_r5_get_tcm_node()
 * Ideally this function should parse tcm node and store information
 * in r5_core instance. For now, Hardcoded TCM information is used.
 * This approach is used as TCM bindings for system-dt is being developed
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and < 0 error code for failure.
 */
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
	const struct mem_bank_data *zynqmp_tcm_banks;
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int tcm_bank_count, tcm_node;
	int i, j;

	if (cluster->mode == SPLIT_MODE) {
		zynqmp_tcm_banks = zynqmp_tcm_banks_split;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_split);
	} else {
		zynqmp_tcm_banks = zynqmp_tcm_banks_lockstep;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_lockstep);
	}

	/* count per core tcm banks */
	tcm_bank_count = tcm_bank_count / cluster->core_count;

	/*
	 * r5 core 0 will use all of TCM banks in lockstep mode.
	 * In split mode, r5 core0 will use 128k and r5 core1 will use another
	 * 128k. Assign TCM banks to each core accordingly
	 */
	tcm_node = 0;
	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		for (j = 0; j < tcm_bank_count; j++) {
			/*
			 * Use pre-defined TCM reg values.
			 * Eventually this should be replaced by values
			 * parsed from dts.
			 */
			r5_core->tcm_banks[j] =
				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
			tcm_node++;
		}

		r5_core->tcm_bank_count = tcm_bank_count;
	}

	return 0;
}

/*
 * zynqmp_r5_core_init()
 * Create and initialize zynqmp_r5_core type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
			       enum rpu_oper_mode fw_reg_val,
			       enum rpu_tcm_comb tcm_mode)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	/* -EINVAL if neither TCM-discovery branch below applies */
	int ret = -EINVAL, i;

	r5_core = cluster->r5_cores[0];

	/* Maintain backward compatibility for zynqmp by using hardcode TCM address. */
	if (of_find_property(r5_core->np, "reg", NULL))
		ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
	else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss"))
		ret = zynqmp_r5_get_tcm_node(cluster);

	if (ret) {
		dev_err(dev, "can't get tcm, err %d\n", ret);
		return ret;
	}

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];

		/* Initialize r5 cores with power-domains parsed from dts */
		ret = of_property_read_u32_index(r5_core->np, "power-domains",
						 1, &r5_core->pm_domain_id);
		if (ret) {
			dev_err(dev, "failed to get power-domains property\n");
			return ret;
		}

		ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
		if (ret < 0) {
			dev_err(r5_core->dev, "failed to set RPU mode\n");
			return ret;
		}

		/*
		 * TCM config only applies when the dt declares xlnx,tcm-mode
		 * or on the legacy zynqmp compatible.
		 */
		if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) ||
		    device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
			ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id,
						       tcm_mode);
			if (ret < 0) {
				dev_err(r5_core->dev, "failed to configure TCM\n");
				return ret;
			}
		}
	}

	return 0;
}
/*
 * zynqmp_r5_cluster_init()
 * Create and initialize zynqmp_r5_cluster type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
{
	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
	struct device *dev = cluster->dev;
	struct device_node *dev_node = dev_of_node(dev);
	struct platform_device *child_pdev;
	struct zynqmp_r5_core **r5_cores;
	enum rpu_oper_mode fw_reg_val;
	struct device **child_devs;
	struct device_node *child;
	enum rpu_tcm_comb tcm_mode;
	int core_count, ret, i;
	struct mbox_info *ipi;

	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);

	/*
	 * on success returns 0, if not defined then returns -EINVAL,
	 * In that case, default is LOCKSTEP mode. Other than that
	 * returns relative error code < 0.
	 */
	if (ret != -EINVAL && ret != 0) {
		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
		return ret;
	}

	/*
	 * For now driver only supports split mode and lockstep mode.
	 * fail driver probe if either of that is not set in dts.
	 */
	if (cluster_mode == LOCKSTEP_MODE) {
		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
	} else if (cluster_mode == SPLIT_MODE) {
		fw_reg_val = PM_RPU_MODE_SPLIT;
	} else {
		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
		return -EINVAL;
	}

	/*
	 * TCM combine mode: explicit dt property wins; legacy zynqmp derives
	 * it from the cluster mode; otherwise default to combined.
	 */
	if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) {
		ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode);
		if (ret)
			return ret;
	} else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
		if (cluster_mode == LOCKSTEP_MODE)
			tcm_mode = PM_RPU_TCM_COMB;
		else
			tcm_mode = PM_RPU_TCM_SPLIT;
	} else {
		tcm_mode = PM_RPU_TCM_COMB;
	}

	/*
	 * Number of cores is decided by number of child nodes of
	 * r5f subsystem node in dts. If Split mode is used in dts
	 * 2 child nodes are expected.
	 * In lockstep mode if two child nodes are available,
	 * only use first child node and consider it as core0
	 * and ignore core1 dt node.
	 */
	core_count = of_get_available_child_count(dev_node);
	if (core_count == 0) {
		dev_err(dev, "Invalid number of r5 cores %d", core_count);
		return -EINVAL;
	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
		dev_err(dev, "Invalid number of r5 cores for split mode\n");
		return -EINVAL;
	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
		dev_warn(dev, "Only r5 core0 will be used\n");
		core_count = 1;
	}

	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
	if (!child_devs)
		return -ENOMEM;

	r5_cores = kcalloc(core_count,
			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
	if (!r5_cores) {
		kfree(child_devs);
		return -ENOMEM;
	}

	i = 0;
	for_each_available_child_of_node(dev_node, child) {
		/* takes a reference on the child device; dropped in cleanup */
		child_pdev = of_find_device_by_node(child);
		if (!child_pdev) {
			of_node_put(child);
			ret = -ENODEV;
			goto release_r5_cores;
		}

		child_devs[i] = &child_pdev->dev;

		/* create and add remoteproc instance of type struct rproc */
		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
		if (IS_ERR(r5_cores[i])) {
			of_node_put(child);
			ret = PTR_ERR(r5_cores[i]);
			/* NULL so cleanup puts the device but skips rproc teardown */
			r5_cores[i] = NULL;
			goto release_r5_cores;
		}

		/*
		 * If mailbox nodes are disabled using "status" property then
		 * setting up mailbox channels will fail.
		 */
		ipi = zynqmp_r5_setup_mbox(&child_pdev->dev);
		if (ipi) {
			r5_cores[i]->ipi = ipi;
			ipi->r5_core = r5_cores[i];
		}

		/*
		 * If two child nodes are available in dts in lockstep mode,
		 * then ignore second child node.
		 */
		if (cluster_mode == LOCKSTEP_MODE) {
			of_node_put(child);
			break;
		}

		i++;
	}

	cluster->mode = cluster_mode;
	cluster->core_count = core_count;
	cluster->r5_cores = r5_cores;

	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
	if (ret < 0) {
		dev_err(dev, "failed to init r5 core err %d\n", ret);
		cluster->core_count = 0;
		cluster->r5_cores = NULL;

		/*
		 * at this point rproc resources for each core are allocated.
		 * adjust index to free resources in reverse order
		 */
		i = core_count - 1;
		goto release_r5_cores;
	}

	kfree(child_devs);
	return 0;

release_r5_cores:
	/* unwind entries 0..i; r5_cores[i] may be NULL on a partial failure */
	while (i >= 0) {
		put_device(child_devs[i]);
		if (r5_cores[i]) {
			zynqmp_r5_free_mbox(r5_cores[i]->ipi);
			of_reserved_mem_device_release(r5_cores[i]->dev);
			rproc_del(r5_cores[i]->rproc);
			rproc_free(r5_cores[i]->rproc);
		}
		i--;
	}
	kfree(r5_cores);
	kfree(child_devs);
	return ret;
}

/*
 * zynqmp_r5_cluster_exit() - devm action; tear down all cores and the cluster
 * @data: the cluster's platform device (set via devm_add_action_or_reset)
 */
static void zynqmp_r5_cluster_exit(void *data)
{
	struct platform_device *pdev = data;
	struct zynqmp_r5_cluster *cluster;
	struct zynqmp_r5_core *r5_core;
	int i;

	cluster = platform_get_drvdata(pdev);
	if (!cluster)
		return;

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		zynqmp_r5_free_mbox(r5_core->ipi);
		of_reserved_mem_device_release(r5_core->dev);
		/* drop the reference taken by of_find_device_by_node() */
		put_device(r5_core->dev);
		rproc_del(r5_core->rproc);
		rproc_free(r5_core->rproc);
	}

	kfree(cluster->r5_cores);
	kfree(cluster);
	platform_set_drvdata(pdev, NULL);
}
1156 */ 1157 static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev) 1158 { 1159 struct zynqmp_r5_cluster *cluster; 1160 struct device *dev = &pdev->dev; 1161 int ret; 1162 1163 cluster = kzalloc(sizeof(*cluster), GFP_KERNEL); 1164 if (!cluster) 1165 return -ENOMEM; 1166 1167 cluster->dev = dev; 1168 1169 ret = devm_of_platform_populate(dev); 1170 if (ret) { 1171 dev_err_probe(dev, ret, "failed to populate platform dev\n"); 1172 kfree(cluster); 1173 return ret; 1174 } 1175 1176 /* wire in so each core can be cleaned up at driver remove */ 1177 platform_set_drvdata(pdev, cluster); 1178 1179 ret = zynqmp_r5_cluster_init(cluster); 1180 if (ret) { 1181 kfree(cluster); 1182 platform_set_drvdata(pdev, NULL); 1183 dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n"); 1184 return ret; 1185 } 1186 1187 ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev); 1188 if (ret) 1189 return ret; 1190 1191 return 0; 1192 } 1193 1194 /* Match table for OF platform binding */ 1195 static const struct of_device_id zynqmp_r5_remoteproc_match[] = { 1196 { .compatible = "xlnx,versal-net-r52fss", }, 1197 { .compatible = "xlnx,versal-r5fss", }, 1198 { .compatible = "xlnx,zynqmp-r5fss", }, 1199 { /* end of list */ }, 1200 }; 1201 MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match); 1202 1203 static struct platform_driver zynqmp_r5_remoteproc_driver = { 1204 .probe = zynqmp_r5_remoteproc_probe, 1205 .driver = { 1206 .name = "zynqmp_r5_remoteproc", 1207 .of_match_table = zynqmp_r5_remoteproc_match, 1208 }, 1209 }; 1210 module_platform_driver(zynqmp_r5_remoteproc_driver); 1211 1212 MODULE_DESCRIPTION("Xilinx R5F remote processor driver"); 1213 MODULE_AUTHOR("Xilinx Inc."); 1214 MODULE_LICENSE("GPL"); 1215