// SPDX-License-Identifier: GPL-2.0
/*
 * ZynqMP R5 Remote Processor driver
 */

#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>

#include "remoteproc_internal.h"

/* IPI buffer MAX length */
#define IPI_BUF_LEN_MAX	32U

/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX	(IPI_BUF_LEN_MAX + \
				 sizeof(struct zynqmp_ipi_message))

#define RSC_TBL_XLNX_MAGIC	((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
				 (uint32_t)'m' << 8 | (uint32_t)'p')

/*
 * Settings for RPU cluster mode, which reflect the possible values of the
 * xlnx,cluster-mode dt-property.
 */
enum zynqmp_r5_cluster_mode {
	SPLIT_MODE = 0, /* cores run as separate processors */
	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep, clock-for-clock */
	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
};

/**
 * struct mem_bank_data - memory bank description
 *
 * @addr: start address of memory bank
 * @da: device address
 * @size: size of memory bank
 * @pm_domain_id: power-domain id of memory bank for firmware to turn on/off
 * @bank_name: name of the bank for remoteproc framework
 */
struct mem_bank_data {
	phys_addr_t addr;
	u32 da;
	size_t size;
	u32 pm_domain_id;
	char *bank_name;
};

/**
 * struct mbox_info - mailbox channel data
 *
 * @rx_mc_buf: to copy data from mailbox rx channel
 * @tx_mc_buf: to copy data to mailbox tx channel
 * @r5_core: this mailbox's corresponding r5_core pointer
 * @mbox_work: schedule work after receiving data from mailbox
 * @mbox_cl: mailbox client
 * @tx_chan: mailbox tx channel
 * @rx_chan: mailbox rx channel
 */
struct mbox_info {
	unsigned char rx_mc_buf[MBOX_CLIENT_BUF_MAX];
	unsigned char tx_mc_buf[MBOX_CLIENT_BUF_MAX];
	struct zynqmp_r5_core *r5_core;
	struct work_struct mbox_work;
	struct mbox_client mbox_cl;
	struct mbox_chan *tx_chan;
	struct mbox_chan *rx_chan;
};

/**
 * struct rsc_tbl_data - resource table metadata
 *
 * Platform-specific data structure used to sync the resource table address.
 * It is important to maintain the order and size of each field on the
 * remote side.
 *
 * @version: version of data structure
 * @magic_num: 32-bit magic number
 * @comp_magic_num: complement of above magic number
 * @rsc_tbl_size: resource table size
 * @rsc_tbl: resource table address
 */
struct rsc_tbl_data {
	const int version;
	const u32 magic_num;
	const u32 comp_magic_num;
	const u32 rsc_tbl_size;
	const uintptr_t rsc_tbl;
} __packed;
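
/*
 * Illustrative sketch of the firmware-side counterpart (not compiled here):
 * the remote firmware is expected to place a structure with this exact
 * layout at the start of the first "memory-region" carveout so the host
 * can discover the resource table. Symbol names below are hypothetical;
 * only the field layout and the magic values follow from struct
 * rsc_tbl_data above.
 *
 *	const struct rsc_tbl_data rsc_data = {
 *		.version	= 1,
 *		.magic_num	= 0x78616d70,		-- "xamp"
 *		.comp_magic_num	= ~0x78616d70,
 *		.rsc_tbl_size	= sizeof(resource_table),
 *		.rsc_tbl	= (uintptr_t)&resource_table,
 *	};
 */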

/*
 * Hardcoded TCM bank values. This will stay in the driver to maintain
 * backward compatibility with device-trees that do not have TCM information.
 */
static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};

/* In lockstep mode the cluster uses each 64KB TCM from the second core as well */
static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
	{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
	{0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
	{0xffe10000UL, 0x10000, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
	{0xffe30000UL, 0x30000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};

/**
 * struct zynqmp_r5_core - R5 core instance data
 *
 * @rsc_tbl_va: resource table virtual address
 * @dev: device of RPU instance
 * @np: device node of RPU instance
 * @tcm_bank_count: number of TCM banks accessible to this RPU
 * @tcm_banks: array of each TCM bank data
 * @rproc: rproc handle
 * @rsc_tbl_size: resource table size retrieved from remote
 * @pm_domain_id: RPU CPU power domain id
 * @ipi: pointer to mailbox information
 */
struct zynqmp_r5_core {
	void __iomem *rsc_tbl_va;
	struct device *dev;
	struct device_node *np;
	int tcm_bank_count;
	struct mem_bank_data **tcm_banks;
	struct rproc *rproc;
	u32 rsc_tbl_size;
	u32 pm_domain_id;
	struct mbox_info *ipi;
};

/**
 * struct zynqmp_r5_cluster - R5F subsystem cluster data
 *
 * @dev: r5f subsystem cluster device node
 * @mode: cluster mode of type zynqmp_r5_cluster_mode
 * @core_count: number of r5 cores used for this cluster mode
 * @r5_cores: array of pointers to the r5 cores
 */
struct zynqmp_r5_cluster {
	struct device *dev;
	enum zynqmp_r5_cluster_mode mode;
	int core_count;
	struct zynqmp_r5_core **r5_cores;
};

/**
 * event_notified_idr_cb() - callback for vq_interrupt per notifyid
 * @id: rproc->notify id
 * @ptr: pointer to idr private data
 * @data: data passed to idr_for_each callback
 *
 * Pass notification to remoteproc virtio
 *
 * Return: 0. The return value is only there to satisfy the idr_for_each()
 * function pointer prototype.
 **/
static int event_notified_idr_cb(int id, void *ptr, void *data)
{
	struct rproc *rproc = data;

	if (rproc_vq_interrupt(rproc, id) == IRQ_NONE)
		dev_dbg(&rproc->dev, "data not found for vqid=%d\n", id);

	return 0;
}
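
/*
 * Notification path wired up below, summarized for reference:
 *
 *	IPI interrupt from RPU firmware
 *	    -> zynqmp_r5_mb_rx_cb()		(mailbox rx callback)
 *	        -> schedule_work(&ipi->mbox_work)
 *	            -> handle_event_notified()
 *	                -> idr_for_each(..., event_notified_idr_cb, ...)
 *	                    -> rproc_vq_interrupt() for each notifyid
 */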

/**
 * handle_event_notified() - remoteproc notification work function
 * @work: pointer to the work structure
 *
 * It checks each registered remoteproc notify ID.
 */
static void handle_event_notified(struct work_struct *work)
{
	struct mbox_info *ipi;
	struct rproc *rproc;

	ipi = container_of(work, struct mbox_info, mbox_work);
	rproc = ipi->r5_core->rproc;

	/*
	 * We only use IPI for interrupt. The RPU firmware side may or may
	 * not write the notifyid when it triggers the IPI. Thus, we scan
	 * through all the registered notifyids and find which one is valid
	 * to get the message. Even if the message from firmware is NULL,
	 * we attempt to get the vqid.
	 */
	idr_for_each(&rproc->notifyids, event_notified_idr_cb, rproc);
}

/**
 * zynqmp_r5_mb_rx_cb() - receive channel mailbox callback
 * @cl: mailbox client
 * @msg: message pointer
 *
 * Receive data from the ipi buffer, ack the interrupt and then
 * schedule the R5 notification work.
 */
static void zynqmp_r5_mb_rx_cb(struct mbox_client *cl, void *msg)
{
	struct zynqmp_ipi_message *ipi_msg, *buf_msg;
	struct mbox_info *ipi;
	size_t len;

	ipi = container_of(cl, struct mbox_info, mbox_cl);

	/* copy data from ipi buffer to r5_core */
	ipi_msg = (struct zynqmp_ipi_message *)msg;
	buf_msg = (struct zynqmp_ipi_message *)ipi->rx_mc_buf;
	len = ipi_msg->len;
	if (len > IPI_BUF_LEN_MAX) {
		dev_warn(cl->dev, "msg size %zu exceeds max %d, truncating\n",
			 len, IPI_BUF_LEN_MAX);
		len = IPI_BUF_LEN_MAX;
	}
	buf_msg->len = len;
	memcpy(buf_msg->data, ipi_msg->data, len);

	/* received and processed interrupt ack */
	if (mbox_send_message(ipi->rx_chan, NULL) < 0)
		dev_err(cl->dev, "ack failed to mbox rx_chan\n");

	schedule_work(&ipi->mbox_work);
}

/**
 * zynqmp_r5_setup_mbox() - setup mailbox channels for an R5 core
 * @cdev: child node device
 *
 * Called once for each individual R5 core to request its tx and rx
 * mailbox channels and set up the related client properties.
 *
 * Return: NULL on failure, otherwise pointer to mbox_info
 */
static struct mbox_info *zynqmp_r5_setup_mbox(struct device *cdev)
{
	struct mbox_client *mbox_cl;
	struct mbox_info *ipi;

	ipi = kzalloc(sizeof(*ipi), GFP_KERNEL);
	if (!ipi)
		return NULL;

	mbox_cl = &ipi->mbox_cl;
	mbox_cl->rx_callback = zynqmp_r5_mb_rx_cb;
	mbox_cl->tx_block = false;
	mbox_cl->knows_txdone = false;
	mbox_cl->tx_done = NULL;
	mbox_cl->dev = cdev;

	/* Request TX and RX channels */
	ipi->tx_chan = mbox_request_channel_byname(mbox_cl, "tx");
	if (IS_ERR(ipi->tx_chan)) {
		ipi->tx_chan = NULL;
		kfree(ipi);
		dev_warn(cdev, "mbox tx channel request failed\n");
		return NULL;
	}

	ipi->rx_chan = mbox_request_channel_byname(mbox_cl, "rx");
	if (IS_ERR(ipi->rx_chan)) {
		mbox_free_channel(ipi->tx_chan);
		ipi->rx_chan = NULL;
		ipi->tx_chan = NULL;
		kfree(ipi);
		dev_warn(cdev, "mbox rx channel request failed\n");
		return NULL;
	}

	INIT_WORK(&ipi->mbox_work, handle_event_notified);

	return ipi;
}

static void zynqmp_r5_free_mbox(struct mbox_info *ipi)
{
	if (!ipi)
		return;

	if (ipi->tx_chan) {
		mbox_free_channel(ipi->tx_chan);
		ipi->tx_chan = NULL;
	}

	if (ipi->rx_chan) {
		mbox_free_channel(ipi->rx_chan);
		ipi->rx_chan = NULL;
	}

	kfree(ipi);
}
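
/*
 * The channel names requested in zynqmp_r5_setup_mbox() ("tx" and "rx")
 * come from the core's device-tree node. A minimal sketch, assuming an
 * IPI mailbox controller is defined elsewhere in the dts (node names and
 * cell values below are illustrative only):
 *
 *	r5f@ffe00000 {
 *		...
 *		mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
 *		mbox-names = "tx", "rx";
 *	};
 */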

/*
 * zynqmp_r5_rproc_kick() - kick the firmware if an mbox is provided
 * @rproc: r5 core's corresponding rproc structure
 * @vqid: virtqueue ID
 */
static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	struct device *dev = r5_core->dev;
	struct zynqmp_ipi_message *mb_msg;
	struct mbox_info *ipi;
	int ret;

	ipi = r5_core->ipi;
	if (!ipi)
		return;

	mb_msg = (struct zynqmp_ipi_message *)ipi->tx_mc_buf;
	memcpy(mb_msg->data, &vqid, sizeof(vqid));
	mb_msg->len = sizeof(vqid);
	ret = mbox_send_message(ipi->tx_chan, mb_msg);
	if (ret < 0)
		dev_warn(dev, "failed to send message\n");
}

/*
 * zynqmp_r5_rproc_start()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Start R5 Core from designated boot address.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_rproc_start(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	enum rpu_boot_mem bootmem;
	int ret;

	/*
	 * The exception vector pointers (EVP) refer to the base-address of
	 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
	 * starts at the base-address and subsequent vectors are on 4-byte
	 * boundaries.
	 *
	 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
	 * from 0xFFFF_0000 (HIVEC), which is mapped in the OCM (On-Chip
	 * Memory).
	 *
	 * Usually firmware will put the exception vectors at LOVEC.
	 *
	 * It is not recommended to change the exception vector. Changing
	 * the EVP to HIVEC will result in increased interrupt latency and
	 * jitter. Also, if the OCM is secured and the Cortex-R5F processor
	 * is non-secured, then the Cortex-R5F processor cannot access the
	 * HIVEC exception vectors in the OCM.
	 */
	bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
		   PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;

	dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
		bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");

	ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
				     bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
	if (ret)
		dev_err(r5_core->dev,
			"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);
	return ret;
}

/*
 * zynqmp_r5_rproc_stop()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Power down R5 Core.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_rproc_stop(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core = rproc->priv;
	int ret;

	ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	if (ret)
		dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);

	return ret;
}
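
/*
 * With auto_boot disabled (see zynqmp_r5_add_rproc_core() below), the
 * start/stop ops above are typically driven from user space through the
 * standard remoteproc sysfs interface, e.g. (instance number may differ):
 *
 *	echo app.elf > /sys/class/remoteproc/remoteproc0/firmware
 *	echo start > /sys/class/remoteproc/remoteproc0/state
 *	echo stop > /sys/class/remoteproc/remoteproc0/state
 */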

/*
 * zynqmp_r5_mem_region_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem descriptor to map reserved memory-regions
 *
 * Callback to map va for memory-region's carveout.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_mem_region_map(struct rproc *rproc,
				    struct rproc_mem_entry *mem)
{
	void __iomem *va;

	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	mem->va = (void *)va;

	return 0;
}

/*
 * zynqmp_r5_mem_region_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: mem entry to unmap
 *
 * Unmap memory-region carveout
 *
 * return: always returns 0
 */
static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
				      struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);
	return 0;
}

/*
 * add_mem_regions_carveout()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Construct rproc mem carveouts from memory-region property nodes
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_mem_regions_carveout(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	struct of_phandle_iterator it;
	struct reserved_mem *rmem;
	int i = 0;

	r5_core = rproc->priv;

	/* Register associated reserved memory regions */
	of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);

	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			of_node_put(it.node);
			dev_err(&rproc->dev, "unable to acquire memory-region\n");
			return -EINVAL;
		}

		if (!strcmp(it.node->name, "vdev0buffer")) {
			/* Init reserved memory for vdev buffer */
			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
								 rmem->size,
								 rmem->base,
								 it.node->name);
		} else {
			/* Register associated reserved memory regions */
			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
							 (dma_addr_t)rmem->base,
							 rmem->size, rmem->base,
							 zynqmp_r5_mem_region_map,
							 zynqmp_r5_mem_region_unmap,
							 it.node->name);
		}

		if (!rproc_mem) {
			of_node_put(it.node);
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, rproc_mem);
		rproc_coredump_add_segment(rproc, rmem->base, rmem->size);

		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
			it.node->name, rmem->base, rmem->size);
		i++;
	}

	return 0;
}

/*
 * tcm_mem_unmap()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm mem entry to unmap
 *
 * Unmap TCM banks when powering down R5 core.
 *
 * return: always returns 0
 */
static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
{
	iounmap((void __iomem *)mem->va);

	return 0;
}
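
/*
 * add_mem_regions_carveout() above treats the node named "vdev0buffer"
 * specially. A minimal reserved-memory sketch for one core; addresses,
 * sizes and labels are illustrative only, not a validated layout:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		rpu0vdev0vring0: vdev0vring0@3ed40000 {
 *			reg = <0x0 0x3ed40000 0x0 0x4000>;
 *			no-map;
 *		};
 *
 *		rpu0vdev0buffer: vdev0buffer@3ed48000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x0 0x3ed48000 0x0 0x100000>;
 *			no-map;
 *		};
 *	};
 */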

/*
 * tcm_mem_map()
 * @rproc: single R5 core's corresponding rproc instance
 * @mem: tcm memory entry descriptor
 *
 * Given a TCM bank entry, this function sets up the virtual address for
 * the TCM bank remoteproc carveout. It also takes care of va to da
 * address translation.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int tcm_mem_map(struct rproc *rproc,
		       struct rproc_mem_entry *mem)
{
	void __iomem *va;

	va = ioremap_wc(mem->dma, mem->len);
	if (IS_ERR_OR_NULL(va))
		return -ENOMEM;

	/* Update memory entry va */
	mem->va = (void *)va;

	/* clear TCMs */
	memset_io(va, 0, mem->len);

	return 0;
}

/*
 * add_tcm_banks()
 * @rproc: single R5 core's corresponding rproc instance
 *
 * Allocate and add remoteproc carveouts for TCM memory
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int add_tcm_banks(struct rproc *rproc)
{
	struct rproc_mem_entry *rproc_mem;
	struct zynqmp_r5_core *r5_core;
	int i, num_banks, ret;
	phys_addr_t bank_addr;
	struct device *dev;
	u32 pm_domain_id;
	size_t bank_size;
	char *bank_name;
	u32 da;

	r5_core = rproc->priv;
	dev = r5_core->dev;
	num_banks = r5_core->tcm_bank_count;

	/*
	 * Power on each 64KB TCM, register its address space, map and
	 * unmap functions, and add carveouts accordingly.
	 */
	for (i = 0; i < num_banks; i++) {
		bank_addr = r5_core->tcm_banks[i]->addr;
		da = r5_core->tcm_banks[i]->da;
		bank_name = r5_core->tcm_banks[i]->bank_name;
		bank_size = r5_core->tcm_banks[i]->size;
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;

		ret = zynqmp_pm_request_node(pm_domain_id,
					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
		if (ret < 0) {
			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
			goto release_tcm;
		}

		dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
			bank_name, bank_addr, da, bank_size);

		/*
		 * In DETACHED state the firmware is already running, so there
		 * is no need to map the TCM banks and add carveouts. However,
		 * the TCM PD nodes are still requested above to let the
		 * platform management firmware know that the TCM is in use.
		 */
		if (rproc->state == RPROC_DETACHED)
			continue;

		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
						 bank_size, da,
						 tcm_mem_map, tcm_mem_unmap,
						 bank_name);
		if (!rproc_mem) {
			ret = -ENOMEM;
			zynqmp_pm_release_node(pm_domain_id);
			goto release_tcm;
		}

		rproc_add_carveout(rproc, rproc_mem);
		rproc_coredump_add_segment(rproc, da, bank_size);
	}

	return 0;

release_tcm:
	/* If failed, turn off all TCM banks turned on before */
	for (i--; i >= 0; i--) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		zynqmp_pm_release_node(pm_domain_id);
	}
	return ret;
}
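
/*
 * Worked example for the carveouts built above, using the hardcoded split
 * mode table: core0's ATCM lives at physical address 0xffe00000 but is
 * registered with da 0x0, and its BTCM at 0xffe20000 with da 0x20000,
 * matching the R5's own view of its TCM. The ELF loader can therefore
 * resolve TCM device addresses found in the firmware image directly.
 */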

/*
 * zynqmp_r5_parse_fw()
 * @rproc: single R5 core's corresponding rproc instance
 * @fw: ptr to firmware to be loaded onto r5 core
 *
 * Get the resource table if available.
 *
 * return 0 on success, otherwise non-zero value on failure
 */
static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
{
	int ret;

	ret = rproc_elf_load_rsc_table(rproc, fw);
	if (ret == -EINVAL) {
		/*
		 * A resource table is only required for IPC. If it is not
		 * present, this is not necessarily an error; for example,
		 * a simple r5 "hello world" application has none. So simply
		 * inform the user and keep going.
		 */
		dev_info(&rproc->dev, "no resource table found.\n");
		ret = 0;
	}
	return ret;
}

/**
 * zynqmp_r5_rproc_prepare() - adds carveouts for TCM banks and reserved
 *			       memory regions
 * @rproc: rproc handle
 *
 * Return: 0 for success else < 0 error code
 */
static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
	int ret;

	ret = add_tcm_banks(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
		return ret;
	}

	ret = add_mem_regions_carveout(rproc);
	if (ret) {
		dev_err(&rproc->dev, "failed to get reserved mem regions %d\n", ret);
		return ret;
	}

	return 0;
}

/**
 * zynqmp_r5_rproc_unprepare() - turns off TCM banks using power-domain id
 * @rproc: rproc handle
 *
 * Return: always 0
 */
static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
{
	struct zynqmp_r5_core *r5_core;
	u32 pm_domain_id;
	int i;

	r5_core = rproc->priv;

	for (i = 0; i < r5_core->tcm_bank_count; i++) {
		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
		if (zynqmp_pm_release_node(pm_domain_id))
			dev_warn(r5_core->dev,
				 "can't turn off TCM bank 0x%x", pm_domain_id);
	}

	return 0;
}

static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
							     size_t *size)
{
	struct zynqmp_r5_core *r5_core;

	r5_core = rproc->priv;

	*size = r5_core->rsc_tbl_size;

	return (struct resource_table *)r5_core->rsc_tbl_va;
}

static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
{
	struct resource_table *rsc_tbl_addr;
	struct device *dev = r5_core->dev;
	struct rsc_tbl_data *rsc_data_va;
	struct resource res_mem;
	struct device_node *np;
	int ret;

	/*
	 * The remote processor firmware is expected to provide the resource
	 * table address via a struct rsc_tbl_data. The start address of the
	 * first entry under the "memory-region" property list contains that
	 * data structure, which holds the resource table address and size,
	 * plus a magic number to validate a correct resource table entry.
	 */
	np = of_parse_phandle(r5_core->np, "memory-region", 0);
	if (!np) {
		dev_err(dev, "failed to get memory region dev node\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &res_mem);
	of_node_put(np);
	if (ret) {
		dev_err(dev, "failed to get memory-region resource addr\n");
		return -EINVAL;
	}

	rsc_data_va = (struct rsc_tbl_data *)ioremap_wc(res_mem.start,
							sizeof(struct rsc_tbl_data));
	if (!rsc_data_va) {
		dev_err(dev, "failed to map resource table data address\n");
		return -EIO;
	}

	/*
	 * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found,
	 * then do not consider the resource table address valid and don't
	 * attach.
	 */
	if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
	    rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
		dev_dbg(dev, "invalid magic number, won't attach\n");
		iounmap((void __iomem *)rsc_data_va);
		return -EINVAL;
	}

	r5_core->rsc_tbl_va = ioremap_wc(rsc_data_va->rsc_tbl,
					 rsc_data_va->rsc_tbl_size);
	if (!r5_core->rsc_tbl_va) {
		dev_err(dev, "failed to get resource table va\n");
		iounmap((void __iomem *)rsc_data_va);
		return -EINVAL;
	}

	rsc_tbl_addr = (struct resource_table *)r5_core->rsc_tbl_va;

	/*
	 * As of now resource table version 1 is expected. Don't fail the
	 * attach, but warn users about it.
	 */
	if (rsc_tbl_addr->ver != 1)
		dev_warn(dev, "unexpected resource table version %d\n",
			 rsc_tbl_addr->ver);

	r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;

	iounmap((void __iomem *)rsc_data_va);

	return 0;
}
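
/*
 * Attach flow in short: if zynqmp_r5_get_rsc_table_va() succeeds at probe
 * time, the rproc is marked RPROC_DETACHED (see zynqmp_r5_add_rproc_core()
 * below) and the already-running firmware can be attached to. In that case
 * the framework uses the table mapped above via get_loaded_rsc_table()
 * instead of parsing one out of an ELF image.
 */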

static int zynqmp_r5_attach(struct rproc *rproc)
{
	dev_dbg(&rproc->dev, "rproc %d attached\n", rproc->index);

	return 0;
}

static int zynqmp_r5_detach(struct rproc *rproc)
{
	/*
	 * Generate the last notification to the remote after clearing the
	 * virtio flag. The remote can avoid polling on the virtio reset
	 * flag if a kick is generated during detach by the host and it
	 * checks the virtio reset flag on the kick interrupt.
	 */
	zynqmp_r5_rproc_kick(rproc, 0);

	return 0;
}

static const struct rproc_ops zynqmp_r5_rproc_ops = {
	.prepare	= zynqmp_r5_rproc_prepare,
	.unprepare	= zynqmp_r5_rproc_unprepare,
	.start		= zynqmp_r5_rproc_start,
	.stop		= zynqmp_r5_rproc_stop,
	.load		= rproc_elf_load_segments,
	.parse_fw	= zynqmp_r5_parse_fw,
	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
	.sanity_check	= rproc_elf_sanity_check,
	.get_boot_addr	= rproc_elf_get_boot_addr,
	.kick		= zynqmp_r5_rproc_kick,
	.get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
	.attach		= zynqmp_r5_attach,
	.detach		= zynqmp_r5_detach,
};
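
/*
 * Note on the ops above: the ELF helpers (load, parse_fw, sanity_check,
 * get_boot_addr) only run on the normal boot path. On the attach path the
 * framework instead relies on get_loaded_rsc_table()/attach()/detach(),
 * since the firmware has already been loaded by another agent.
 */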

/**
 * zynqmp_r5_add_rproc_core() - allocate and add an rproc object for an
 *				r5f core
 * @cdev: child device representing the r5 core
 *
 * Called once for each individual r5f core.
 *
 * Return: zynqmp_r5_core object for success else error code pointer
 */
static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
{
	struct zynqmp_r5_core *r5_core;
	struct rproc *r5_rproc;
	int ret;

	/* Set up DMA mask */
	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
	if (ret)
		return ERR_PTR(ret);

	/* Allocate remoteproc instance */
	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
			       &zynqmp_r5_rproc_ops,
			       NULL, sizeof(struct zynqmp_r5_core));
	if (!r5_rproc) {
		dev_err(cdev, "failed to allocate memory for rproc instance\n");
		return ERR_PTR(-ENOMEM);
	}

	rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);

	r5_rproc->auto_boot = false;
	r5_core = r5_rproc->priv;
	r5_core->dev = cdev;
	r5_core->np = dev_of_node(cdev);
	if (!r5_core->np) {
		dev_err(cdev, "can't get device node for r5 core\n");
		ret = -EINVAL;
		goto free_rproc;
	}

	/* Add R5 remoteproc core */
	ret = rproc_add(r5_rproc);
	if (ret) {
		dev_err(cdev, "failed to add r5 remoteproc\n");
		goto free_rproc;
	}

	/*
	 * If firmware is already available in memory then move the rproc
	 * state to DETACHED. Firmware can be preloaded via the debugger or
	 * by any other agent (processor) in the system.
	 * If firmware isn't available in memory and the resource table isn't
	 * found, then the rproc state stays OFFLINE.
	 */
	if (!zynqmp_r5_get_rsc_table_va(r5_core))
		r5_rproc->state = RPROC_DETACHED;

	r5_core->rproc = r5_rproc;
	return r5_core;

free_rproc:
	rproc_free(r5_rproc);
	return ERR_PTR(ret);
}

static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
{
	int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
	struct of_phandle_args out_args;
	struct zynqmp_r5_core *r5_core;
	struct platform_device *cpdev;
	struct mem_bank_data *tcm;
	struct device_node *np;
	struct resource *res;
	u64 abs_addr, size;
	struct device *dev;

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		dev = r5_core->dev;
		np = r5_core->np;

		pd_count = of_count_phandle_with_args(np, "power-domains",
						      "#power-domain-cells");

		if (pd_count <= 0) {
			dev_err(dev, "invalid power-domains property, %d\n", pd_count);
			return -EINVAL;
		}

		/* First entry in power-domains list is for the r5 core, rest for TCM. */
		tcm_bank_count = pd_count - 1;

		if (tcm_bank_count <= 0) {
			dev_err(dev, "invalid TCM count %d\n", tcm_bank_count);
			return -EINVAL;
		}

		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		r5_core->tcm_bank_count = tcm_bank_count;
		for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
			tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
					   GFP_KERNEL);
			if (!tcm)
				return -ENOMEM;

			r5_core->tcm_banks[j] = tcm;

			/* Get power-domain id of the TCM. */
			ret = of_parse_phandle_with_args(np, "power-domains",
							 "#power-domain-cells",
							 tcm_pd_idx, &out_args);
			if (ret) {
				dev_err(r5_core->dev,
					"failed to get tcm %d pm domain, ret %d\n",
					tcm_pd_idx, ret);
				return ret;
			}
			tcm->pm_domain_id = out_args.args[0];
			of_node_put(out_args.np);

			/* Get the TCM address without translation. */
			ret = of_property_read_reg(np, j, &abs_addr, &size);
			if (ret) {
				dev_err(dev, "failed to get reg property\n");
				return ret;
			}

			/*
			 * The remote processor can address only 32 bits,
			 * so convert 64 bits into 32 bits. This discards
			 * the unwanted upper 32 bits.
			 */
			tcm->da = (u32)abs_addr;
			tcm->size = (u32)size;

			cpdev = to_platform_device(dev);
			res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
			if (!res) {
				dev_err(dev, "failed to get tcm resource\n");
				return -EINVAL;
			}

			tcm->addr = (u32)res->start;
			tcm->bank_name = (char *)res->name;
			res = devm_request_mem_region(dev, tcm->addr, tcm->size,
						      tcm->bank_name);
			if (!res) {
				dev_err(dev, "failed to request tcm resource\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}
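
/*
 * Device-tree sketch matching the parsing above: the first power-domains
 * entry is the core itself and the remaining entries are TCM banks, each
 * with a reg entry at the same index. Values are illustrative only:
 *
 *	r5f@0 {
 *		...
 *		power-domains = <&zynqmp_firmware PD_RPU_0>,
 *				<&zynqmp_firmware PD_R5_0_ATCM>,
 *				<&zynqmp_firmware PD_R5_0_BTCM>;
 *		reg = <0x0 0x0 0x0 0x10000>, <0x0 0x20000 0x0 0x10000>;
 *	};
 */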

/**
 * zynqmp_r5_get_tcm_node() - get TCM info from hardcoded tables
 *
 * Ideally this function should parse the tcm node and store the information
 * in the r5_core instance. For now, hardcoded TCM information is used. This
 * approach is kept while TCM bindings for system device-tree are being
 * developed.
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and < 0 error code for failure.
 */
static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
{
	const struct mem_bank_data *zynqmp_tcm_banks;
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int tcm_bank_count, tcm_node;
	int i, j;

	if (cluster->mode == SPLIT_MODE) {
		zynqmp_tcm_banks = zynqmp_tcm_banks_split;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_split);
	} else {
		zynqmp_tcm_banks = zynqmp_tcm_banks_lockstep;
		tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_lockstep);
	}

	/* count per-core tcm banks */
	tcm_bank_count = tcm_bank_count / cluster->core_count;

	/*
	 * r5 core 0 will use all of the TCM banks in lockstep mode.
	 * In split mode, r5 core0 will use 128k and r5 core1 will use the
	 * other 128k. Assign TCM banks to each core accordingly.
	 */
	tcm_node = 0;
	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

		for (j = 0; j < tcm_bank_count; j++) {
			/*
			 * Use pre-defined TCM reg values.
			 * Eventually this should be replaced by values
			 * parsed from dts.
			 */
			r5_core->tcm_banks[j] =
				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
			tcm_node++;
		}

		r5_core->tcm_bank_count = tcm_bank_count;
	}

	return 0;
}
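
/*
 * Resulting assignment from the hardcoded tables above: in split mode each
 * core gets its own two banks (core0: atcm0/btcm0, core1: atcm1/btcm1). In
 * lockstep mode the single active core gets all four banks, with core1's
 * banks remapped behind core0's at da 0x10000 and 0x30000.
 */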

/*
 * zynqmp_r5_core_init()
 * Create and initialize zynqmp_r5_core type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 * @fw_reg_val: value expected by firmware to configure RPU cluster mode
 * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
			       enum rpu_oper_mode fw_reg_val,
			       enum rpu_tcm_comb tcm_mode)
{
	struct device *dev = cluster->dev;
	struct zynqmp_r5_core *r5_core;
	int ret = -EINVAL, i;

	r5_core = cluster->r5_cores[0];

	/* Maintain backward compatibility for zynqmp by using hardcoded TCM addresses. */
	if (of_find_property(r5_core->np, "reg", NULL))
		ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
	else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss"))
		ret = zynqmp_r5_get_tcm_node(cluster);

	if (ret) {
		dev_err(dev, "can't get tcm, err %d\n", ret);
		return ret;
	}

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];

		/* Initialize r5 cores with power-domains parsed from dts */
		ret = of_property_read_u32_index(r5_core->np, "power-domains",
						 1, &r5_core->pm_domain_id);
		if (ret) {
			dev_err(dev, "failed to get power-domains property\n");
			return ret;
		}

		ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
		if (ret < 0) {
			dev_err(r5_core->dev, "failed to set RPU mode\n");
			return ret;
		}

		if (of_find_property(dev_of_node(dev), "xlnx,tcm-mode", NULL) ||
		    device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
			ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id,
						       tcm_mode);
			if (ret < 0) {
				dev_err(r5_core->dev, "failed to configure TCM\n");
				return ret;
			}
		}
	}

	return 0;
}

/*
 * zynqmp_r5_cluster_init()
 * Create and initialize zynqmp_r5_cluster type object
 *
 * @cluster: pointer to zynqmp_r5_cluster type object
 *
 * Return: 0 for success and error code for failure.
 */
static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
{
	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
	struct device *dev = cluster->dev;
	struct device_node *dev_node = dev_of_node(dev);
	struct platform_device *child_pdev;
	struct zynqmp_r5_core **r5_cores;
	enum rpu_oper_mode fw_reg_val;
	struct device **child_devs;
	struct device_node *child;
	enum rpu_tcm_comb tcm_mode;
	int core_count, ret, i;
	struct mbox_info *ipi;

	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);

	/*
	 * On success the read returns 0. If the property is not defined it
	 * returns -EINVAL, in which case the default LOCKSTEP mode is used.
	 * Any other error code (< 0) fails the probe.
	 */
	if (ret != -EINVAL && ret != 0) {
		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
		return ret;
	}

	/*
	 * For now the driver only supports split mode and lockstep mode.
	 * Fail the driver probe if neither of them is set in dts.
	 */
	if (cluster_mode == LOCKSTEP_MODE) {
		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
	} else if (cluster_mode == SPLIT_MODE) {
		fw_reg_val = PM_RPU_MODE_SPLIT;
	} else {
		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
		return -EINVAL;
	}

	if (of_find_property(dev_node, "xlnx,tcm-mode", NULL)) {
		ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode);
		if (ret)
			return ret;
	} else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
		if (cluster_mode == LOCKSTEP_MODE)
			tcm_mode = PM_RPU_TCM_COMB;
		else
			tcm_mode = PM_RPU_TCM_SPLIT;
	} else {
		tcm_mode = PM_RPU_TCM_COMB;
	}
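
	/*
	 * The cluster properties consumed above live on the r5f subsystem
	 * node. A minimal sketch (unit address and value illustrative only;
	 * xlnx,cluster-mode: 0 = split, 1 = lockstep, 2 = single-cpu):
	 *
	 *	r5fss@ff9a0000 {
	 *		compatible = "xlnx,zynqmp-r5fss";
	 *		xlnx,cluster-mode = <1>;
	 *		...
	 *	};
	 */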

	/*
	 * The number of cores is decided by the number of child nodes of
	 * the r5f subsystem node in dts. If split mode is used in dts,
	 * 2 child nodes are expected.
	 * In lockstep mode, if two child nodes are available, only use the
	 * first child node, consider it core0, and ignore the core1 dt node.
	 */
	core_count = of_get_available_child_count(dev_node);
	if (core_count == 0) {
		dev_err(dev, "Invalid number of r5 cores %d", core_count);
		return -EINVAL;
	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
		dev_err(dev, "Invalid number of r5 cores for split mode\n");
		return -EINVAL;
	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
		dev_warn(dev, "Only r5 core0 will be used\n");
		core_count = 1;
	}

	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
	if (!child_devs)
		return -ENOMEM;

	r5_cores = kcalloc(core_count,
			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
	if (!r5_cores) {
		kfree(child_devs);
		return -ENOMEM;
	}

	i = 0;
	for_each_available_child_of_node(dev_node, child) {
		child_pdev = of_find_device_by_node(child);
		if (!child_pdev) {
			of_node_put(child);
			ret = -ENODEV;
			goto release_r5_cores;
		}

		child_devs[i] = &child_pdev->dev;

		/* create and add remoteproc instance of type struct rproc */
		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
		if (IS_ERR(r5_cores[i])) {
			of_node_put(child);
			ret = PTR_ERR(r5_cores[i]);
			r5_cores[i] = NULL;
			goto release_r5_cores;
		}

		/*
		 * If mailbox nodes are disabled using the "status" property,
		 * then setting up mailbox channels will fail.
		 */
		ipi = zynqmp_r5_setup_mbox(&child_pdev->dev);
		if (ipi) {
			r5_cores[i]->ipi = ipi;
			ipi->r5_core = r5_cores[i];
		}

		/*
		 * If two child nodes are available in dts in lockstep mode,
		 * then ignore the second child node.
		 */
		if (cluster_mode == LOCKSTEP_MODE) {
			of_node_put(child);
			break;
		}

		i++;
	}

	cluster->mode = cluster_mode;
	cluster->core_count = core_count;
	cluster->r5_cores = r5_cores;

	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
	if (ret < 0) {
		dev_err(dev, "failed to init r5 core err %d\n", ret);
		cluster->core_count = 0;
		cluster->r5_cores = NULL;

		/*
		 * At this point rproc resources for each core are allocated.
		 * Adjust the index to free resources in reverse order.
		 */
		i = core_count - 1;
		goto release_r5_cores;
	}

	kfree(child_devs);
	return 0;

release_r5_cores:
	while (i >= 0) {
		put_device(child_devs[i]);
		if (r5_cores[i]) {
			zynqmp_r5_free_mbox(r5_cores[i]->ipi);
			of_reserved_mem_device_release(r5_cores[i]->dev);
			rproc_del(r5_cores[i]->rproc);
			rproc_free(r5_cores[i]->rproc);
		}
		i--;
	}
	kfree(r5_cores);
	kfree(child_devs);
	return ret;
}

static void zynqmp_r5_cluster_exit(void *data)
{
	struct platform_device *pdev = data;
	struct zynqmp_r5_cluster *cluster;
	struct zynqmp_r5_core *r5_core;
	int i;

	cluster = platform_get_drvdata(pdev);
	if (!cluster)
		return;

	for (i = 0; i < cluster->core_count; i++) {
		r5_core = cluster->r5_cores[i];
		zynqmp_r5_free_mbox(r5_core->ipi);
		iounmap(r5_core->rsc_tbl_va);
		of_reserved_mem_device_release(r5_core->dev);
		put_device(r5_core->dev);
		rproc_del(r5_core->rproc);
		rproc_free(r5_core->rproc);
	}

	kfree(cluster->r5_cores);
	kfree(cluster);
	platform_set_drvdata(pdev, NULL);
}

/*
 * zynqmp_r5_remoteproc_probe()
 * Parse the device-tree, initialize the hardware, and allocate the
 * required resources and remoteproc ops.
 *
 * @pdev: domain platform device for R5 cluster
 *
 * Return: 0 for success and < 0 for failure.
 */
static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
{
	struct zynqmp_r5_cluster *cluster;
	struct device *dev = &pdev->dev;
	int ret;

	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
	if (!cluster)
		return -ENOMEM;

	cluster->dev = dev;

	ret = devm_of_platform_populate(dev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to populate platform dev\n");
		kfree(cluster);
		return ret;
	}

	/* wire in so each core can be cleaned up at driver remove */
	platform_set_drvdata(pdev, cluster);

	ret = zynqmp_r5_cluster_init(cluster);
	if (ret) {
		kfree(cluster);
		platform_set_drvdata(pdev, NULL);
		dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
	if (ret)
		return ret;

	return 0;
}

/* Match table for OF platform binding */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
	{ .compatible = "xlnx,versal-net-r52fss", },
	{ .compatible = "xlnx,versal-r5fss", },
	{ .compatible = "xlnx,zynqmp-r5fss", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);

static struct platform_driver zynqmp_r5_remoteproc_driver = {
	.probe = zynqmp_r5_remoteproc_probe,
	.driver = {
		.name = "zynqmp_r5_remoteproc",
		.of_match_table = zynqmp_r5_remoteproc_match,
	},
};
module_platform_driver(zynqmp_r5_remoteproc_driver);

MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
MODULE_AUTHOR("Xilinx Inc.");
MODULE_LICENSE("GPL");