// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 * Mark Grosen <mgrosen@ti.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Suman Anna <s-anna@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>
#include <linux/platform_device.h>

#include "remoteproc_internal.h"

/* Upper 32 bits of a dma_addr_t; used to warn when a DMA address is truncated
 * to the 32-bit 'da' field of the resource table.
 */
#define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL

/* Protects rproc_list; the list itself holds every registered rproc */
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
static struct notifier_block rproc_panic_nb;

/* Signature shared by all resource-table entry handlers */
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				 void *, int offset, int avail);

static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem);
static int rproc_release_carveout(struct rproc *rproc,
				  struct rproc_mem_entry *mem);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);

/* Human-readable names for enum rproc_crash_type, indexed by the enum value */
static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT]	= "mmufault",
	[RPROC_WATCHDOG]	= "watchdog",
	[RPROC_FATAL_ERROR]	= "fatal error",
};

/* translate rproc_crash_type to string; never returns NULL */
static const char *rproc_crash_to_string(enum rproc_crash_type type)
{
	if (type < ARRAY_SIZE(rproc_crash_names))
		return rproc_crash_names[type];
	return "unknown";
}

/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * IOMMU core will invoke this handler whenever the remote processor
 * will try to access an unmapped device address.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
			     unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	/* Treat the fault as a crash so the recovery machinery kicks in */
	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just used it as a recovery trigger.
	 */
	return -ENOSYS;
}

/*
 * Allocate an IOMMU domain for @rproc's parent device, install the fault
 * handler above and attach the device. A no-op (returning 0) when the
 * rproc does not sit behind an IOMMU. On success rproc->domain is set;
 * on failure the domain is freed and an errno is returned.
 */
static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	if (!rproc->has_iommu) {
		dev_dbg(dev, "iommu not present\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}

/* Undo rproc_enable_iommu(): detach the device and free the domain, if any */
static void rproc_disable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain = rproc->domain;
	struct device *dev = rproc->dev.parent;

	if (!domain)
		return;

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
}

phys_addr_t rproc_va_to_pa(void *cpu_addr)
{
	/*
	 * Return physical address according to virtual address location
	 * - in vmalloc: if region ioremapped or defined as dma_alloc_coherent
	 * - in kernel: if region allocated in generic dma memory pool
	 */
	if (is_vmalloc_addr(cpu_addr)) {
		return page_to_phys(vmalloc_to_page(cpu_addr)) +
				    offset_in_page(cpu_addr);
	}

	/* virt_to_phys() is only meaningful for lowmem direct-mapped addresses */
	WARN_ON(!virt_addr_valid(cpu_addr));
	return virt_to_phys(cpu_addr);
}
EXPORT_SYMBOL(rproc_va_to_pa);

/**
 * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
 * @rproc: handle of a remote processor
 * @da: remoteproc device address to translate
 * @len: length of the memory region @da is pointing to
 *
 * Some remote processors will ask us to allocate them physically contiguous
 * memory regions (which we call "carveouts"), and map them to specific
 * device addresses (which are hardcoded in the firmware). They may also have
 * dedicated memory regions internal to the processors, and use them either
 * exclusively or alongside carveouts.
 *
 * They may then ask us to copy objects into specific device addresses (e.g.
 * code/data sections) or expose us certain symbols in other device address
 * (e.g. their trace buffer).
 *
 * This function is a helper function with which we can go over the allocated
 * carveouts and translate specific device addresses to kernel virtual addresses
 * so we can access the referenced memory. This function also allows to perform
 * translations on the internal remoteproc memory regions through a platform
 * implementation specific da_to_va ops, if present.
 *
 * The function returns a valid kernel address on success or NULL on failure.
 *
 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
 * but only on kernel direct mapped RAM memory.
 * Instead, we're just using
 * here the output of the DMA API for the carveouts, which should be more
 * correct.
 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	if (rproc->ops->da_to_va) {
		/*
		 * NOTE(review): when the platform hook succeeds, *is_iomem is
		 * whatever the hook wrote (or left untouched) — confirm every
		 * da_to_va implementation sets it before relying on the value.
		 */
		ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
		if (ptr)
			goto out;
	}

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/* Verify that carveout is allocated */
		if (!carveout->va)
			continue;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		if (is_iomem)
			*is_iomem = carveout->is_iomem;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);

/**
 * rproc_find_carveout_by_name() - lookup the carveout region by a name
 * @rproc: handle of a remote processor
 * @name: carveout name to find (format string)
 * @...: optional parameters matching @name string
 *
 * Platform driver has the capability to register some pre-allocated carveout
 * (physically contiguous memory regions) before rproc firmware loading and
 * associated resource table analysis. These regions may be dedicated memory
 * regions internal to the coprocessor or specified DDR region with specific
 * attributes
 *
 * This function is a helper function with which we can go over the
 * allocated carveouts and return associated region characteristics like
 * coprocessor address, length or processor virtual address.
 *
 * Return: a valid pointer on carveout entry on success or NULL on failure.
 */
__printf(2, 3)
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
	va_list args;
	char _name[32];
	struct rproc_mem_entry *carveout, *mem = NULL;

	if (!name)
		return NULL;

	/* Expand the format string; names longer than 31 chars are truncated */
	va_start(args, name);
	vsnprintf(_name, sizeof(_name), name, args);
	va_end(args);

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		/* Compare carveout and requested names */
		if (!strcmp(carveout->name, _name)) {
			mem = carveout;
			break;
		}
	}

	return mem;
}

/**
 * rproc_check_carveout_da() - Check specified carveout da configuration
 * @rproc: handle of a remote processor
 * @mem: pointer on carveout to check
 * @da: area device address
 * @len: associated area size
 *
 * This function is a helper function to verify requested device area (couple
 * da, len) is part of specified carveout.
 * If da is not set (defined as FW_RSC_ADDR_ANY), only requested length is
 * checked.
 *
 * Return: 0 if carveout matches request else error
 */
static int rproc_check_carveout_da(struct rproc *rproc,
				   struct rproc_mem_entry *mem, u32 da, u32 len)
{
	struct device *dev = &rproc->dev;
	int delta;

	/* Check requested resource length */
	if (len > mem->len) {
		dev_err(dev, "Registered carveout doesn't fit len request\n");
		return -EINVAL;
	}

	if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) {
		/* Address doesn't match registered carveout configuration */
		return -EINVAL;
	} else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) {
		delta = da - mem->da;

		/* Check requested resource belongs to registered carveout */
		if (delta < 0) {
			dev_err(dev,
				"Registered carveout doesn't fit da request\n");
			return -EINVAL;
		}

		if (delta + len > mem->len) {
			dev_err(dev,
				"Registered carveout doesn't fit len request\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Allocate (or validate against a pre-registered carveout) the memory backing
 * vring @i of @rvdev, and assign it an rproc-wide unique notify id that is
 * written back into the resource table for the remote side to use.
 */
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	int ret, notifyid;
	struct rproc_mem_entry *mem;
	size_t size;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;

	/* Search for pre-registered carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  i);
	if (mem) {
		if (rproc_check_carveout_da(rproc, mem, rsc->vring[i].da, size))
			return -ENOMEM;
	} else {
		/* Register carveout in list */
		mem = rproc_mem_entry_init(dev, NULL, 0,
					   size, rsc->vring[i].da,
					   rproc_alloc_carveout,
					   rproc_release_carveout,
					   "vdev%dvring%d",
					   rvdev->index, i);
		if (!mem) {
			dev_err(dev, "Can't allocate memory entry structure\n");
			return -ENOMEM;
		}

		rproc_add_carveout(rproc, mem);
	}

	/*
	 * Assign an rproc-wide unique index for this vring
	 * TODO: assign a notifyid for rvdev updates as well
	 * TODO: support predefined notifyids (via resource table)
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		return ret;
	}
	notifyid = ret;

	/* Potentially bump max_notifyid */
	if (notifyid > rproc->max_notifyid)
		rproc->max_notifyid = notifyid;

	rvring->notifyid = notifyid;

	/* Let the rproc know the notifyid of this vring.*/
	rsc->vring[i].notifyid = notifyid;
	return 0;
}

/*
 * Validate and record vring @i's parameters (queue size, alignment) from the
 * vdev resource entry into the host-side rproc_vring bookkeeping.
 */
static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da 0x%x, qsz %d, align %d\n",
		i, vring->da, vring->num, vring->align);

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
			vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}

/* Release a vring's notify id and clear its entry in the cached table */
void rproc_free_vring(struct rproc_vring *rvring)
{
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	idr_remove(&rproc->notifyids, rvring->notifyid);

	/*
	 * At this point rproc_stop() has been called and the installed resource
	 * table in the remote processor memory may no longer be accessible. As
	 * such and as per rproc_stop(), rproc->table_ptr points to the cached
	 * resource table (rproc->cached_table). The cached resource table is
	 * only available when a remote processor has been booted by the
	 * remoteproc core, otherwise it is NULL.
	 *
	 * Based on the above, reset the virtio device section in the cached
	 * resource table only if there is one to work with.
	 */
	if (rproc->table_ptr) {
		rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
		rsc->vring[idx].da = 0;
		rsc->vring[idx].notifyid = -1;
	}
}

/* Subdevice start hook: register the virtio device for this rvdev */
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

/* Subdevice stop hook: remove all virtio child devices of this rvdev */
static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	int ret;

	ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret);
}

/**
 * rproc_rvdev_release() - release the existence of a rvdev
 *
 * @dev: the subdevice's dev
 */
static void rproc_rvdev_release(struct device *dev)
{
	struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev);

	of_reserved_mem_device_release(dev);

	kfree(rvdev);
}

/*
 * Duplicate @from's bus DMA range map (a NULL-size-terminated array) onto
 * @to, so the child device translates DMA addresses like its parent.
 * Returns 0 when @from has no map, or -ENOMEM on allocation failure.
 */
static int copy_dma_range_map(struct device *to, struct device *from)
{
	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
	int num_ranges = 0;

	if (!map)
		return 0;

	/* count entries; the array is terminated by a zero-size entry */
	for (r = map; r->size; r++)
		num_ranges++;

	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
			  GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;
	to->dma_range_map = new_map;
	return 0;
}

/**
 * rproc_handle_vdev() - handle a vdev fw resource
 * @rproc: the remote processor
 * @ptr: the vring resource descriptor
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * This resource entry requests the host to statically register a virtio
 * device (vdev), and setup everything needed to support it.
It contains 495 * everything needed to make it possible: the virtio device id, virtio 496 * device features, vrings information, virtio config space, etc... 497 * 498 * Before registering the vdev, the vrings are allocated from non-cacheable 499 * physically contiguous memory. Currently we only support two vrings per 500 * remote processor (temporary limitation). We might also want to consider 501 * doing the vring allocation only later when ->find_vqs() is invoked, and 502 * then release them upon ->del_vqs(). 503 * 504 * Note: @da is currently not really handled correctly: we dynamically 505 * allocate it using the DMA API, ignoring requested hard coded addresses, 506 * and we don't take care of any required IOMMU programming. This is all 507 * going to be taken care of when the generic iommu-based DMA API will be 508 * merged. Meanwhile, statically-addressed iommu-based firmware images should 509 * use RSC_DEVMEM resource entries to map their required @da to the physical 510 * address of their base CMA region (ouch, hacky!). 
511 * 512 * Returns 0 on success, or an appropriate error code otherwise 513 */ 514 static int rproc_handle_vdev(struct rproc *rproc, void *ptr, 515 int offset, int avail) 516 { 517 struct fw_rsc_vdev *rsc = ptr; 518 struct device *dev = &rproc->dev; 519 struct rproc_vdev *rvdev; 520 int i, ret; 521 char name[16]; 522 523 /* make sure resource isn't truncated */ 524 if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len > 525 avail) { 526 dev_err(dev, "vdev rsc is truncated\n"); 527 return -EINVAL; 528 } 529 530 /* make sure reserved bytes are zeroes */ 531 if (rsc->reserved[0] || rsc->reserved[1]) { 532 dev_err(dev, "vdev rsc has non zero reserved bytes\n"); 533 return -EINVAL; 534 } 535 536 dev_dbg(dev, "vdev rsc: id %d, dfeatures 0x%x, cfg len %d, %d vrings\n", 537 rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); 538 539 /* we currently support only two vrings per rvdev */ 540 if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) { 541 dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); 542 return -EINVAL; 543 } 544 545 rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL); 546 if (!rvdev) 547 return -ENOMEM; 548 549 kref_init(&rvdev->refcount); 550 551 rvdev->id = rsc->id; 552 rvdev->rproc = rproc; 553 rvdev->index = rproc->nb_vdev++; 554 555 /* Initialise vdev subdevice */ 556 snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); 557 rvdev->dev.parent = &rproc->dev; 558 ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent); 559 if (ret) 560 return ret; 561 rvdev->dev.release = rproc_rvdev_release; 562 dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); 563 dev_set_drvdata(&rvdev->dev, rvdev); 564 565 ret = device_register(&rvdev->dev); 566 if (ret) { 567 put_device(&rvdev->dev); 568 return ret; 569 } 570 /* Make device dma capable by inheriting from parent's capabilities */ 571 set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent)); 572 573 ret = dma_coerce_mask_and_coherent(&rvdev->dev, 574 
dma_get_mask(rproc->dev.parent)); 575 if (ret) { 576 dev_warn(dev, 577 "Failed to set DMA mask %llx. Trying to continue... %x\n", 578 dma_get_mask(rproc->dev.parent), ret); 579 } 580 581 /* parse the vrings */ 582 for (i = 0; i < rsc->num_of_vrings; i++) { 583 ret = rproc_parse_vring(rvdev, rsc, i); 584 if (ret) 585 goto free_rvdev; 586 } 587 588 /* remember the resource offset*/ 589 rvdev->rsc_offset = offset; 590 591 /* allocate the vring resources */ 592 for (i = 0; i < rsc->num_of_vrings; i++) { 593 ret = rproc_alloc_vring(rvdev, i); 594 if (ret) 595 goto unwind_vring_allocations; 596 } 597 598 list_add_tail(&rvdev->node, &rproc->rvdevs); 599 600 rvdev->subdev.start = rproc_vdev_do_start; 601 rvdev->subdev.stop = rproc_vdev_do_stop; 602 603 rproc_add_subdev(rproc, &rvdev->subdev); 604 605 return 0; 606 607 unwind_vring_allocations: 608 for (i--; i >= 0; i--) 609 rproc_free_vring(&rvdev->vring[i]); 610 free_rvdev: 611 device_unregister(&rvdev->dev); 612 return ret; 613 } 614 615 void rproc_vdev_release(struct kref *ref) 616 { 617 struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); 618 struct rproc_vring *rvring; 619 struct rproc *rproc = rvdev->rproc; 620 int id; 621 622 for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { 623 rvring = &rvdev->vring[id]; 624 rproc_free_vring(rvring); 625 } 626 627 rproc_remove_subdev(rproc, &rvdev->subdev); 628 list_del(&rvdev->node); 629 device_unregister(&rvdev->dev); 630 } 631 632 /** 633 * rproc_handle_trace() - handle a shared trace buffer resource 634 * @rproc: the remote processor 635 * @ptr: the trace resource descriptor 636 * @offset: offset of the resource entry 637 * @avail: size of available data (for sanity checking the image) 638 * 639 * In case the remote processor dumps trace logs into memory, 640 * export it via debugfs. 641 * 642 * Currently, the 'da' member of @rsc should contain the device address 643 * where the remote processor is dumping the traces. 
 * Later we could also
 * support dynamically allocating this address using the generic
 * DMA API (but currently there isn't a use case for that).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_trace(struct rproc *rproc, void *ptr,
			      int offset, int avail)
{
	struct fw_rsc_trace *rsc = ptr;
	struct rproc_debug_trace *trace;
	struct device *dev = &rproc->dev;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set the trace buffer dma properties */
	trace->trace_mem.len = rsc->len;
	trace->trace_mem.da = rsc->da;

	/* set pointer on rproc device */
	trace->rproc = rproc;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->tfile = rproc_create_trace_file(name, rproc, trace);
	if (!trace->tfile) {
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n",
		name, rsc->da, rsc->len);

	return 0;
}

/**
 * rproc_handle_devmem() - handle devmem resource entry
 * @rproc: remote processor handle
 * @ptr: the devmem resource entry
 * @offset: offset of the resource entry
 * @avail: size of available data (for sanity checking the image)
 *
 * Remote processors commonly need to access certain on-chip peripherals.
 *
 * Some of these remote processors access memory via an iommu device,
 * and might require us to configure their iommu before they can access
 * the on-chip peripherals they need.
 *
 * This resource entry is a request to map such a peripheral device.
 *
 * These devmem entries will contain the physical address of the device in
 * the 'pa' member. If a specific device address is expected, then 'da' will
 * contain it (currently this is the only use case supported). 'len' will
 * contain the size of the physical region we need to map.
 *
 * Currently we just "trust" those devmem entries to contain valid physical
 * addresses, but this is going to change: we want the implementations to
 * tell us ranges of physical addresses the firmware is allowed to request,
 * and not allow firmwares to request access to physical addresses that
 * are outside those ranges.
 */
static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
			       int offset, int avail)
{
	struct fw_rsc_devmem *rsc = ptr;
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
		rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}

/**
 * rproc_alloc_carveout() - allocate the specified carveout
 * @rproc: rproc handle
 * @mem: the memory entry to allocate
 *
 * This function allocates the specified memory entry @mem using
 * dma_alloc_coherent() as default allocator
 */
static int rproc_alloc_carveout(struct rproc *rproc,
				struct rproc_mem_entry *mem)
{
	struct rproc_mem_entry *mapping = NULL;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%zx\n",
			mem->len);
		return -ENOMEM;
	}

	dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
		va, &dma, mem->len);

	if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
		/*
		 * Check requested da is equal to dma address
		 * and print a warn message in case of misalignment.
		 * Don't stop rproc_start sequence as coprocessor may
		 * build pa to da translation on its side.
		 */
		if (mem->da != (u32)dma)
			dev_warn(dev->parent,
				 "Allocated carveout doesn't fit device address request\n");
	}

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
				mem->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = mem->da;
		mapping->len = mem->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			mem->da, &dma);
	}

	if (mem->da == FW_RSC_ADDR_ANY) {
		/* Update device address as undefined by requester */
		if ((u64)dma & HIGH_BITS_MASK)
			dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n");

		mem->da = (u32)dma;
	}

	mem->dma = dma;
	mem->va = va;

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, mem->len, va, dma);
	return ret;
}

/**
 * rproc_release_carveout() - release acquired carveout
 * @rproc: rproc handle
 * @mem: the memory entry to release
 *
 * This function releases specified memory entry @mem allocated via
 * rproc_alloc_carveout() function by @rproc.
892 */ 893 static int rproc_release_carveout(struct rproc *rproc, 894 struct rproc_mem_entry *mem) 895 { 896 struct device *dev = &rproc->dev; 897 898 /* clean up carveout allocations */ 899 dma_free_coherent(dev->parent, mem->len, mem->va, mem->dma); 900 return 0; 901 } 902 903 /** 904 * rproc_handle_carveout() - handle phys contig memory allocation requests 905 * @rproc: rproc handle 906 * @ptr: the resource entry 907 * @offset: offset of the resource entry 908 * @avail: size of available data (for image validation) 909 * 910 * This function will handle firmware requests for allocation of physically 911 * contiguous memory regions. 912 * 913 * These request entries should come first in the firmware's resource table, 914 * as other firmware entries might request placing other data objects inside 915 * these memory regions (e.g. data/code segments, trace resource entries, ...). 916 * 917 * Allocating memory this way helps utilizing the reserved physical memory 918 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries 919 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB 920 * pressure is important; it may have a substantial impact on performance. 
921 */ 922 static int rproc_handle_carveout(struct rproc *rproc, 923 void *ptr, int offset, int avail) 924 { 925 struct fw_rsc_carveout *rsc = ptr; 926 struct rproc_mem_entry *carveout; 927 struct device *dev = &rproc->dev; 928 929 if (sizeof(*rsc) > avail) { 930 dev_err(dev, "carveout rsc is truncated\n"); 931 return -EINVAL; 932 } 933 934 /* make sure reserved bytes are zeroes */ 935 if (rsc->reserved) { 936 dev_err(dev, "carveout rsc has non zero reserved bytes\n"); 937 return -EINVAL; 938 } 939 940 dev_dbg(dev, "carveout rsc: name: %s, da 0x%x, pa 0x%x, len 0x%x, flags 0x%x\n", 941 rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags); 942 943 /* 944 * Check carveout rsc already part of a registered carveout, 945 * Search by name, then check the da and length 946 */ 947 carveout = rproc_find_carveout_by_name(rproc, rsc->name); 948 949 if (carveout) { 950 if (carveout->rsc_offset != FW_RSC_ADDR_ANY) { 951 dev_err(dev, 952 "Carveout already associated to resource table\n"); 953 return -ENOMEM; 954 } 955 956 if (rproc_check_carveout_da(rproc, carveout, rsc->da, rsc->len)) 957 return -ENOMEM; 958 959 /* Update memory carveout with resource table info */ 960 carveout->rsc_offset = offset; 961 carveout->flags = rsc->flags; 962 963 return 0; 964 } 965 966 /* Register carveout in in list */ 967 carveout = rproc_mem_entry_init(dev, NULL, 0, rsc->len, rsc->da, 968 rproc_alloc_carveout, 969 rproc_release_carveout, rsc->name); 970 if (!carveout) { 971 dev_err(dev, "Can't allocate memory entry structure\n"); 972 return -ENOMEM; 973 } 974 975 carveout->flags = rsc->flags; 976 carveout->rsc_offset = offset; 977 rproc_add_carveout(rproc, carveout); 978 979 return 0; 980 } 981 982 /** 983 * rproc_add_carveout() - register an allocated carveout region 984 * @rproc: rproc handle 985 * @mem: memory entry to register 986 * 987 * This function registers specified memory entry in @rproc carveouts list. 988 * Specified carveout should have been allocated before registering. 
989 */ 990 void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem) 991 { 992 list_add_tail(&mem->node, &rproc->carveouts); 993 } 994 EXPORT_SYMBOL(rproc_add_carveout); 995 996 /** 997 * rproc_mem_entry_init() - allocate and initialize rproc_mem_entry struct 998 * @dev: pointer on device struct 999 * @va: virtual address 1000 * @dma: dma address 1001 * @len: memory carveout length 1002 * @da: device address 1003 * @alloc: memory carveout allocation function 1004 * @release: memory carveout release function 1005 * @name: carveout name 1006 * 1007 * This function allocates a rproc_mem_entry struct and fill it with parameters 1008 * provided by client. 1009 */ 1010 __printf(8, 9) 1011 struct rproc_mem_entry * 1012 rproc_mem_entry_init(struct device *dev, 1013 void *va, dma_addr_t dma, size_t len, u32 da, 1014 int (*alloc)(struct rproc *, struct rproc_mem_entry *), 1015 int (*release)(struct rproc *, struct rproc_mem_entry *), 1016 const char *name, ...) 1017 { 1018 struct rproc_mem_entry *mem; 1019 va_list args; 1020 1021 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 1022 if (!mem) 1023 return mem; 1024 1025 mem->va = va; 1026 mem->dma = dma; 1027 mem->da = da; 1028 mem->len = len; 1029 mem->alloc = alloc; 1030 mem->release = release; 1031 mem->rsc_offset = FW_RSC_ADDR_ANY; 1032 mem->of_resm_idx = -1; 1033 1034 va_start(args, name); 1035 vsnprintf(mem->name, sizeof(mem->name), name, args); 1036 va_end(args); 1037 1038 return mem; 1039 } 1040 EXPORT_SYMBOL(rproc_mem_entry_init); 1041 1042 /** 1043 * rproc_of_resm_mem_entry_init() - allocate and initialize rproc_mem_entry struct 1044 * from a reserved memory phandle 1045 * @dev: pointer on device struct 1046 * @of_resm_idx: reserved memory phandle index in "memory-region" 1047 * @len: memory carveout length 1048 * @da: device address 1049 * @name: carveout name 1050 * 1051 * This function allocates a rproc_mem_entry struct and fill it with parameters 1052 * provided by client. 
1053 */ 1054 __printf(5, 6) 1055 struct rproc_mem_entry * 1056 rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, 1057 u32 da, const char *name, ...) 1058 { 1059 struct rproc_mem_entry *mem; 1060 va_list args; 1061 1062 mem = kzalloc(sizeof(*mem), GFP_KERNEL); 1063 if (!mem) 1064 return mem; 1065 1066 mem->da = da; 1067 mem->len = len; 1068 mem->rsc_offset = FW_RSC_ADDR_ANY; 1069 mem->of_resm_idx = of_resm_idx; 1070 1071 va_start(args, name); 1072 vsnprintf(mem->name, sizeof(mem->name), name, args); 1073 va_end(args); 1074 1075 return mem; 1076 } 1077 EXPORT_SYMBOL(rproc_of_resm_mem_entry_init); 1078 1079 /** 1080 * rproc_of_parse_firmware() - parse and return the firmware-name 1081 * @dev: pointer on device struct representing a rproc 1082 * @index: index to use for the firmware-name retrieval 1083 * @fw_name: pointer to a character string, in which the firmware 1084 * name is returned on success and unmodified otherwise. 1085 * 1086 * This is an OF helper function that parses a device's DT node for 1087 * the "firmware-name" property and returns the firmware name pointer 1088 * in @fw_name on success. 1089 * 1090 * Return: 0 on success, or an appropriate failure. 1091 */ 1092 int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name) 1093 { 1094 int ret; 1095 1096 ret = of_property_read_string_index(dev->of_node, "firmware-name", 1097 index, fw_name); 1098 return ret ? ret : 0; 1099 } 1100 EXPORT_SYMBOL(rproc_of_parse_firmware); 1101 1102 /* 1103 * A lookup table for resource handlers. The indices are defined in 1104 * enum fw_resource_type. 
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = rproc_handle_carveout,
	[RSC_DEVMEM] = rproc_handle_devmem,
	[RSC_TRACE] = rproc_handle_trace,
	[RSC_VDEV] = rproc_handle_vdev,
};

/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	/* no resource table is a valid (empty) configuration */
	if (!rproc->table_ptr)
		return 0;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = rproc->table_sz - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		/* vendor-specific entries get first refusal via the platform hook */
		if (hdr->type >= RSC_VENDOR_START &&
		    hdr->type <= RSC_VENDOR_END) {
			ret = rproc_handle_rsc(rproc, hdr->type, rsc,
					       offset + sizeof(*hdr), avail);
			if (ret == RSC_HANDLED)
				continue;
			else if (ret < 0)
				break;

			dev_warn(dev, "unsupported vendor resource %d\n",
				 hdr->type);
			continue;
		}

		/* unknown standard types are skipped, not fatal */
		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}

/*
 * Call prepare() on every subdevice in registration order; on failure,
 * unprepare() the ones already prepared, in reverse order.
 */
static int rproc_prepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->prepare) {
			ret = subdev->prepare(subdev);
			if (ret)
				goto unroll_preparation;
		}
	}

	return 0;

unroll_preparation:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}

	return ret;
}

/*
 * Call start() on every subdevice in registration order; on failure,
 * stop() the ones already started, in reverse order (crashed = true).
 */
static int rproc_start_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;
	int ret;

	list_for_each_entry(subdev, &rproc->subdevs, node) {
		if (subdev->start) {
			ret = subdev->start(subdev);
			if (ret)
				goto unroll_registration;
		}
	}

	return 0;

unroll_registration:
	list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, true);
	}

	return ret;
}

/* Stop all subdevices, in reverse registration order */
static void rproc_stop_subdevices(struct rproc *rproc, bool crashed)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->stop)
			subdev->stop(subdev, crashed);
	}
}

/* Unprepare all subdevices, in reverse registration order */
static void rproc_unprepare_subdevices(struct rproc *rproc)
{
	struct rproc_subdev *subdev;

	list_for_each_entry_reverse(subdev, &rproc->subdevs, node) {
		if (subdev->unprepare)
			subdev->unprepare(subdev);
	}
}

/**
 * rproc_alloc_registered_carveouts() - allocate all carveouts registered
 * in the list
 * @rproc: the remote processor handle
 *
 * This function parses registered carveout list, performs allocation
 * if alloc() ops registered and updates resource table information
 * if rsc_offset set.
 *
 * Return: 0 on success
 */
static int rproc_alloc_registered_carveouts(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct fw_rsc_carveout *rsc;
	struct device *dev = &rproc->dev;
	u64 pa;
	int ret;

	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->alloc) {
			ret = entry->alloc(rproc, entry);
			if (ret) {
				dev_err(dev, "Unable to allocate carveout %s: %d\n",
					entry->name, ret);
				return -ENOMEM;
			}
		}

		if (entry->rsc_offset != FW_RSC_ADDR_ANY) {
			/* update resource table */
			rsc = (void *)rproc->table_ptr + entry->rsc_offset;

			/*
			 * Some remote processors might need to know the pa
			 * even though they are behind an IOMMU. E.g., OMAP4's
			 * remote M3 processor needs this so it can control
			 * on-chip hardware accelerators that are not behind
			 * the IOMMU, and therefore must know the pa.
			 *
			 * Generally we don't want to expose physical addresses
			 * if we don't have to (remote processors are generally
			 * _not_ trusted), so we might want to do this only for
			 * remote processor that _must_ have this (e.g. OMAP4's
			 * dual M3 subsystem).
			 *
			 * Non-IOMMU processors might also want to have this info.
			 * In this case, the device address and the physical address
			 * are the same.
			 */

			/* Use va if defined else dma to generate pa */
			if (entry->va)
				pa = (u64)rproc_va_to_pa(entry->va);
			else
				pa = (u64)entry->dma;

			/* fw_rsc_carveout.pa is only 32 bits wide */
			if (((u64)pa) & HIGH_BITS_MASK)
				dev_warn(dev,
					 "Physical address cast in 32bit to fit resource table format\n");

			rsc->pa = (u32)pa;
			rsc->da = entry->da;
			rsc->len = entry->len;
		}
	}

	return 0;
}


/**
 * rproc_resource_cleanup() - clean up and free all acquired resources
 * @rproc: rproc handle
 *
 * This function will free all resources acquired for @rproc, and it
 * is called whenever @rproc either shuts down or fails to boot.
 */
void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct rproc_debug_trace *trace, *ttmp;
	struct rproc_vdev *rvdev, *rvtmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) {
		rproc_remove_trace_file(trace->tfile);
		rproc->num_traces--;
		list_del(&trace->node);
		kfree(trace);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
				unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		if (entry->release)
			entry->release(rproc, entry);
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		kref_put(&rvdev->refcount, rproc_vdev_release);
	rproc_coredump_cleanup(rproc);
}
EXPORT_SYMBOL(rproc_resource_cleanup);

/*
 * Boot the remote processor with an already-fetched firmware image:
 * load segments, install the resource table, then bring up subdevices
 * and the core. Unwinds in strict reverse order on failure.
 */
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
	struct resource_table *loaded_table;
	struct device *dev = &rproc->dev;
	int ret;

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		return ret;
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table. The address of the vring along with the other
	 * allocated resources (carveouts etc) is stored in cached_table.
	 * In order to pass this information to the remote device we must copy
	 * this information to device memory. We also update the table_ptr so
	 * that any subsequent changes will be applied to the loaded version.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table) {
		memcpy(loaded_table, rproc->cached_table, rproc->table_sz);
		rproc->table_ptr = loaded_table;
	}

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto reset_table_ptr;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
reset_table_ptr:
	rproc->table_ptr = rproc->cached_table;

	return ret;
}

/*
 * Attach to an already-running remote processor: like rproc_start() but
 * uses ops->attach (via rproc_attach_device()) instead of loading and
 * powering up, since the remote side is already live.
 */
static int __rproc_attach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = rproc_prepare_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to prepare subdevices for %s: %d\n",
			rproc->name, ret);
		goto out;
	}

	/* Attach to the remote processor */
	ret = rproc_attach_device(rproc);
	if (ret) {
		dev_err(dev, "can't attach to rproc %s: %d\n",
			rproc->name, ret);
		goto unprepare_subdevices;
	}

	/* Start any subdevices for the remote processor */
	ret = rproc_start_subdevices(rproc);
	if (ret) {
		dev_err(dev, "failed to probe subdevices for %s: %d\n",
			rproc->name, ret);
		goto stop_rproc;
	}

	rproc->state = RPROC_ATTACHED;

	dev_info(dev, "remote processor %s is now attached\n", rproc->name);

	return 0;

stop_rproc:
	rproc->ops->stop(rproc);
unprepare_subdevices:
	rproc_unprepare_subdevices(rproc);
out:
	return ret;
}

/*
 * take a firmware and boot a remote processor with it.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	int ret;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	/* Prepare rproc for firmware loading if needed */
	ret = rproc_prepare_device(rproc);
	if (ret) {
		dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
		goto disable_iommu;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);

	/* Load resource table, core dump segment list etc from the firmware */
	ret = rproc_parse_fw(rproc, fw);
	if (ret)
		goto unprepare_rproc;

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up_resources;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = rproc_start(rproc, fw);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
unprepare_rproc:
	/* release HW resources if needed */
	rproc_unprepare_device(rproc);
disable_iommu:
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * Retrieve the resource table installed by an external entity (attach
 * path) and point rproc->table_ptr at it; keeps a pristine copy if the
 * processor can later be detached and re-attached.
 */
static int rproc_set_rsc_table(struct rproc *rproc)
{
	struct resource_table *table_ptr;
	struct device *dev = &rproc->dev;
	size_t table_sz;
	int ret;

	table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
	if (!table_ptr) {
		/* Not having a resource table is acceptable */
		return 0;
	}

	if (IS_ERR(table_ptr)) {
		ret = PTR_ERR(table_ptr);
		dev_err(dev, "can't load resource table: %d\n", ret);
		return ret;
	}

	/*
	 * If it is possible to detach the remote processor, keep an untouched
	 * copy of the resource table. That way we can start fresh again when
	 * the remote processor is re-attached, that is:
	 *
	 *	DETACHED -> ATTACHED -> DETACHED -> ATTACHED
	 *
	 * Free'd in rproc_reset_rsc_table_on_detach() and
	 * rproc_reset_rsc_table_on_stop().
	 */
	if (rproc->ops->detach) {
		rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
		if (!rproc->clean_table)
			return -ENOMEM;
	} else {
		rproc->clean_table = NULL;
	}

	rproc->cached_table = NULL;
	rproc->table_ptr = table_ptr;
	rproc->table_sz = table_sz;

	return 0;
}

static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
{
	struct resource_table *table_ptr;

	/* A resource table was never retrieved, nothing to do here */
	if (!rproc->table_ptr)
		return 0;

	/*
	 * If we made it to this point a clean_table _must_ have been
	 * allocated in rproc_set_rsc_table(). If one isn't present
	 * something went really wrong and we must complain.
	 */
	if (WARN_ON(!rproc->clean_table))
		return -EINVAL;

	/* Remember where the external entity installed the resource table */
	table_ptr = rproc->table_ptr;

	/*
	 * If we made it here the remote processor was started by another
	 * entity and a cache table doesn't exist. As such make a copy of
	 * the resource table currently used by the remote processor and
	 * use that for the rest of the shutdown process. The memory
	 * allocated here is free'd in rproc_detach().
	 */
	rproc->cached_table = kmemdup(rproc->table_ptr,
				      rproc->table_sz, GFP_KERNEL);
	if (!rproc->cached_table)
		return -ENOMEM;

	/*
	 * Use a copy of the resource table for the remainder of the
	 * shutdown process.
	 */
	rproc->table_ptr = rproc->cached_table;

	/*
	 * Reset the memory area where the firmware loaded the resource table
	 * to its original value. That way when we re-attach the remote
	 * processor the resource table is clean and ready to be used again.
	 */
	memcpy(table_ptr, rproc->clean_table, rproc->table_sz);

	/*
	 * The clean resource table is no longer needed. Allocated in
	 * rproc_set_rsc_table().
	 */
	kfree(rproc->clean_table);

	return 0;
}

static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
{
	/* A resource table was never retrieved, nothing to do here */
	if (!rproc->table_ptr)
		return 0;

	/*
	 * If a cache table exists the remote processor was started by
	 * the remoteproc core. That cache table should be used for
	 * the rest of the shutdown process.
	 */
	if (rproc->cached_table)
		goto out;

	/*
	 * If we made it here the remote processor was started by another
	 * entity and a cache table doesn't exist. As such make a copy of
	 * the resource table currently used by the remote processor and
	 * use that for the rest of the shutdown process. The memory
	 * allocated here is free'd in rproc_shutdown().
	 */
	rproc->cached_table = kmemdup(rproc->table_ptr,
				      rproc->table_sz, GFP_KERNEL);
	if (!rproc->cached_table)
		return -ENOMEM;

	/*
	 * Since the remote processor is being switched off the clean table
	 * won't be needed. Allocated in rproc_set_rsc_table().
	 */
	kfree(rproc->clean_table);

out:
	/*
	 * Use a copy of the resource table for the remainder of the
	 * shutdown process.
	 */
	rproc->table_ptr = rproc->cached_table;
	return 0;
}

/*
 * Attach to remote processor - similar to rproc_fw_boot() but without
 * the steps that deal with the firmware image.
 */
static int rproc_attach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	/* Do anything that is needed to boot the remote processor */
	ret = rproc_prepare_device(rproc);
	if (ret) {
		dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
		goto disable_iommu;
	}

	ret = rproc_set_rsc_table(rproc);
	if (ret) {
		dev_err(dev, "can't load resource table: %d\n", ret);
		goto unprepare_device;
	}

	/* reset max_notifyid */
	rproc->max_notifyid = -1;

	/* reset handled vdev */
	rproc->nb_vdev = 0;

	/*
	 * Handle firmware resources required to attach to a remote processor.
	 * Because we are attaching rather than booting the remote processor,
	 * we expect the platform driver to properly set rproc->table_ptr.
	 */
	ret = rproc_handle_resources(rproc, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto unprepare_device;
	}

	/* Allocate carveout resources associated to rproc */
	ret = rproc_alloc_registered_carveouts(rproc);
	if (ret) {
		dev_err(dev, "Failed to allocate associated carveouts: %d\n",
			ret);
		goto clean_up_resources;
	}

	ret = __rproc_attach(rproc);
	if (ret)
		goto clean_up_resources;

	return 0;

clean_up_resources:
	rproc_resource_cleanup(rproc);
unprepare_device:
	/* release HW resources if needed */
	rproc_unprepare_device(rproc);
disable_iommu:
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and boot it up.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device. one other option is just to use kref here,
 * that might be cleaner).
 */
static void rproc_auto_boot_callback(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;

	rproc_boot(rproc);

	release_firmware(fw);
}

static int rproc_trigger_auto_boot(struct rproc *rproc)
{
	int ret;

	/*
	 * Since the remote processor is in a detached state, it has already
	 * been booted by another entity. As such there is no point in waiting
	 * for a firmware image to be loaded, we can simply initiate the process
	 * of attaching to it immediately.
	 */
	if (rproc->state == RPROC_DETACHED)
		return rproc_boot(rproc);

	/*
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_auto_boot_callback);
	if (ret < 0)
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);

	return ret;
}

/*
 * Power down the remote processor and transition it to RPROC_OFFLINE.
 * @crashed is forwarded to the subdevices so they can distinguish an
 * orderly shutdown from crash recovery.
 */
static int rproc_stop(struct rproc *rproc, bool crashed)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* No need to continue if a stop() operation has not been provided */
	if (!rproc->ops->stop)
		return -EINVAL;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, crashed);

	/* the installed resource table is no longer accessible */
	ret = rproc_reset_rsc_table_on_stop(rproc);
	if (ret) {
		dev_err(dev, "can't reset resource table: %d\n", ret);
		return ret;
	}


	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		dev_err(dev, "can't stop rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

	return 0;
}

/*
 * __rproc_detach(): Does the opposite of __rproc_attach()
 */
static int __rproc_detach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	/* No need to continue if a detach() operation has not been provided */
	if (!rproc->ops->detach)
		return -EINVAL;

	/* Stop any subdevices for the remote processor */
	rproc_stop_subdevices(rproc, false);

	/* the installed resource table is no longer accessible */
	ret = rproc_reset_rsc_table_on_detach(rproc);
	if (ret) {
		dev_err(dev, "can't reset resource table: %d\n", ret);
		return ret;
	}

	/* Tell the remote processor the core isn't available anymore */
	ret = rproc->ops->detach(rproc);
	if (ret) {
		dev_err(dev, "can't detach from rproc: %d\n", ret);
		return ret;
	}

	rproc_unprepare_subdevices(rproc);

	rproc->state = RPROC_DETACHED;

	dev_info(dev, "detached remote processor %s\n", rproc->name);

	return 0;
}

/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: the remote processor
 *
 * The recovery is done by resetting all the virtio devices, that way all the
 * rpmsg drivers will be reset along with the remote processor making the
 * remoteproc functional again.
 *
 * This function can sleep, so it cannot be called from atomic context.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret)
		return ret;

	/* State could have changed before we got the mutex */
	if (rproc->state != RPROC_CRASHED)
		goto unlock_mutex;

	dev_err(dev, "recovering %s\n", rproc->name);

	ret = rproc_stop(rproc, true);
	if (ret)
		goto unlock_mutex;

	/* generate coredump */
	rproc->ops->coredump(rproc);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto unlock_mutex;
	}

	/* boot the remote processor up again */
	ret = rproc_start(rproc, firmware_p);

	release_firmware(firmware_p);

unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}

/**
 * rproc_crash_handler_work() - handle a crash
 * @work: work treating the crash
 *
 * This function needs to handle everything related to a crash, like cpu
 * registers and stack dump, information to help to debug the fatal error, etc.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	/* drop the lock: rproc_trigger_recovery() takes it itself */
	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);

	/* balances the pm_stay_awake() taken when the crash was reported */
	pm_relax(rproc->dev.parent);
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	if (rproc->state == RPROC_DELETED) {
		ret = -ENODEV;
		dev_err(dev, "can't boot deleted rproc %s\n", rproc->name);
		goto unlock_mutex;
	}

	/* skip the boot or attach process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	if (rproc->state == RPROC_DETACHED) {
		dev_info(dev, "attaching to %s\n", rproc->name);

		ret = rproc_attach(rproc);
	} else {
		dev_info(dev, "powering up %s\n", rproc->name);

		/* load firmware */
		ret = request_firmware(&firmware_p, rproc->firmware, dev);
		if (ret < 0) {
			dev_err(dev, "request_firmware failed: %d\n", ret);
			goto downref_rproc;
		}

		ret = rproc_fw_boot(rproc, firmware_p);

		release_firmware(firmware_p);
	}

downref_rproc:
	/* undo the power increment taken above if boot/attach failed */
	if (ret)
		atomic_dec(&rproc->power);
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount.
 *   which means that the @rproc handle stays valid even after rproc_shutdown()
 *   returns, and users can still use it with a subsequent rproc_boot(), if
 *   needed.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	ret = rproc_stop(rproc, false);
	if (ret) {
		/* restore the power refcount so a retry is possible */
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	/* release HW resources if needed */
	rproc_unprepare_device(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
}
EXPORT_SYMBOL(rproc_shutdown);

/**
 * rproc_detach() - Detach the remote processor from the
 * remoteproc core
 *
 * @rproc: the remote processor
 *
 * Detach a remote processor (previously attached to with rproc_attach()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without disconnecting the device.
 *
 * Function rproc_detach() calls __rproc_detach() in order to let a remote
 * processor know that services provided by the application processor are
 * no longer available.
 * From there it should be possible to remove the
 * platform driver and even power cycle the application processor (if the HW
 * supports it) without needing to switch off the remote processor.
 */
int rproc_detach(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power)) {
		ret = 0;
		goto out;
	}

	ret = __rproc_detach(rproc);
	if (ret) {
		/* restore the power refcount so a retry is possible */
		atomic_inc(&rproc->power);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	/* release HW resources if needed */
	rproc_unprepare_device(rproc);

	rproc_disable_iommu(rproc);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);
	rproc->cached_table = NULL;
	rproc->table_ptr = NULL;
out:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_detach);

/**
 * rproc_get_by_phandle() - find a remote processor by phandle
 * @phandle: phandle to the rproc
 *
 * Finds an rproc handle using the remote processor's phandle, and then
 * return a handle to the rproc.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Returns the rproc handle on success, and NULL on failure.
 */
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			/*
			 * prevent underlying implementation from being removed
			 *
			 * NOTE(review): r->dev.parent->driver is dereferenced
			 * without a NULL check — presumably the parent is
			 * always bound while on rproc_list, but verify there
			 * is no window where the driver has been unbound.
			 */
			if (!try_module_get(r->dev.parent->driver->owner)) {
				dev_err(&r->dev, "can't get owner\n");
				break;
			}

			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	rcu_read_unlock();

	of_node_put(np);

	return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);

/**
 * rproc_set_firmware() - assign a new firmware
 * @rproc: rproc handle to which the new firmware is being assigned
 * @fw_name: new firmware name to be assigned
 *
 * This function allows remoteproc drivers or clients to configure a custom
 * firmware name that is different from the default name used during remoteproc
 * registration. The function does not trigger a remote processor boot,
 * only sets the firmware name used for a subsequent boot. This function
 * should also be called only when the remote processor is offline.
 *
 * This allows either the userspace to configure a different name through
 * sysfs or a kernel-level remoteproc or a remoteproc client driver to set
 * a specific firmware when it is controlling the boot and shutdown of the
 * remote processor.
2210 * 2211 * Return: 0 on success or a negative value upon failure 2212 */ 2213 int rproc_set_firmware(struct rproc *rproc, const char *fw_name) 2214 { 2215 struct device *dev; 2216 int ret, len; 2217 char *p; 2218 2219 if (!rproc || !fw_name) 2220 return -EINVAL; 2221 2222 dev = rproc->dev.parent; 2223 2224 ret = mutex_lock_interruptible(&rproc->lock); 2225 if (ret) { 2226 dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret); 2227 return -EINVAL; 2228 } 2229 2230 if (rproc->state != RPROC_OFFLINE) { 2231 dev_err(dev, "can't change firmware while running\n"); 2232 ret = -EBUSY; 2233 goto out; 2234 } 2235 2236 len = strcspn(fw_name, "\n"); 2237 if (!len) { 2238 dev_err(dev, "can't provide empty string for firmware name\n"); 2239 ret = -EINVAL; 2240 goto out; 2241 } 2242 2243 p = kstrndup(fw_name, len, GFP_KERNEL); 2244 if (!p) { 2245 ret = -ENOMEM; 2246 goto out; 2247 } 2248 2249 kfree_const(rproc->firmware); 2250 rproc->firmware = p; 2251 2252 out: 2253 mutex_unlock(&rproc->lock); 2254 return ret; 2255 } 2256 EXPORT_SYMBOL(rproc_set_firmware); 2257 2258 static int rproc_validate(struct rproc *rproc) 2259 { 2260 switch (rproc->state) { 2261 case RPROC_OFFLINE: 2262 /* 2263 * An offline processor without a start() 2264 * function makes no sense. 2265 */ 2266 if (!rproc->ops->start) 2267 return -EINVAL; 2268 break; 2269 case RPROC_DETACHED: 2270 /* 2271 * A remote processor in a detached state without an 2272 * attach() function makes not sense. 2273 */ 2274 if (!rproc->ops->attach) 2275 return -EINVAL; 2276 /* 2277 * When attaching to a remote processor the device memory 2278 * is already available and as such there is no need to have a 2279 * cached table. 2280 */ 2281 if (rproc->cached_table) 2282 return -EINVAL; 2283 break; 2284 default: 2285 /* 2286 * When adding a remote processor, the state of the device 2287 * can be offline or detached, nothing else. 
2288 */ 2289 return -EINVAL; 2290 } 2291 2292 return 0; 2293 } 2294 2295 /** 2296 * rproc_add() - register a remote processor 2297 * @rproc: the remote processor handle to register 2298 * 2299 * Registers @rproc with the remoteproc framework, after it has been 2300 * allocated with rproc_alloc(). 2301 * 2302 * This is called by the platform-specific rproc implementation, whenever 2303 * a new remote processor device is probed. 2304 * 2305 * Returns 0 on success and an appropriate error code otherwise. 2306 * 2307 * Note: this function initiates an asynchronous firmware loading 2308 * context, which will look for virtio devices supported by the rproc's 2309 * firmware. 2310 * 2311 * If found, those virtio devices will be created and added, so as a result 2312 * of registering this remote processor, additional virtio drivers might be 2313 * probed. 2314 */ 2315 int rproc_add(struct rproc *rproc) 2316 { 2317 struct device *dev = &rproc->dev; 2318 int ret; 2319 2320 ret = device_add(dev); 2321 if (ret < 0) 2322 return ret; 2323 2324 ret = rproc_validate(rproc); 2325 if (ret < 0) 2326 return ret; 2327 2328 dev_info(dev, "%s is available\n", rproc->name); 2329 2330 /* create debugfs entries */ 2331 rproc_create_debug_dir(rproc); 2332 2333 /* add char device for this remoteproc */ 2334 ret = rproc_char_device_add(rproc); 2335 if (ret < 0) 2336 return ret; 2337 2338 /* if rproc is marked always-on, request it to boot */ 2339 if (rproc->auto_boot) { 2340 ret = rproc_trigger_auto_boot(rproc); 2341 if (ret < 0) 2342 return ret; 2343 } 2344 2345 /* expose to rproc_get_by_phandle users */ 2346 mutex_lock(&rproc_list_mutex); 2347 list_add_rcu(&rproc->node, &rproc_list); 2348 mutex_unlock(&rproc_list_mutex); 2349 2350 return 0; 2351 } 2352 EXPORT_SYMBOL(rproc_add); 2353 2354 static void devm_rproc_remove(void *rproc) 2355 { 2356 rproc_del(rproc); 2357 } 2358 2359 /** 2360 * devm_rproc_add() - resource managed rproc_add() 2361 * @dev: the underlying device 2362 * @rproc: the 
remote processor handle to register 2363 * 2364 * This function performs like rproc_add() but the registered rproc device will 2365 * automatically be removed on driver detach. 2366 * 2367 * Returns: 0 on success, negative errno on failure 2368 */ 2369 int devm_rproc_add(struct device *dev, struct rproc *rproc) 2370 { 2371 int err; 2372 2373 err = rproc_add(rproc); 2374 if (err) 2375 return err; 2376 2377 return devm_add_action_or_reset(dev, devm_rproc_remove, rproc); 2378 } 2379 EXPORT_SYMBOL(devm_rproc_add); 2380 2381 /** 2382 * rproc_type_release() - release a remote processor instance 2383 * @dev: the rproc's device 2384 * 2385 * This function should _never_ be called directly. 2386 * 2387 * It will be called by the driver core when no one holds a valid pointer 2388 * to @dev anymore. 2389 */ 2390 static void rproc_type_release(struct device *dev) 2391 { 2392 struct rproc *rproc = container_of(dev, struct rproc, dev); 2393 2394 dev_info(&rproc->dev, "releasing %s\n", rproc->name); 2395 2396 idr_destroy(&rproc->notifyids); 2397 2398 if (rproc->index >= 0) 2399 ida_simple_remove(&rproc_dev_index, rproc->index); 2400 2401 kfree_const(rproc->firmware); 2402 kfree_const(rproc->name); 2403 kfree(rproc->ops); 2404 kfree(rproc); 2405 } 2406 2407 static const struct device_type rproc_type = { 2408 .name = "remoteproc", 2409 .release = rproc_type_release, 2410 }; 2411 2412 static int rproc_alloc_firmware(struct rproc *rproc, 2413 const char *name, const char *firmware) 2414 { 2415 const char *p; 2416 2417 /* 2418 * Allocate a firmware name if the caller gave us one to work 2419 * with. Otherwise construct a new one using a default pattern. 
2420 */ 2421 if (firmware) 2422 p = kstrdup_const(firmware, GFP_KERNEL); 2423 else 2424 p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name); 2425 2426 if (!p) 2427 return -ENOMEM; 2428 2429 rproc->firmware = p; 2430 2431 return 0; 2432 } 2433 2434 static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops) 2435 { 2436 rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL); 2437 if (!rproc->ops) 2438 return -ENOMEM; 2439 2440 /* Default to rproc_coredump if no coredump function is specified */ 2441 if (!rproc->ops->coredump) 2442 rproc->ops->coredump = rproc_coredump; 2443 2444 if (rproc->ops->load) 2445 return 0; 2446 2447 /* Default to ELF loader if no load function is specified */ 2448 rproc->ops->load = rproc_elf_load_segments; 2449 rproc->ops->parse_fw = rproc_elf_load_rsc_table; 2450 rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table; 2451 rproc->ops->sanity_check = rproc_elf_sanity_check; 2452 rproc->ops->get_boot_addr = rproc_elf_get_boot_addr; 2453 2454 return 0; 2455 } 2456 2457 /** 2458 * rproc_alloc() - allocate a remote processor handle 2459 * @dev: the underlying device 2460 * @name: name of this remote processor 2461 * @ops: platform-specific handlers (mainly start/stop) 2462 * @firmware: name of firmware file to load, can be NULL 2463 * @len: length of private data needed by the rproc driver (in bytes) 2464 * 2465 * Allocates a new remote processor handle, but does not register 2466 * it yet. if @firmware is NULL, a default name is used. 2467 * 2468 * This function should be used by rproc implementations during initialization 2469 * of the remote processor. 2470 * 2471 * After creating an rproc handle using this function, and when ready, 2472 * implementations should then call rproc_add() to complete 2473 * the registration of the remote processor. 2474 * 2475 * On success the new rproc is returned, and on failure, NULL. 2476 * 2477 * Note: _never_ directly deallocate @rproc, even if it was not registered 2478 * yet. 
Instead, when you need to unroll rproc_alloc(), use rproc_free().
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
			  const struct rproc_ops *ops,
			  const char *firmware, int len)
{
	struct rproc *rproc;

	if (!dev || !name || !ops)
		return NULL;

	/* the driver's private data is allocated in the same block */
	rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
	if (!rproc)
		return NULL;

	/* private data lives immediately after the rproc structure */
	rproc->priv = &rproc[1];
	rproc->auto_boot = true;
	rproc->elf_class = ELFCLASSNONE;
	rproc->elf_machine = EM_NONE;

	/*
	 * Initialize the device early so that failures below can be
	 * unwound with a single put_device(), which ends up in
	 * rproc_type_release().
	 */
	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;
	rproc->dev.class = &rproc_class;
	rproc->dev.driver_data = rproc;
	idr_init(&rproc->notifyids);

	rproc->name = kstrdup_const(name, GFP_KERNEL);
	if (!rproc->name)
		goto put_device;

	if (rproc_alloc_firmware(rproc, name, firmware))
		goto put_device;

	if (rproc_alloc_ops(rproc, ops))
		goto put_device;

	/* Assign a unique device index and name */
	rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
		goto put_device;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	mutex_init(&rproc->lock);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);
	INIT_LIST_HEAD(&rproc->subdevs);
	INIT_LIST_HEAD(&rproc->dump_segments);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);

	rproc->state = RPROC_OFFLINE;

	return rproc;

put_device:
	/* release path frees name/firmware/ops and the rproc itself */
	put_device(&rproc->dev);
	return NULL;
}
EXPORT_SYMBOL(rproc_alloc);

/**
 * rproc_free() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * This
function decrements the rproc dev refcount.
 *
 * If no one holds any reference to rproc anymore, then its refcount would
 * now drop to zero, and it would be freed.
 */
void rproc_free(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);

/**
 * rproc_put() - release rproc reference
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to rproc anymore, then its refcount would
 * now drop to zero, and it would be freed.
 */
void rproc_put(struct rproc *rproc)
{
	/* drop the module reference taken by rproc_get_by_phandle() */
	module_put(rproc->dev.parent->driver->owner);
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * This function should be called when the platform specific rproc
 * implementation decides to remove the rproc device. it should
 * _only_ be called if a previous invocation of rproc_add()
 * has completed successfully.
 *
 * After rproc_del() returns, @rproc isn't freed yet, because
 * of the outstanding reference created by rproc_alloc. To decrement that
 * one last refcount, one still needs to call rproc_free().
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_del(struct rproc *rproc)
{
	if (!rproc)
		return -EINVAL;

	/* TODO: make sure this works with rproc->power > 1 */
	rproc_shutdown(rproc);

	mutex_lock(&rproc->lock);
	rproc->state = RPROC_DELETED;
	mutex_unlock(&rproc->lock);

	rproc_delete_debug_dir(rproc);
	rproc_char_device_remove(rproc);

	/* the rproc is downref'ed as soon as it's removed from the klist */
	mutex_lock(&rproc_list_mutex);
	list_del_rcu(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	/* Ensure that no readers of rproc_list are still active */
	synchronize_rcu();

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);

/* devres release callback: drops the reference taken by rproc_alloc() */
static void devm_rproc_free(struct device *dev, void *res)
{
	rproc_free(*(struct rproc **)res);
}

/**
 * devm_rproc_alloc() - resource managed rproc_alloc()
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * This function performs like rproc_alloc() but the acquired rproc device will
 * automatically be released on driver detach.
2637 * 2638 * Returns: new rproc instance, or NULL on failure 2639 */ 2640 struct rproc *devm_rproc_alloc(struct device *dev, const char *name, 2641 const struct rproc_ops *ops, 2642 const char *firmware, int len) 2643 { 2644 struct rproc **ptr, *rproc; 2645 2646 ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL); 2647 if (!ptr) 2648 return NULL; 2649 2650 rproc = rproc_alloc(dev, name, ops, firmware, len); 2651 if (rproc) { 2652 *ptr = rproc; 2653 devres_add(dev, ptr); 2654 } else { 2655 devres_free(ptr); 2656 } 2657 2658 return rproc; 2659 } 2660 EXPORT_SYMBOL(devm_rproc_alloc); 2661 2662 /** 2663 * rproc_add_subdev() - add a subdevice to a remoteproc 2664 * @rproc: rproc handle to add the subdevice to 2665 * @subdev: subdev handle to register 2666 * 2667 * Caller is responsible for populating optional subdevice function pointers. 2668 */ 2669 void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev) 2670 { 2671 list_add_tail(&subdev->node, &rproc->subdevs); 2672 } 2673 EXPORT_SYMBOL(rproc_add_subdev); 2674 2675 /** 2676 * rproc_remove_subdev() - remove a subdevice from a remoteproc 2677 * @rproc: rproc handle to remove the subdevice from 2678 * @subdev: subdev handle, previously registered with rproc_add_subdev() 2679 */ 2680 void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev) 2681 { 2682 list_del(&subdev->node); 2683 } 2684 EXPORT_SYMBOL(rproc_remove_subdev); 2685 2686 /** 2687 * rproc_get_by_child() - acquire rproc handle of @dev's ancestor 2688 * @dev: child device to find ancestor of 2689 * 2690 * Returns the ancestor rproc instance, or NULL if not found. 
 */
struct rproc *rproc_get_by_child(struct device *dev)
{
	/* walk up the device hierarchy looking for a remoteproc device */
	for (dev = dev->parent; dev; dev = dev->parent) {
		if (dev->type == &rproc_type)
			return dev->driver_data;
	}

	return NULL;
}
EXPORT_SYMBOL(rproc_get_by_child);

/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * This function must be called every time a crash is detected by the low-level
 * drivers implementing a specific remoteproc. This should not be called from a
 * non-remoteproc driver.
 *
 * This function can be called from atomic/interrupt context.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	/* Prevent suspend while the remoteproc is being recovered */
	pm_stay_awake(rproc->dev.parent);

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* create a new task to handle the error; recovery runs in process context */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);

/*
 * Panic notifier: give every running/attached rproc that has a panic hook
 * a chance to quiesce, then wait for the longest requested delay.
 */
static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	unsigned int longest = 0;
	struct rproc *rproc;
	unsigned int d;

	rcu_read_lock();
	list_for_each_entry_rcu(rproc, &rproc_list, node) {
		if (!rproc->ops->panic)
			continue;

		if (rproc->state != RPROC_RUNNING &&
		    rproc->state != RPROC_ATTACHED)
			continue;

		d = rproc->ops->panic(rproc);
		longest = max(longest, d);
	}
	rcu_read_unlock();

	/*
	 * Delay for the longest requested duration before returning. This can
	 * be used by the remoteproc drivers to give the remote processor time
	 * to perform any requested operations (such as flush caches), when
	 * it's not possible to signal the Linux side due to the panic.
	 */
	mdelay(longest);

	return NOTIFY_DONE;
}

static void __init rproc_init_panic(void)
{
	rproc_panic_nb.notifier_call = rproc_panic_handler;
	atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
}

static void __exit rproc_exit_panic(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
}

static int __init remoteproc_init(void)
{
	rproc_init_sysfs();
	rproc_init_debugfs();
	rproc_init_cdev();
	rproc_init_panic();

	return 0;
}
subsys_initcall(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	rproc_exit_panic();
	rproc_exit_debugfs();
	rproc_exit_sysfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");