// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 Remote Processor(s) driver common code
 *
 * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
 * ti_k3_m4_remoteproc.c.
 *
 * ti_k3_r5_remoteproc.c:
 *	Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 *
 * ti_k3_dsp_remoteproc.c:
 *	Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 *
 * ti_k3_m4_remoteproc.c:
 *	Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
 *	Hari Nagalla <hnagalla@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
#include "ti_k3_common.h"

/**
 * k3_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the K3 mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let the remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
void k3_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_rproc *kproc = container_of(client, struct k3_rproc, client);
	struct device *dev = kproc->rproc->dev.parent;
	struct rproc *rproc = kproc->rproc;
	u32 msg = (u32)(uintptr_t)(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", rproc->name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x\n", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}
EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback);
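
/*
 * Illustrative sketch (not part of the driver): how the callback above
 * partitions the inbound message space. The exact RP_MBOX_* values come from
 * omap_remoteproc.h; this only shows the layering of the dispatch, assuming
 * a firmware that speaks the OMAP-style mailbox protocol:
 *
 *	[0 .. rproc->max_notifyid]           virtqueue index, forwarded to
 *	                                     rproc_vq_interrupt()
 *	[RP_MBOX_READY .. RP_MBOX_END_MSG)   out-of-band event (crash, echo
 *	                                     reply, ...), handled or silently
 *	                                     accepted above
 *	anything else                        dropped with a debug message
 */
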
/*
 * Kick the remote processor to notify about pending unprocessed messages.
 * The vqid argument is unused and inconsequential, as the kick is performed
 * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
 * the remote processor is expected to process both its Tx and Rx virtqueues.
 */
void k3_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 msg = (u32)vqid;
	int ret;

	/*
	 * Send the index of the triggered virtqueue in the mailbox payload.
	 * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
	 * void * is 64-bit. It is safely cast back to u32 in the mailbox
	 * driver.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}
EXPORT_SYMBOL_GPL(k3_rproc_kick);

/* Put the remote processor into reset */
int k3_rproc_reset(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset) {
		ret = reset_control_assert(kproc->reset);
		if (ret)
			dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
	} else {
		ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret)
			dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_rproc_reset);

/* Release the remote processor from reset */
int k3_rproc_release(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset) {
		ret = reset_control_deassert(kproc->reset);
		if (ret) {
			dev_err(dev, "local-reset deassert failed (%pe)\n", ERR_PTR(ret));
			/* roll back: put the module back into reset */
			if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
								  kproc->ti_sci_id))
				dev_warn(dev, "failed to assert module-reset back\n");
		}
	} else {
		ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret)
			dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_rproc_release);

static void k3_rproc_free_channel(void *data)
{
	struct k3_rproc *kproc = data;

	mbox_free_channel(kproc->mbox);
}

int k3_rproc_request_mbox(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	int ret;

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	ret = devm_add_action_or_reset(dev, k3_rproc_free_channel, kproc);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_request_mbox);
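
/*
 * Boot-flow overview (illustrative only): for cores with a local reset
 * (uses_lreset set), the remoteproc core invokes the helpers below in
 * roughly this order when booting through rproc_boot():
 *
 *	k3_rproc_prepare()      module reset released; local reset still
 *	                        asserted, internal RAMs become accessible
 *	(firmware loading)      segments copied in through k3_rproc_da_to_va()
 *	k3_rproc_start()        local reset released, core starts executing
 *	...
 *	k3_rproc_stop()         local reset asserted, core halted
 *	k3_rproc_unprepare()    module reset asserted, core powered down
 */
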
/*
 * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on remote cores to allow loading into the
 * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
 * any firmware loading, and is followed by the .start() ops after loading to
 * actually let the remote cores run.
 */
int k3_rproc_prepare(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is already running, no need to deassert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	/*
	 * Ensure the local reset is asserted so the core doesn't
	 * execute bogus code when the module reset is released.
	 */
	if (kproc->data->uses_lreset) {
		ret = k3_rproc_reset(kproc);
		if (ret)
			return ret;

		/* a status of 0 means the local reset is still deasserted */
		ret = reset_control_status(kproc->reset);
		if (ret <= 0) {
			dev_err(dev, "local reset still not asserted\n");
			return ret ? ret : -EINVAL;
		}
	}

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_prepare);

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * global reset on applicable K3 DSP and M4 cores. This completes the second
 * portion of powering down the remote core. The cores themselves are only
 * halted in the .stop() callback through the local reset, and the .unprepare()
 * ops is invoked by the remoteproc core after the remoteproc is stopped to
 * balance the global reset.
 */
int k3_rproc_unprepare(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is going to be detached, do not assert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed\n");
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_unprepare);

/*
 * Power up the remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met. This callback is invoked only in remoteproc mode.
 */
int k3_rproc_start(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;

	return k3_rproc_release(kproc);
}
EXPORT_SYMBOL_GPL(k3_rproc_start);

/*
 * Stop the remote processor.
 *
 * This function puts the remote processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 */
int k3_rproc_stop(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;

	return k3_rproc_reset(kproc);
}
EXPORT_SYMBOL_GPL(k3_rproc_stop);

/*
 * Attach to a running remote processor (IPC-only mode)
 *
 * The rproc attach callback is a NOP. The remote processor is already booted,
 * and all required resources have been acquired during the probe routine, so
 * there is no need to issue any TI-SCI commands to boot the remote cores in
 * IPC-only mode. This callback is invoked only in IPC-only mode and exists
 * because rproc_validate() checks for its existence.
 */
int k3_rproc_attach(struct rproc *rproc) { return 0; }
EXPORT_SYMBOL_GPL(k3_rproc_attach);

/*
 * Detach from a running remote processor (IPC-only mode)
 *
 * The rproc detach callback is a NOP. The remote processor is not stopped and
 * will be left in its booted state in IPC-only mode. This callback is invoked
 * only in IPC-only mode and, like .attach(), exists only to satisfy the
 * remoteproc core's checks.
 */
int k3_rproc_detach(struct rproc *rproc) { return 0; }
EXPORT_SYMBOL_GPL(k3_rproc_detach);
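
/*
 * Usage sketch (illustrative; the exact field set differs per SoC driver):
 * the exported helpers in this file are meant to be plugged into a
 * SoC-specific driver's rproc_ops, along the lines of:
 *
 *	static const struct rproc_ops k3_example_rproc_ops = {
 *		.prepare	= k3_rproc_prepare,
 *		.unprepare	= k3_rproc_unprepare,
 *		.start		= k3_rproc_start,
 *		.stop		= k3_rproc_stop,
 *		.attach		= k3_rproc_attach,
 *		.detach		= k3_rproc_detach,
 *		.kick		= k3_rproc_kick,
 *		.da_to_va	= k3_rproc_da_to_va,
 *		.get_loaded_rsc_table = k3_get_loaded_rsc_table,
 *	};
 *
 * "k3_example_rproc_ops" is a made-up name; see ti_k3_dsp_remoteproc.c and
 * ti_k3_m4_remoteproc.c for the real tables.
 */
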
/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for a booted remote processor in IPC-only
 * mode. The remote processor firmwares follow a design-by-contract approach
 * and are expected to have the resource table at the base of the DDR region
 * reserved for firmware usage. This provides flexibility for the remote
 * processor to be booted by different bootloaders that may or may not have the
 * ability to publish the resource table address and size through a DT
 * property.
 */
struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
					       size_t *rsc_table_sz)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on the resource table
	 * address, but the hard-coded value suffices to support the IPC-only
	 * mode.
	 */
	*rsc_table_sz = 256;
	return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}
EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table);
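
/*
 * Layout sketch (illustrative): the design-by-contract placement that
 * k3_get_loaded_rsc_table() relies on. kproc->rmem[0] maps the second
 * "memory-region" entry, since the first one is consumed as the vring DMA
 * pool by k3_reserved_mem_init():
 *
 *	kproc->rmem[0].cpu_addr -> +---------------------------------+
 *	                           | resource table (assumed <= 256  |
 *	                           | bytes: header, vdev entry, and  |
 *	                           | an optional trace entry)        |
 *	                           +---------------------------------+
 *	                           | rest of the firmware carveout   |
 *	                           +---------------------------------+
 */
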
/*
 * Custom function to translate a remote processor device address (internal
 * RAMs only) to a kernel virtual address. The remote processors can access
 * their RAMs at either an internal address visible only from a remote
 * processor, or at the SoC-level bus address. Both these addresses need to be
 * looked through for translation. The translated addresses can be used either
 * by the remoteproc core for loading (when using the kernel remoteproc
 * loader), or by any rpmsg bus drivers.
 */
void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* handle rproc-view addresses */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses */
		if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(k3_rproc_da_to_va);
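
/*
 * Worked example for k3_rproc_da_to_va() (hypothetical numbers): consider an
 * internal RAM entry with dev_addr = 0x0, bus_addr = 0x5c00000 and
 * size = 0x8000. Both of these lookups resolve to the same kernel address:
 *
 *	k3_rproc_da_to_va(rproc, 0x100, 4, NULL);	// rproc view
 *	k3_rproc_da_to_va(rproc, 0x5c00100, 4, NULL);	// SoC bus view
 *
 * Each returns kproc->mem[i].cpu_addr + 0x100, while a range that crosses
 * the end of every region (e.g. da = 0x7ffe with len = 8) returns NULL.
 */
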
int k3_rproc_of_get_memories(struct platform_device *pdev,
			     struct k3_rproc *kproc)
{
	const struct k3_rproc_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems = 0;
	int i;

	num_mems = data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				data->mems[i].name);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			data->mems[i].name, &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories);

void k3_mem_release(void *data)
{
	struct device *dev = data;

	of_reserved_mem_device_release(dev);
}
EXPORT_SYMBOL_GPL(k3_mem_release);

int k3_reserved_mem_init(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 0) {
		dev_err(dev, "device does not have reserved memory regions (%d)\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_mem_release, dev);
	if (ret)
		return ret;

	num_rmems--;
	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem)
		return -ENOMEM;

	/* use the remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(rmem_np);
		of_node_put(rmem_np);
		if (!rmem)
			return -EINVAL;

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			return -ENOMEM;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %p da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;
}
EXPORT_SYMBOL_GPL(k3_reserved_mem_init);

void k3_release_tsp(void *data)
{
	struct ti_sci_proc *tsp = data;

	ti_sci_proc_release(tsp);
}
EXPORT_SYMBOL_GPL(k3_release_tsp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI K3 common Remoteproc code");