// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

static int copy_dma_range_map(struct device *to, struct device *from)
{
	const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
	int num_ranges = 0;

	if (!map)
		return 0;

	for (r = map; r->size; r++)
		num_ranges++;

	new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
			  GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;
	to->dma_range_map = new_map;
	return 0;
}

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	struct platform_device *pdev;

	pdev = container_of(vdev->dev.parent, struct platform_device, dev);

	return platform_get_drvdata(pdev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
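/*
 * A minimal usage sketch of rproc_vq_interrupt() from a platform rproc
 * driver's notification path (illustrative only: the IRQ/mailbox plumbing
 * and the way the notifyid is recovered are platform specific, and the
 * names below are hypothetical):
 *
 *	static irqreturn_t my_rproc_mbox_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *		int notifyid = my_read_mbox_payload();	// hypothetical helper
 *
 *		return rproc_vq_interrupt(rproc, notifyid);
 *	}
 */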
static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 struct virtqueue_info vqs_info[],
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, vqi->callback,
				    vqi->name, vqi->ctx);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}
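/*
 * The accessors below implement this transport's "device registers":
 * status, feature bits and config space all live in the vdev entry of the
 * resource table shared with the remote processor. The config space is the
 * config_len byte region that immediately follows the vring descriptor
 * array in that entry, which is why it is addressed as
 * &rsc->vring[rsc->num_of_vrings].
 */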
static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features = rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs = rproc_virtio_find_vqs,
	.del_vqs = rproc_virtio_del_vqs,
	.reset = rproc_virtio_reset,
	.set_status = rproc_virtio_set_status,
	.get_status = rproc_virtio_get_status,
	.get = rproc_virtio_get,
	.set = rproc_virtio_set,
};
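/*
 * Note that, unlike MMIO or PCI based virtio transports, none of these ops
 * touch hardware registers: everything except notification goes through
 * the shared resource table, while notifications are delivered via the
 * rproc ->kick() op on the way out and rproc_vq_interrupt() on the way in.
 */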
/*
 * This function is called whenever vdev is released, and is responsible
 * to decrement the remote processor's refcount which was taken when vdev was
 * added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	kfree(vdev);

	of_reserved_mem_device_release(&rvdev->pdev->dev);
	dma_release_coherent_memory(&rvdev->pdev->dev);

	put_device(&rvdev->pdev->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
static int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->pdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use dma address as the carveout is not memmapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							  mem->da,
							  mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device = id;
	vdev->config = &rproc_virtio_config_ops;
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/* Reference the vdev and vring allocations */
	get_device(dev);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}
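/*
 * A note on the memory setup above: when a dedicated "vdev%dbuffer"
 * carveout (or reserved-memory region) is found, it becomes this device's
 * per-device coherent pool, so the virtio driver that binds to the
 * registered vdev allocates its buffers from that region instead of the
 * global DMA pools.
 */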
/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
static int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}

static int rproc_vdev_do_start(struct rproc_subdev *subdev)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);

	return rproc_add_virtio_dev(rvdev, rvdev->id);
}

static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed)
{
	struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev);
	struct device *dev = &rvdev->pdev->dev;
	int ret;

	ret = device_for_each_child(dev, NULL, rproc_remove_virtio_dev);
	if (ret)
		dev_warn(dev, "can't remove vdev child device: %d\n", ret);
}
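/*
 * The two subdev hooks above are invoked by the remoteproc core around the
 * remote processor's life cycle: the virtio device is registered once the
 * firmware has been booted and removed again when it is stopped (or has
 * crashed), so it only exists while the remote side can actually service
 * the vrings.
 */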
static int rproc_virtio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rproc_vdev_data *rvdev_data = dev->platform_data;
	struct rproc_vdev *rvdev;
	struct rproc *rproc = container_of(dev->parent, struct rproc, dev);
	struct fw_rsc_vdev *rsc;
	int i, ret;

	if (!rvdev_data)
		return -EINVAL;

	rvdev = devm_kzalloc(dev, sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->id = rvdev_data->id;
	rvdev->rproc = rproc;
	rvdev->index = rvdev_data->index;

	ret = copy_dma_range_map(dev, rproc->dev.parent);
	if (ret)
		return ret;

	/* Make device dma capable by inheriting from parent's capabilities */
	set_dma_ops(dev, get_dma_ops(rproc->dev.parent));

	ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent));
	if (ret) {
		dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
			 dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
	}

	platform_set_drvdata(pdev, rvdev);
	rvdev->pdev = pdev;

	rsc = rvdev_data->rsc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			return ret;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = rvdev_data->rsc_offset;

	/* allocate the vring resources */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_alloc_vring(rvdev, i);
		if (ret)
			goto unwind_vring_allocations;
	}

	rproc_add_rvdev(rproc, rvdev);

	rvdev->subdev.start = rproc_vdev_do_start;
	rvdev->subdev.stop = rproc_vdev_do_stop;

	rproc_add_subdev(rproc, &rvdev->subdev);

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because the platform device or the vdev device will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ on platform remove.
	 */
	get_device(&rproc->dev);

	return 0;

unwind_vring_allocations:
	for (i--; i >= 0; i--)
		rproc_free_vring(&rvdev->vring[i]);

	return ret;
}
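/*
 * The error path above only releases the vrings allocated so far; the
 * rvdev itself was allocated with devm_kzalloc() and is freed by the
 * driver core. The successful path is unwound in rproc_virtio_remove()
 * below, which also drops the rproc reference taken at the end of probe().
 */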
static void rproc_virtio_remove(struct platform_device *pdev)
{
	struct rproc_vdev *rvdev = dev_get_drvdata(&pdev->dev);
	struct rproc *rproc = rvdev->rproc;
	struct rproc_vring *rvring;
	int id;

	for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {
		rvring = &rvdev->vring[id];
		rproc_free_vring(rvring);
	}

	rproc_remove_subdev(rproc, &rvdev->subdev);
	rproc_remove_rvdev(rvdev);

	put_device(&rproc->dev);
}

/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
	.probe = rproc_virtio_probe,
	.remove_new = rproc_virtio_remove,
	.driver = {
		.name = "rproc-virtio",
	},
};
builtin_platform_driver(rproc_virtio_driver);
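/*
 * Devices bound by this driver are not declared in the device tree; they
 * are expected to be created by the remoteproc core, which registers one
 * "rproc-virtio" platform device per vdev entry found in the firmware's
 * resource table, with a struct rproc_vdev_data as platform data (this is
 * what rproc_virtio_probe() consumes above).
 */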