/*
 * Virtio-based remote processor messaging bus
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/mutex.h>
#include <linux/of_device.h>

/**
 * struct virtproc_info - virtual remote processor state
 * @vdev: the virtio device
 * @rvq: rx virtqueue
 * @svq: tx virtqueue
 * @rbufs: kernel address of rx buffers
 * @sbufs: kernel address of tx buffers
 * @num_bufs: total number of buffers for rx and tx
 * @last_sbuf: index of last tx buffer used
 * @bufs_dma: dma base addr of the buffers
 * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders.
 *	sending a message might require waking up a dozing remote
 *	processor, which involves sleeping, hence the mutex.
 * @endpoints: idr of local endpoints, allows fast retrieval
 * @endpoints_lock: lock of the endpoints set
 * @sendq: wait queue of sending contexts waiting for a tx buffer
 * @sleepers: number of senders that are waiting for a tx buffer
 * @ns_ept: the bus's name service endpoint
 *
 * This structure stores the rpmsg state of a given virtio remote processor
 * device (there might be several virtio proc devices for each physical
 * remote processor).
 */
struct virtproc_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	void *rbufs, *sbufs;
	unsigned int num_bufs;
	int last_sbuf;
	dma_addr_t bufs_dma;
	struct mutex tx_lock;
	struct idr endpoints;
	struct mutex endpoints_lock;
	wait_queue_head_t sendq;
	atomic_t sleepers;
	struct rpmsg_endpoint *ns_ept;
};

/**
 * struct rpmsg_channel_info - internal channel info representation
 * @name: name of service
 * @src: local address
 * @dst: destination address
 */
struct rpmsg_channel_info {
	char name[RPMSG_NAME_SIZE];
	u32 src;
	u32 dst;
};

#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)

/*
 * We're allocating buffers of 512 bytes each for communications. The
 * number of buffers will be computed from the number of buffers supported
 * by the vring, up to a maximum of 512 buffers (256 in each direction).
 *
 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 * the payload.
 *
 * This will utilize a maximum total space of 256KB for the buffers.
 *
 * We might also want to add support for user-provided buffers in time.
 * This will allow bigger buffer size flexibility, and can also be used
 * to achieve zero-copy messaging.
 *
 * Note that these numbers are purely a decision of this driver - we
 * can change this without changing anything in the firmware of the remote
 * processor.
 */
#define MAX_RPMSG_NUM_BUFS	(512)
#define RPMSG_BUF_SIZE		(512)
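
/*
 * Purely illustrative arithmetic (not used by the code): struct rpmsg_hdr
 * occupies 16 bytes, so each buffer carries at most
 * RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr) = 512 - 16 = 496 payload bytes
 * (this is the limit enforced in rpmsg_send_offchannel_raw() below), and
 * the worst-case footprint is MAX_RPMSG_NUM_BUFS * RPMSG_BUF_SIZE =
 * 512 * 512 bytes = 256KB, split evenly between the rx and tx halves.
 */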

/*
 * Local addresses are dynamically allocated on-demand.
 * We do not dynamically assign addresses from the low 1024 range,
 * in order to reserve that address range for predefined services.
 */
#define RPMSG_RESERVED_ADDRESSES	(1024)

/* Address 53 is reserved for advertising remote services */
#define RPMSG_NS_ADDR			(53)

/* sysfs show configuration fields */
#define rpmsg_show_attr(field, path, format_string) \
static ssize_t \
field##_show(struct device *dev, \
	     struct device_attribute *attr, char *buf) \
{ \
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \
 \
	return sprintf(buf, format_string, rpdev->path); \
}

/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
}

static struct device_attribute rpmsg_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(modalias),
	__ATTR_RO(dst),
	__ATTR_RO(src),
	__ATTR_RO(announce),
	__ATTR_NULL
};

/* rpmsg devices and drivers are matched using the service name */
static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
				 const struct rpmsg_device_id *id)
{
	return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
}

/* match rpmsg channel and rpmsg driver */
static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
	const struct rpmsg_device_id *ids = rpdrv->id_table;
	unsigned int i;

	if (ids)
		for (i = 0; ids[i].name[0]; i++)
			if (rpmsg_id_match(rpdev, &ids[i]))
				return 1;

	return of_driver_match_device(dev, drv);
}

static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
			      rpdev->id.name);
}
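
/*
 * For illustration only: a channel whose service name is, say,
 * "rpmsg-client-sample" (a hypothetical name here) binds to any rpmsg
 * driver whose id_table contains { .name = "rpmsg-client-sample" }, and
 * is reported to userspace as MODALIAS=rpmsg:rpmsg-client-sample,
 * assuming the usual RPMSG_DEVICE_MODALIAS_FMT of "rpmsg:%s".
 */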

/**
 * __ept_release() - deallocate an rpmsg endpoint
 * @kref: the ept's reference count
 *
 * This function deallocates an ept, and is invoked when its @kref refcount
 * drops to zero.
 *
 * Never invoke this function directly!
 */
static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	/*
	 * At this point no one holds a reference to ept anymore,
	 * so we can directly free it
	 */
	kfree(ept);
}

/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
						 struct rpmsg_channel *rpdev,
						 rpmsg_rx_cb_t cb,
						 void *priv, u32 addr)
{
	int id_min, id_max, id;
	struct rpmsg_endpoint *ept;
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	ept = kzalloc(sizeof(*ept), GFP_KERNEL);
	if (!ept)
		return NULL;

	kref_init(&ept->refcount);
	mutex_init(&ept->cb_lock);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;

	/* do we need to allocate a local address ? */
	if (addr == RPMSG_ADDR_ANY) {
		id_min = RPMSG_RESERVED_ADDRESSES;
		id_max = 0;
	} else {
		id_min = addr;
		id_max = addr + 1;
	}

	mutex_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", id);
		goto free_ept;
	}
	ept->addr = id;

	mutex_unlock(&vrp->endpoints_lock);

	return ept;

free_ept:
	mutex_unlock(&vrp->endpoints_lock);
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

/**
 * rpmsg_create_ept() - create a new rpmsg_endpoint
 * @rpdev: rpmsg channel device
 * @cb: rx callback handler
 * @priv: private data for the driver's use
 * @addr: local rpmsg address to bind with @cb
 *
 * Every rpmsg address in the system is bound to an rx callback (so when
 * inbound messages arrive, they are dispatched by the rpmsg bus using the
 * appropriate callback handler) by means of an rpmsg_endpoint struct.
 *
 * This function allows drivers to create such an endpoint, and by that,
 * bind a callback, and possibly some private data too, to an rpmsg address
 * (either one that is known in advance, or one that will be dynamically
 * assigned for them).
 *
 * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
 * is already created for them when they are probed by the rpmsg bus
 * (using the rx callback provided when they registered to the rpmsg bus).
 *
 * So things should just work for simple drivers: they already have an
 * endpoint, their rx callback is bound to their rpmsg address, and when
 * relevant inbound messages arrive (i.e. messages whose dst address
 * equals the src address of their rpmsg channel), the driver's handler
 * is invoked to process them.
 *
 * That said, more complicated drivers might need to allocate
 * additional rpmsg addresses, and bind them to different rx callbacks.
 * To accomplish that, those drivers need to call this function.
 *
 * Drivers should provide their @rpdev channel (so the new endpoint would
 * belong to the same remote processor their channel belongs to), an rx
 * callback function, optional private data (which is provided back when
 * the rx callback is invoked), and an address they want to bind with the
 * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
 * dynamically assign them an available rpmsg address (drivers should have
 * a very good reason why not to always use RPMSG_ADDR_ANY here).
 *
 * Returns a pointer to the endpoint on success, or NULL on error.
 */
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
					rpmsg_rx_cb_t cb, void *priv, u32 addr)
{
	return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
}
EXPORT_SYMBOL(rpmsg_create_ept);
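
/*
 * Illustrative sketch only (not part of this driver): a client driver that
 * wants a second, private rx address could create one from its probe()
 * handler. The callback below follows the rpmsg_rx_cb_t signature used
 * throughout this file; all names are hypothetical.
 *
 *	static void my_aux_cb(struct rpmsg_channel *rpdev, void *data,
 *			      int len, void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "got %d bytes from 0x%x\n", len, src);
 *	}
 *
 *	ept = rpmsg_create_ept(rpdev, my_aux_cb, NULL, RPMSG_ADDR_ANY);
 *	if (!ept)
 *		return -ENOMEM;
 *
 *	...and later, typically from the driver's remove() handler:
 *
 *	rpmsg_destroy_ept(ept);
 */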

/**
 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @vrp: virtproc which owns this ept
 * @ept: endpoint to destroy
 *
 * An internal function which destroys an ept without assuming it is
 * bound to an rpmsg channel. This is needed for handling the internal
 * name service endpoint, which isn't bound to an rpmsg channel.
 * See also __rpmsg_create_ept().
 */
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
	/* make sure new inbound messages can't find this ept anymore */
	mutex_lock(&vrp->endpoints_lock);
	idr_remove(&vrp->endpoints, ept->addr);
	mutex_unlock(&vrp->endpoints_lock);

	/* make sure in-flight inbound messages won't invoke cb anymore */
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;
	mutex_unlock(&ept->cb_lock);

	kref_put(&ept->refcount, __ept_release);
}

/**
 * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @ept: endpoint to destroy
 *
 * Should be used by drivers to destroy an rpmsg endpoint previously
 * created with rpmsg_create_ept().
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
	__rpmsg_destroy_ept(ept->rpdev->vrp, ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);

/*
 * when an rpmsg driver is probed with a channel, we seamlessly create
 * an endpoint for it, binding its rx callback to a unique local rpmsg
 * address.
 *
 * if we need to, we also announce this channel to the remote
 * processor (needed in case the driver is exposing an rpmsg service).
 */
static int rpmsg_dev_probe(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	struct rpmsg_endpoint *ept;
	int err;

	ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
	if (!ept) {
		dev_err(dev, "failed to create endpoint\n");
		err = -ENOMEM;
		goto out;
	}

	rpdev->ept = ept;
	rpdev->src = ept->addr;

	err = rpdrv->probe(rpdev);
	if (err) {
		dev_err(dev, "%s: failed: %d\n", __func__, err);
		rpmsg_destroy_ept(ept);
		goto out;
	}

	/* need to tell remote processor's name service about this channel ? */
	if (rpdev->announce &&
	    virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->ept->addr;
		nsm.flags = RPMSG_NS_CREATE;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

out:
	return err;
}

static int rpmsg_dev_remove(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	int err = 0;

	/* tell remote processor's name service we're removing this channel */
	if (rpdev->announce &&
	    virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->src;
		nsm.flags = RPMSG_NS_DESTROY;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

	rpdrv->remove(rpdev);

	rpmsg_destroy_ept(rpdev->ept);

	return err;
}

static struct bus_type rpmsg_bus = {
	.name		= "rpmsg",
	.match		= rpmsg_dev_match,
	.dev_attrs	= rpmsg_dev_attrs,
	.uevent		= rpmsg_uevent,
	.probe		= rpmsg_dev_probe,
	.remove		= rpmsg_dev_remove,
};

/**
 * __register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 * @owner: owning module/driver
 *
 * Returns 0 on success, and an appropriate error value on failure.
 */
int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
{
	rpdrv->drv.bus = &rpmsg_bus;
	rpdrv->drv.owner = owner;
	return driver_register(&rpdrv->drv);
}
EXPORT_SYMBOL(__register_rpmsg_driver);

/**
 * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 */
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
	driver_unregister(&rpdrv->drv);
}
EXPORT_SYMBOL(unregister_rpmsg_driver);
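
/*
 * Illustrative sketch only, not part of this file: a minimal client driver
 * for this bus might look roughly like the following. All names below are
 * hypothetical, and register_rpmsg_driver() is assumed to be the usual
 * THIS_MODULE-passing wrapper around __register_rpmsg_driver() from
 * linux/rpmsg.h.
 *
 *	static void my_cb(struct rpmsg_channel *rpdev, void *data, int len,
 *			  void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "%d bytes from 0x%x\n", len, src);
 *	}
 *
 *	static int my_probe(struct rpmsg_channel *rpdev)
 *	{
 *		return rpmsg_send(rpdev, "hello", 6);
 *	}
 *
 *	static void my_remove(struct rpmsg_channel *rpdev) { }
 *
 *	static struct rpmsg_device_id my_id_table[] = {
 *		{ .name = "my-rpmsg-service" },
 *		{ },
 *	};
 *
 *	static struct rpmsg_driver my_driver = {
 *		.drv.name	= KBUILD_MODNAME,
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.callback	= my_cb,
 *		.remove		= my_remove,
 *	};
 *
 *	register_rpmsg_driver(&my_driver);	(typically from module_init())
 */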

static void rpmsg_release_device(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	kfree(rpdev);
}

/*
 * match an rpmsg channel with a channel info struct.
 * this is used to make sure we're not creating rpmsg devices for channels
 * that already exist.
 */
static int rpmsg_channel_match(struct device *dev, void *data)
{
	struct rpmsg_channel_info *chinfo = data;
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
		return 0;

	if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
		return 0;

	if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
		return 0;

	/* found a match ! */
	return 1;
}

/*
 * create an rpmsg channel using its name and address info.
 * this function will be used to create both static and dynamic
 * channels.
 */
static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
						  struct rpmsg_channel_info *chinfo)
{
	struct rpmsg_channel *rpdev;
	struct device *tmp, *dev = &vrp->vdev->dev;
	int ret;

	/* make sure a similar channel doesn't already exist */
	tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
	if (tmp) {
		/* decrement the matched device's refcount back */
		put_device(tmp);
		dev_err(dev, "channel %s:%x:%x already exist\n",
			chinfo->name, chinfo->src, chinfo->dst);
		return NULL;
	}

	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
	if (!rpdev)
		return NULL;

	rpdev->vrp = vrp;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;

	/*
	 * rpmsg server channels have a predefined local address (for now),
	 * and their existence needs to be announced remotely
	 */
	rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;

	strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);

	dev_set_name(&rpdev->dev, "%s:%s",
		     dev_name(dev->parent), rpdev->id.name);

	rpdev->dev.parent = &vrp->vdev->dev;
	rpdev->dev.bus = &rpmsg_bus;
	rpdev->dev.release = rpmsg_release_device;

	ret = device_register(&rpdev->dev);
	if (ret) {
		dev_err(dev, "device_register failed: %d\n", ret);
		put_device(&rpdev->dev);
		return NULL;
	}

	return rpdev;
}

/*
 * find an existing channel using its name + address properties,
 * and destroy it
 */
static int rpmsg_destroy_channel(struct virtproc_info *vrp,
				 struct rpmsg_channel_info *chinfo)
{
	struct virtio_device *vdev = vrp->vdev;
	struct device *dev;

	dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
	if (!dev)
		return -EINVAL;

	device_unregister(dev);

	put_device(dev);

	return 0;
}

/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/*
	 * either pick the next unused tx buffer
	 * (half of our buffers are used for sending messages)
	 */
	if (vrp->last_sbuf < vrp->num_bufs / 2)
		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
	/* or recycle a used one */
	else
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}

/**
 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called before a sender is blocked, waiting for
 * a tx buffer to become available.
 *
 * If we already have blocking senders, this function merely increases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if this is the first sender to block, we also enable
 * virtio's tx callbacks, so we'd be immediately notified when a tx
 * buffer is consumed (we rely on virtio's tx callback in order
 * to wake up sleeping senders as soon as a tx buffer is used by the
 * remote processor).
 */
static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the first sleeping context waiting for tx buffers ? */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called after a sender, that waited for a tx buffer
 * to become available, is unblocked.
 *
 * If we still have blocking senders, this function merely decreases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if there are no more blocking senders, we also disable
 * virtio's tx callbacks, to avoid the overhead incurred with handling
 * those (now redundant) interrupts.
 */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the last sleeping context waiting for tx buffers ? */
	if (atomic_dec_and_test(&vrp->sleepers))
		/* disable "tx-complete" interrupts */
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers are
 *	available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapse (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
			      void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/* enable "tx-complete" interrupts, if not already enabled */
		rpmsg_upref_sleepers(vrp);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* disable "tx-complete" interrupts if we're the last sleeper */
		rpmsg_downref_sleepers(vrp);

		/* timeout ? */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
		msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
			 msg, sizeof(*msg) + msg->len, true);
#endif

	sg_init_one(&sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (err) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
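
/*
 * Illustrative sketch only: the static inline wrappers in
 * include/linux/rpmsg.h funnel into rpmsg_send_offchannel_raw() above, so a
 * client driver would typically write something along these lines (names
 * are hypothetical):
 *
 *	ret = rpmsg_send(rpdev, &req, sizeof(req));
 *		(uses the channel's src/dst; may block for up to 15 seconds)
 *
 *	ret = rpmsg_sendto(rpdev, &req, sizeof(req), remote_addr);
 *		(same, but to an explicit destination address)
 *
 *	ret = rpmsg_trysend(rpdev, &req, sizeof(req));
 *		(never blocks; returns -ENOMEM if no tx buffer is free)
 */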

static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
			     struct rpmsg_hdr *msg, unsigned int len)
{
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	int err;

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
		msg->src, msg->dst, msg->len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
			 msg, sizeof(*msg) + msg->len, true);
#endif

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
	    msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return -EINVAL;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	struct rpmsg_hdr *msg;
	unsigned int len, msgs_received = 0;
	int err;

	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	while (msg) {
		err = rpmsg_recv_single(vrp, dev, msg, len);
		if (err)
			break;

		msgs_received++;

		msg = virtqueue_get_buf(rvq, &len);
	}

	dev_dbg(dev, "Received %u messages\n", msgs_received);

	/* tell the remote processor we returned the rx buffers we consumed */
	if (msgs_received)
		virtqueue_kick(vrp->rvq);
}

/*
 * This is invoked whenever the remote processor has completed processing
 * a TX msg we just sent it, and the buffer is put back on the used ring.
 *
 * Normally, though, we suppress this "tx complete" interrupt in order to
 * avoid the incurred overhead.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	dev_dbg(&svq->vdev->dev, "%s\n", __func__);

	/* wake up potential senders that are waiting for a tx buffer */
	wake_up_interruptible(&vrp->sendq);
}
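
/*
 * For reference: a name service announcement is a fixed-size
 * struct rpmsg_ns_msg (declared in linux/rpmsg.h) sent to RPMSG_NS_ADDR,
 * carrying a NUL-terminated service name of up to RPMSG_NAME_SIZE bytes,
 * the announcing endpoint's address, and a flags word (RPMSG_NS_CREATE or
 * RPMSG_NS_DESTROY). As a hypothetical example, a remote processor
 * advertising service "rpmsg-client-sample" on its address 0x400 makes the
 * handler below create a local channel with dst = 0x400 and a dynamically
 * assigned src.
 */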

/* invoked when a name service announcement arrives */
static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
			void *priv, u32 src)
{
	struct rpmsg_ns_msg *msg = data;
	struct rpmsg_channel *newch;
	struct rpmsg_channel_info chinfo;
	struct virtproc_info *vrp = priv;
	struct device *dev = &vrp->vdev->dev;
	int ret;

#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("NS announcement: ", DUMP_PREFIX_NONE, 16, 1,
			 data, len, true);
#endif

	if (len != sizeof(*msg)) {
		dev_err(dev, "malformed ns msg (%d)\n", len);
		return;
	}

	/*
	 * the name service ept does _not_ belong to a real rpmsg channel,
	 * and is handled by the rpmsg bus itself.
	 * for sanity reasons, make sure a valid rpdev has _not_ sneaked
	 * in somehow.
	 */
	if (rpdev) {
		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
		return;
	}

	/* don't trust the remote processor to null-terminate the name */
	msg->name[RPMSG_NAME_SIZE - 1] = '\0';

	dev_info(dev, "%sing channel %s addr 0x%x\n",
		 msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
		 msg->name, msg->addr);

	strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = msg->addr;

	if (msg->flags & RPMSG_NS_DESTROY) {
		ret = rpmsg_destroy_channel(vrp, &chinfo);
		if (ret)
			dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
	} else {
		newch = rpmsg_create_channel(vrp, &chinfo);
		if (!newch)
			dev_err(dev, "rpmsg_create_channel failed\n");
	}
}

static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	static const char * const names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if the vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
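
	/*
	 * A purely illustrative sizing example: with symmetric 256-entry
	 * vrings, num_bufs stays at MAX_RPMSG_NUM_BUFS (512), i.e.
	 * 512 * 512 bytes = 256KB of coherent memory below; with, say,
	 * 64-entry vrings only 128 buffers (64KB) would be allocated.
	 */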

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
		bufs_va, &vrp->bufs_dma);

	/* half of the buffers are dedicated to RX */
	vrp->rbufs = bufs_va;

	/* and half are dedicated to TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
					  GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						 vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/* tell the remote processor it can start sending messages */
	/*
	 * this might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
	if (notify)
		virtqueue_notify(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}

static int rpmsg_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

static void rpmsg_remove(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE;
	int ret;

	vdev->config->reset(vdev);

	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
	if (ret)
		dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

	if (vrp->ns_ept)
		__rpmsg_destroy_ept(vrp, vrp->ns_ept);

	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	dma_free_coherent(vdev->dev.parent->parent, total_buf_space,
			  vrp->rbufs, vrp->bufs_dma);

	kfree(vrp);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= rpmsg_probe,
	.remove			= rpmsg_remove,
};

static int __init rpmsg_init(void)
{
	int ret;

	ret = bus_register(&rpmsg_bus);
	if (ret) {
		pr_err("failed to register rpmsg bus: %d\n", ret);
		return ret;
	}

	ret = register_virtio_driver(&virtio_ipc_driver);
	if (ret) {
		pr_err("failed to register virtio driver: %d\n", ret);
		bus_unregister(&rpmsg_bus);
	}

	return ret;
}
subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
	unregister_virtio_driver(&virtio_ipc_driver);
	bus_unregister(&rpmsg_bus);
}
module_exit(rpmsg_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
MODULE_LICENSE("GPL v2");