1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Virtio-based remote processor messaging bus 4 * 5 * Copyright (C) 2011 Texas Instruments, Inc. 6 * Copyright (C) 2011 Google, Inc. 7 * 8 * Ohad Ben-Cohen <ohad@wizery.com> 9 * Brian Swetland <swetland@google.com> 10 */ 11 12 #define pr_fmt(fmt) "%s: " fmt, __func__ 13 14 #include <linux/dma-mapping.h> 15 #include <linux/idr.h> 16 #include <linux/jiffies.h> 17 #include <linux/kernel.h> 18 #include <linux/module.h> 19 #include <linux/mutex.h> 20 #include <linux/rpmsg.h> 21 #include <linux/rpmsg/byteorder.h> 22 #include <linux/rpmsg/ns.h> 23 #include <linux/scatterlist.h> 24 #include <linux/slab.h> 25 #include <linux/sched.h> 26 #include <linux/virtio.h> 27 #include <linux/virtio_ids.h> 28 #include <linux/virtio_config.h> 29 #include <linux/wait.h> 30 31 #include "rpmsg_internal.h" 32 33 /** 34 * struct virtproc_info - virtual remote processor state 35 * @vdev: the virtio device 36 * @rvq: rx virtqueue 37 * @svq: tx virtqueue 38 * @rbufs: kernel address of rx buffers 39 * @sbufs: kernel address of tx buffers 40 * @num_bufs: total number of buffers for rx and tx 41 * @buf_size: size of one rx or tx buffer 42 * @last_sbuf: index of last tx buffer used 43 * @bufs_dma: dma base addr of the buffers 44 * @tx_lock: protects svq and sbufs, to allow concurrent senders. 45 * sending a message might require waking up a dozing remote 46 * processor, which involves sleeping, hence the mutex. 47 * @endpoints: idr of local endpoints, allows fast retrieval 48 * @endpoints_lock: lock of the endpoints set 49 * @sendq: wait queue of sending contexts waiting for a tx buffers 50 * 51 * This structure stores the rpmsg state of a given virtio remote processor 52 * device (there might be several virtio proc devices for each physical 53 * remote processor). 
*/
struct virtproc_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;	/* rx and tx virtqueues */
	void *rbufs, *sbufs;		/* kernel va of rx / tx buffer areas */
	unsigned int num_bufs;
	unsigned int buf_size;
	int last_sbuf;			/* next never-used tx buffer index */
	dma_addr_t bufs_dma;
	struct mutex tx_lock;		/* serializes svq, sbufs and last_sbuf */
	struct idr endpoints;		/* local address -> endpoint lookup */
	struct mutex endpoints_lock;
	wait_queue_head_t sendq;	/* senders sleeping for a tx buffer */
};

/* The feature bitmap for virtio rpmsg */
#define VIRTIO_RPMSG_F_NS	0 /* RP supports name service notifications */

/**
 * struct rpmsg_hdr - common header for all rpmsg messages
 * @src: source address
 * @dst: destination address
 * @reserved: reserved for future use
 * @len: length of payload (in bytes)
 * @flags: message flags
 * @data: @len bytes of message payload data
 *
 * Every message sent(/received) on the rpmsg bus begins with this header.
 * This is a wire format shared with the remote processor, hence __packed
 * and the fixed-endianness __rpmsg* field types.
 */
struct rpmsg_hdr {
	__rpmsg32 src;
	__rpmsg32 dst;
	__rpmsg32 reserved;
	__rpmsg16 len;
	__rpmsg16 flags;
	u8 data[];
} __packed;

/**
 * struct virtio_rpmsg_channel - rpmsg channel descriptor
 * @rpdev: the rpmsg channel device
 * @vrp: the virtio remote processor device this channel belongs to
 *
 * This structure stores the channel that links the rpmsg device to the virtio
 * remote processor device.
 */
struct virtio_rpmsg_channel {
	struct rpmsg_device rpdev;

	struct virtproc_info *vrp;
};

#define to_virtio_rpmsg_channel(_rpdev) \
	container_of(_rpdev, struct virtio_rpmsg_channel, rpdev)

/*
 * We're allocating buffers of 512 bytes each for communications. The
 * number of buffers will be computed from the number of buffers supported
 * by the vring, up to a maximum of 512 buffers (256 in each direction).
 *
 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 * the payload.
 *
 * This will utilize a maximum total space of 256KB for the buffers.
 *
 * We might also want to add support for user-provided buffers in time.
 * This will allow bigger buffer size flexibility, and can also be used
 * to achieve zero-copy messaging.
 *
 * Note that these numbers are purely a decision of this driver - we
 * can change this without changing anything in the firmware of the remote
 * processor.
 */
#define MAX_RPMSG_NUM_BUFS	(512)
#define MAX_RPMSG_BUF_SIZE	(512)

/*
 * Local addresses are dynamically allocated on-demand.
 * We do not dynamically assign addresses from the low 1024 range,
 * in order to reserve that address range for predefined services.
 */
#define RPMSG_RESERVED_ADDRESSES	(1024)

/* Forward declarations for the endpoint/channel operations defined below */
static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept);
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len);
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data,
			       int len, u32 dst);
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data,
				int len);
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data,
				  int len, u32 dst);
static __poll_t virtio_rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
				  poll_table *wait);
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept);
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
						   struct rpmsg_channel_info *chinfo);

/* Operations shared by every endpoint created on this bus */
static const struct rpmsg_endpoint_ops virtio_endpoint_ops = {
	.destroy_ept = virtio_rpmsg_destroy_ept,
	.send = virtio_rpmsg_send,
	.sendto = virtio_rpmsg_sendto,
	.trysend = virtio_rpmsg_trysend,
	.trysendto = virtio_rpmsg_trysendto,
	.poll = virtio_rpmsg_poll,
	.get_mtu = virtio_rpmsg_get_mtu,
};

/**
 * rpmsg_sg_init - initialize scatterlist according to cpu address location
 * @sg: scatterlist to fill
 * @cpu_addr: virtual address of the buffer
 * @len: buffer length
 *
 * An internal function filling scatterlist according to
virtual address
 * location (in vmalloc or in kernel).
 */
static void
rpmsg_sg_init(struct scatterlist *sg, void *cpu_addr, unsigned int len)
{
	if (is_vmalloc_addr(cpu_addr)) {
		/* vmalloc memory is not physically contiguous as a whole,
		 * but each buffer fits a page, so map it via its page */
		sg_init_table(sg, 1);
		sg_set_page(sg, vmalloc_to_page(cpu_addr), len,
			    offset_in_page(cpu_addr));
	} else {
		WARN_ON(!virt_addr_valid(cpu_addr));
		sg_init_one(sg, cpu_addr, len);
	}
}

/**
 * __ept_release() - deallocate an rpmsg endpoint
 * @kref: the ept's reference count
 *
 * This function deallocates an ept, and is invoked when its @kref refcount
 * drops to zero.
 *
 * Never invoke this function directly!
 */
static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	/*
	 * At this point no one holds a reference to ept anymore,
	 * so we can directly free it
	 */
	kfree(ept);
}

/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
						 struct rpmsg_device *rpdev,
						 rpmsg_rx_cb_t cb,
						 void *priv, u32 addr)
{
	int id_min, id_max, id;
	struct rpmsg_endpoint *ept;
	/* rpdev may be NULL for vrp-internal endpoints (e.g. name service) */
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	ept = kzalloc_obj(*ept);
	if (!ept)
		return NULL;

	kref_init(&ept->refcount);
	mutex_init(&ept->cb_lock);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &virtio_endpoint_ops;

	/* do we need to allocate a local address ? */
	if (addr == RPMSG_ADDR_ANY) {
		/* idr_alloc() treats an id_max of 0 as "no upper limit" */
		id_min = RPMSG_RESERVED_ADDRESSES;
		id_max = 0;
	} else {
		id_min = addr;
		id_max = addr + 1;
	}

	mutex_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", id);
		goto free_ept;
	}
	ept->addr = id;

	mutex_unlock(&vrp->endpoints_lock);

	return ept;

free_ept:
	mutex_unlock(&vrp->endpoints_lock);
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

/* rpmsg_device_ops hook: create a sibling channel on the same virtproc */
static struct rpmsg_device *virtio_rpmsg_create_channel(struct rpmsg_device *rpdev,
							struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;

	return __rpmsg_create_channel(vrp, chinfo);
}

/* rpmsg_device_ops hook: unregister a channel previously created above */
static int virtio_rpmsg_release_channel(struct rpmsg_device *rpdev,
					struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;

	return rpmsg_unregister_device(&vrp->vdev->dev, chinfo);
}

/* rpmsg_device_ops hook: create an endpoint bound to @chinfo.src */
static struct rpmsg_endpoint *virtio_rpmsg_create_ept(struct rpmsg_device *rpdev,
						      rpmsg_rx_cb_t cb,
						      void *priv,
						      struct rpmsg_channel_info chinfo)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);

	return __rpmsg_create_ept(vch->vrp, rpdev, cb, priv, chinfo.src);
}

/**
 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @vrp: virtproc which owns this ept
 * @ept: endpoint to destroy
 *
 * An internal function which destroys an ept without assuming it is
 * bound to an rpmsg channel. This is needed for handling the internal
 * name service endpoint, which isn't bound to an rpmsg channel.
 * See also __rpmsg_create_ept().
*/
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
	/* make sure new inbound messages can't find this ept anymore */
	mutex_lock(&vrp->endpoints_lock);
	idr_remove(&vrp->endpoints, ept->addr);
	mutex_unlock(&vrp->endpoints_lock);

	/* make sure in-flight inbound messages won't invoke cb anymore */
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;
	mutex_unlock(&ept->cb_lock);

	/* drop the creation reference; rx path may still hold its own */
	kref_put(&ept->refcount, __ept_release);
}

/* rpmsg_endpoint_ops hook: destroy an endpoint bound to a channel */
static void virtio_rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(ept->rpdev);

	__rpmsg_destroy_ept(vch->vrp, ept);
}

/*
 * Announce a newly-created channel to the remote processor's name service,
 * if the remote advertised VIRTIO_RPMSG_F_NS. Returns 0 when no
 * announcement is needed.
 */
static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;
	struct device *dev = &rpdev->dev;
	int err = 0;

	/* need to tell remote processor's name service about this channel ? */
	if (rpdev->announce && rpdev->ept &&
	    virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
		nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
		nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE);

		err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

	return err;
}

/*
 * Counterpart of virtio_rpmsg_announce_create(): notify the remote name
 * service that this channel is going away.
 */
static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;
	struct device *dev = &rpdev->dev;
	int err = 0;

	/* tell remote processor's name service we're removing this channel */
	if (rpdev->announce && rpdev->ept &&
	    virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
		nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
		nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_DESTROY);

		err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

	return err;
}

/* Device-level operations for every rpmsg device on this bus */
static const struct rpmsg_device_ops virtio_rpmsg_ops = {
	.create_channel = virtio_rpmsg_create_channel,
	.release_channel = virtio_rpmsg_release_channel,
	.create_ept = virtio_rpmsg_create_ept,
	.announce_create = virtio_rpmsg_announce_create,
	.announce_destroy = virtio_rpmsg_announce_destroy,
};

/*
 * Device release callback: frees the containing virtio_rpmsg_channel
 * (and the driver_override string) once the last device reference drops.
 */
static void virtio_rpmsg_release_device(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);

	kfree(rpdev->driver_override);
	kfree(vch);
}

/*
 * create an rpmsg channel using its name and address info.
* this function will be used to create both static and dynamic
 * channels.
 */
static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
						   struct rpmsg_channel_info *chinfo)
{
	struct virtio_rpmsg_channel *vch;
	struct rpmsg_device *rpdev;
	struct device *tmp, *dev = &vrp->vdev->dev;
	int ret;

	/* make sure a similar channel doesn't already exist */
	tmp = rpmsg_find_device(dev, chinfo);
	if (tmp) {
		/* decrement the matched device's refcount back */
		put_device(tmp);
		dev_err(dev, "channel %s:%x:%x already exist\n",
			chinfo->name, chinfo->src, chinfo->dst);
		return NULL;
	}

	vch = kzalloc_obj(*vch);
	if (!vch)
		return NULL;

	/* Link the channel to our vrp */
	vch->vrp = vrp;

	/* Assign public information to the rpmsg_device */
	rpdev = &vch->rpdev;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;
	rpdev->ops = &virtio_rpmsg_ops;
	rpdev->little_endian = virtio_is_little_endian(vrp->vdev);

	/*
	 * rpmsg server channels has predefined local address (for now),
	 * and their existence needs to be announced remotely
	 */
	rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;

	strscpy(rpdev->id.name, chinfo->name, sizeof(rpdev->id.name));

	rpdev->dev.parent = &vrp->vdev->dev;
	rpdev->dev.release = virtio_rpmsg_release_device;
	ret = rpmsg_register_device(rpdev);
	if (ret)
		return NULL;

	return rpdev;
}

/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/*
	 * either pick the next unused tx buffer
	 * (half of our buffers are used for sending messages)
	 */
	if (vrp->last_sbuf < vrp->num_bufs / 2)
		ret = vrp->sbufs + vrp->buf_size * vrp->last_sbuf++;
	/* or recycle a used one (may be NULL if none was returned yet) */
	else
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}

/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapses (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to} API
 * (see include/linux/rpmsg.h).
 *
 * Return: 0 on success and an appropriate error value on failure.
*/
static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
				     u32 src, u32 dst,
				     const void *data, int len, bool wait)
{
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > vrp->buf_size - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* timeout ? (as documented above, -ERESTARTSYS is returned) */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	/* fill the header in wire (fixed) endianness, then the payload */
	msg->len = cpu_to_rpmsg16(rpdev, len);
	msg->flags = 0;
	msg->src = cpu_to_rpmsg32(rpdev, src);
	msg->dst = cpu_to_rpmsg32(rpdev, dst);
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
		src, dst, len, msg->flags, msg->reserved);
#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
			 msg, sizeof(*msg) + len, true);
#endif

	rpmsg_sg_init(&sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL);
	if (err) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}

/* blocking send to the channel's default destination */
static int virtio_rpmsg_send(struct rpmsg_endpoint *ept, const void *data, int len)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	u32 src = ept->addr, dst = rpdev->dst;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}

/* blocking send to an explicit destination address */
static int virtio_rpmsg_sendto(struct rpmsg_endpoint *ept, const void *data,
			       int len, u32 dst)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	u32 src = ept->addr;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
}

/* non-blocking send to the channel's default destination */
static int virtio_rpmsg_trysend(struct rpmsg_endpoint *ept, const void *data,
				int len)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	u32 src = ept->addr, dst = rpdev->dst;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}

/* non-blocking send to an explicit destination address */
static int virtio_rpmsg_trysendto(struct rpmsg_endpoint *ept, const void *data,
				  int len, u32 dst)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	u32 src = ept->addr;

	return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
}

/* poll for TX-buffer availability (EPOLLOUT when a send wouldn't block) */
static __poll_t virtio_rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
				  poll_table *wait)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
	struct virtproc_info *vrp = vch->vrp;
	__poll_t mask = 0;

	poll_wait(filp, &vrp->sendq, wait);

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/*
	 * check for a free buffer, either:
	 * - we haven't used all of the available transmit buffers (half of the
	 *   allocated buffers are used for transmit, hence num_bufs / 2), or,
	 * - we ask the virtqueue if there's a buffer available
	 */
	if (vrp->last_sbuf < vrp->num_bufs / 2 ||
	    !virtqueue_enable_cb(vrp->svq))
		mask |= EPOLLOUT;

	mutex_unlock(&vrp->tx_lock);

	return mask;
}

/* maximum payload an endpoint can carry: buffer size minus the header */
static ssize_t virtio_rpmsg_get_mtu(struct rpmsg_endpoint *ept)
{
	struct rpmsg_device *rpdev = ept->rpdev;
	struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);

	return vch->vrp->buf_size - sizeof(struct rpmsg_hdr);
}

/*
 * Dispatch one inbound message to its destination endpoint (if any),
 * then hand the rx buffer back to the remote processor's virtqueue.
 */
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
			     struct rpmsg_hdr *msg, unsigned int len)
{
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	bool little_endian = virtio_is_little_endian(vrp->vdev);
	unsigned int msg_len = __rpmsg16_to_cpu(little_endian, msg->len);
	int err;

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
		__rpmsg32_to_cpu(little_endian, msg->src),
		__rpmsg32_to_cpu(little_endian, msg->dst), msg_len,
		__rpmsg16_to_cpu(little_endian, msg->flags),
		__rpmsg32_to_cpu(little_endian, msg->reserved));
#if defined(CONFIG_DYNAMIC_DEBUG)
	dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
			 msg, sizeof(*msg) + msg_len, true);
#endif

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > vrp->buf_size ||
	    msg_len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg_len);
		return -EINVAL;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, __rpmsg32_to_cpu(little_endian, msg->dst));

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg_len, ept->priv,
				__rpmsg32_to_cpu(little_endian, msg->src));

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn_ratelimited(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	rpmsg_sg_init(&sg, msg, vrp->buf_size);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}

/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	struct rpmsg_hdr *msg;
	unsigned int len, msgs_received = 0;
	int err;

	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	/* drain all pending messages in one callback invocation */
	while (msg) {
		err = rpmsg_recv_single(vrp, dev, msg, len);
		if (err)
			break;

		msgs_received++;

		msg = virtqueue_get_buf(rvq, &len);
	}

	dev_dbg(dev, "Received %u messages\n", msgs_received);

	/* tell the remote processor we added another available rx buffer */
	if (msgs_received)
		virtqueue_kick(vrp->rvq);
}

/*
 * This is invoked whenever the remote processor completed processing
 * a TX msg we just sent it, and the buffer is put back to the used ring.
 *
 * Normally, though, we suppress this "tx complete" interrupt in order to
 * avoid the incurred overhead.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	dev_dbg(&svq->vdev->dev, "%s\n", __func__);

	/* wake up potential senders that are waiting for a tx buffer */
	wake_up_interruptible(&vrp->sendq);
}

/*
 * Called to expose to user a /dev/rpmsg_ctrlX interface allowing to
 * create endpoint-to-endpoint communication without associated RPMsg channel.
 * The endpoints are attached to the ctrldev RPMsg device.
 */
static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	struct virtio_rpmsg_channel *vch;
	struct rpmsg_device *rpdev_ctrl;
	int err = 0;

	vch = kzalloc_obj(*vch);
	if (!vch)
		return ERR_PTR(-ENOMEM);

	/* Link the channel to the vrp */
	vch->vrp = vrp;

	/* Assign public information to the rpmsg_device */
	rpdev_ctrl = &vch->rpdev;
	rpdev_ctrl->ops = &virtio_rpmsg_ops;

	rpdev_ctrl->dev.parent = &vrp->vdev->dev;
	rpdev_ctrl->dev.release = virtio_rpmsg_release_device;
	rpdev_ctrl->little_endian = virtio_is_little_endian(vrp->vdev);

	err = rpmsg_ctrldev_register_device(rpdev_ctrl);
	if (err) {
		/* vch will be freed in virtio_rpmsg_release_device() */
		return ERR_PTR(err);
	}

	return rpdev_ctrl;
}

/* Counterpart of rpmsg_virtio_add_ctrl_dev(); tolerates a NULL argument */
static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl)
{
	if (!rpdev_ctrl)
		return;
	device_unregister(&rpdev_ctrl->dev);
}

/* virtio driver probe: set up vqs, buffers, ctrl device and name service */
static int rpmsg_probe(struct virtio_device *vdev)
{
	struct virtqueue_info vqs_info[] = {
		{ "input", rpmsg_recv_done },
		{ "output", rpmsg_xmit_done },
	};
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	struct virtio_rpmsg_channel *vch = NULL;
	struct rpmsg_device *rpdev_ns, *rpdev_ctrl;
	void *bufs_va;
	int err = 0, i;
	size_t total_buf_space;
	bool notify;

	vrp = kzalloc_obj(*vrp);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* we expect symmetric tx/rx vrings */
	WARN_ON(virtqueue_get_vring_size(vrp->rvq) !=
		virtqueue_get_vring_size(vrp->svq));

	/* we need fewer buffers if vrings are small */
	if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2)
		vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2;
	else
		vrp->num_bufs = MAX_RPMSG_NUM_BUFS;

	vrp->buf_size = MAX_RPMSG_BUF_SIZE;

	total_buf_space = vrp->num_bufs * vrp->buf_size;

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent,
				     total_buf_space, &vrp->bufs_dma,
				     GFP_KERNEL);
	if (!bufs_va) {
		err = -ENOMEM;
		goto vqs_del;
	}

	dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n",
		bufs_va, &vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + total_buf_space / 2;

	/* set up the receive buffers */
	for (i = 0; i < vrp->num_bufs / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * vrp->buf_size;

		rpmsg_sg_init(&sg, cpu_addr, vrp->buf_size);

		err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr,
					  GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	vdev->priv = vrp;

	rpdev_ctrl = rpmsg_virtio_add_ctrl_dev(vdev);
	if (IS_ERR(rpdev_ctrl)) {
		err = PTR_ERR(rpdev_ctrl);
		goto free_coherent;
	}

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		vch = kzalloc_obj(*vch);
		if (!vch) {
			err = -ENOMEM;
			goto free_ctrldev;
		}

		/* Link the channel to our vrp */
		vch->vrp = vrp;

		/* Assign public information to the rpmsg_device */
		rpdev_ns = &vch->rpdev;
		rpdev_ns->ops = &virtio_rpmsg_ops;
		rpdev_ns->little_endian = virtio_is_little_endian(vrp->vdev);

		rpdev_ns->dev.parent = &vrp->vdev->dev;
		rpdev_ns->dev.release = virtio_rpmsg_release_device;

		err = rpmsg_ns_register_device(rpdev_ns);
		if (err)
			/* vch will be freed in virtio_rpmsg_release_device() */
			goto free_ctrldev;
	}

	/*
	 * Prepare to kick but don't notify yet - we can't do this before
	 * device is ready.
	 */
	notify = virtqueue_kick_prepare(vrp->rvq);

	/* From this point on, we can notify and get callbacks. */
	virtio_device_ready(vdev);

	/* tell the remote processor it can start sending messages */
	/*
	 * this might be concurrent with callbacks, but we are only
	 * doing notify, not a full kick here, so that's ok.
	 */
	if (notify)
		virtqueue_notify(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_ctrldev:
	rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
free_coherent:
	dma_free_coherent(vdev->dev.parent, total_buf_space,
			  bufs_va, vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}

/* device_for_each_child() helper: unregister one child rpmsg device */
static int rpmsg_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/* virtio driver remove: tear down children, vqs and buffer memory */
static void rpmsg_remove(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
	int ret;

	/* stop all vq activity before tearing anything down */
	virtio_reset_device(vdev);

	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
	if (ret)
		dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	dma_free_coherent(vdev->dev.parent, total_buf_space,
			  vrp->rbufs, vrp->bufs_dma);

	kfree(vrp);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = rpmsg_probe,
	.remove = rpmsg_remove,
};

static int __init rpmsg_init(void)
{
	int ret;

	ret = register_virtio_driver(&virtio_ipc_driver);
	if (ret)
		pr_err("failed to register virtio driver: %d\n", ret);

	return ret;
}
subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
	unregister_virtio_driver(&virtio_ipc_driver);
}
module_exit(rpmsg_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
MODULE_LICENSE("GPL v2");