// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
	}
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;
	unsigned long flags;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
		if (!hrtimer_is_queued(hrtimer))
			hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
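
/*
 * Illustrative only: a minimal sketch of how a polling-mode controller
 * might plug into the TX machinery above. The "foo_" names, registers and
 * bits are hypothetical, not part of this framework.
 *
 *	static int foo_send_data(struct mbox_chan *chan, void *data)
 *	{
 *		struct foo_mbox *fm = chan->con_priv;
 *
 *		writel(*(u32 *)data, fm->base + FOO_TX_REG);
 *		return 0;
 *	}
 *
 *	static bool foo_last_tx_done(struct mbox_chan *chan)
 *	{
 *		struct foo_mbox *fm = chan->con_priv;
 *
 *		// Remote clears the busy bit once it has consumed FOO_TX_REG
 *		return !(readl(fm->base + FOO_STAT_REG) & FOO_TX_BUSY);
 *	}
 *
 * With mbox->txdone_poll set (and txpoll_period in ms), msg_submit() kicks
 * poll_hrt and txdone_hrtimer() above keeps calling last_tx_done() until it
 * returns true, then ticks the TX state machine via tx_tick().
 */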

/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);

/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data()
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *         some data.
 *         False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);
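
/*
 * Illustrative only: a sketch of a controller ISR feeding the two atomic
 * notification hooks above. The "foo_" device, register layout and IRQ
 * bits are hypothetical.
 *
 *	static irqreturn_t foo_mbox_irq(int irq, void *p)
 *	{
 *		struct mbox_chan *chan = p;
 *		struct foo_mbox *fm = chan->con_priv;
 *		u32 status = readl(fm->base + FOO_IRQ_STAT);
 *
 *		if (status & FOO_IRQ_RX) {
 *			u32 msg = readl(fm->base + FOO_RX_REG);
 *
 *			// Hand the message to the client's rx_callback
 *			mbox_chan_received_data(chan, &msg);
 *			// ACK the RX only after the call returns
 *			writel(FOO_IRQ_RX, fm->base + FOO_IRQ_CLR);
 *		}
 *
 *		if (status & FOO_IRQ_TX_ACK) {
 *			writel(FOO_IRQ_TX_ACK, fm->base + FOO_IRQ_CLR);
 *			// Only valid when the controller sets txdone_irq
 *			mbox_chan_txdone(chan, 0);
 *		}
 *
 *		return IRQ_HANDLED;
 *	}
 */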

/**
 * mbox_send_message -	For client to submit a message to be
 *				sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e, tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);

/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);
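
/*
 * Illustrative only: a sketch of client-side TX using the calls above.
 * The "foo_" client, its device and message are hypothetical.
 *
 *	static void foo_tx_done(struct mbox_client *cl, void *mssg, int r)
 *	{
 *		if (r)
 *			dev_warn(cl->dev, "message %p failed: %d\n", mssg, r);
 *	}
 *
 *	// Blocking: wait up to 500ms for the transmission to complete
 *	cl->dev = dev;
 *	cl->tx_block = true;
 *	cl->tx_tout = 500;
 *	cl->tx_done = foo_tx_done;
 *
 *	ret = mbox_send_message(chan, &foo_msg);
 *	if (ret < 0)
 *		dev_err(dev, "TX failed: %d\n", ret);
 *
 * With cl->tx_block = false the call only queues the message and returns a
 * token; completion (or failure) is reported through foo_tx_done(). A client
 * that must make progress in atomic context may follow up with
 * mbox_flush(chan, 100), provided the controller implements ->flush().
 */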

static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	unsigned long flags;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}

/**
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *         <0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	int ret;

	mutex_lock(&con_mutex);
	ret = __mbox_bind_client(chan, cl);
	mutex_unlock(&con_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_bind_client);

/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	ret = __mbox_bind_client(chan, cl);
	if (ret)
		chan = ERR_PTR(ret);

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
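
/*
 * Illustrative only: how a consumer typically reaches mbox_request_channel()
 * from DT. The node names, phandles and indices are hypothetical.
 *
 *	mbox: mailbox@1230000 {
 *		#mbox-cells = <1>;
 *	};
 *
 *	client@4000000 {
 *		mboxes = <&mbox 0>, <&mbox 1>;
 *		mbox-names = "tx", "rx";
 *	};
 *
 *	// Request the first specifier in "mboxes" (index 0, i.e. <&mbox 0>)
 *	chan = mbox_request_channel(cl, 0);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */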

struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	int index;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	index = of_property_match_string(np, "mbox-names", name);
	if (index < 0) {
		dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
			__func__, name);
		return ERR_PTR(-EINVAL);
	}
	return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
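
/*
 * Illustrative only: requesting a channel by name and releasing it again.
 * This relies on the hypothetical "mbox-names" DT fragment sketched above.
 *
 *	chan = mbox_request_channel_byname(cl, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	// ... use the channel ...
 *
 *	mbox_free_channel(chan);
 */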

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
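
/*
 * Illustrative only: a sketch of a controller driver registering itself.
 * The "foo_" structures, ops and channel count are hypothetical; because no
 * of_xlate is provided, the default of_mbox_index_xlate() above is used.
 *
 *	static const struct mbox_chan_ops foo_chan_ops = {
 *		.send_data = foo_send_data,
 *		.last_tx_done = foo_last_tx_done,
 *	};
 *
 *	fm->controller.dev = &pdev->dev;
 *	fm->controller.ops = &foo_chan_ops;
 *	fm->controller.chans = fm->chans;
 *	fm->controller.num_chans = FOO_NUM_CHANS;
 *	fm->controller.txdone_poll = true;
 *	fm->controller.txpoll_period = 1;	// milliseconds
 *
 *	ret = mbox_controller_register(&fm->controller);
 *	if (ret)
 *		return ret;
 */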

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);

/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
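
/*
 * Illustrative only: the device-managed variant is usually preferred from a
 * platform driver's probe. The "foo_" driver shown here is hypothetical.
 *
 *	static int foo_mbox_probe(struct platform_device *pdev)
 *	{
 *		struct foo_mbox *fm;
 *
 *		fm = devm_kzalloc(&pdev->dev, sizeof(*fm), GFP_KERNEL);
 *		if (!fm)
 *			return -ENOMEM;
 *
 *		// ... fill in fm->controller as in the earlier sketch ...
 *
 *		// Unregistered automatically on probe failure or removal
 *		return devm_mbox_controller_register(&pdev->dev, &fm->controller);
 *	}
 */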