// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
		/* kick start the timer immediately to avoid delays */
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
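
/*
 * Illustrative sketch (not part of this file): how a controller driver's RX
 * interrupt handler typically feeds received data into the framework. The
 * foo_mbox_read()/foo_mbox_ack_rx() helpers and the register layout behind
 * them are hypothetical; mbox_chan_received_data() is the real API shown.
 *
 *	static irqreturn_t foo_mbox_rx_irq(int irq, void *p)
 *	{
 *		struct mbox_chan *chan = p;
 *		u32 msg = foo_mbox_read(chan);	// fetch payload from hardware
 *
 *		// runs the client's rx_callback synchronously, atomic context
 *		mbox_chan_received_data(chan, &msg);
 *
 *		foo_mbox_ack_rx(chan);		// ACK only after the call returns
 *		return IRQ_HANDLED;
 *	}
 */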

/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
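
/*
 * Illustrative sketch (not part of this file): a client that detects TX
 * completion at the protocol level (e.g. the remote answers with an 'ACK'
 * packet) sets 'knows_txdone' before requesting the channel and then drives
 * the state machine itself. foo_is_ack_packet() and foo_chan are hypothetical;
 * knows_txdone and mbox_client_txdone() are the real API shown.
 *
 *	cl->knows_txdone = true;	// channel switches to TXDONE_BY_ACK
 *
 *	static void foo_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		if (foo_is_ack_packet(mssg))	// protocol-level ACK from remote
 *			mbox_client_txdone(foo_chan, 0);
 *	}
 */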

/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data()
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *	   some data.
 *	   False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

/**
 * mbox_send_message - For client to submit a message to be
 *			sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to the message should be preserved until it is sent
 * over the chan, i.e., until tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
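
/*
 * Illustrative sketch (not part of this file): a minimal blocking client.
 * 'cl' points at a struct mbox_client kept alive in the (hypothetical)
 * driver's private data, and foo_rx_callback() is hypothetical too; the
 * fields and calls shown are the real client API of this framework.
 *
 *	cl->dev = &pdev->dev;
 *	cl->tx_block = true;		// sleep until the remote has taken it
 *	cl->tx_tout = 500;		// ...or give up after 500 ms
 *	cl->rx_callback = foo_rx_callback;
 *
 *	chan = mbox_request_channel(cl, 0);	// index 0 of 'mboxes'
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	ret = mbox_send_message(chan, &msg);	// msg must stay valid until tx_done
 *	if (ret < 0)
 *		dev_err(cl->dev, "message not sent\n");
 *	...
 *	mbox_free_channel(chan);
 */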

/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);

/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-EBUSY);
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			chan = ERR_PTR(ret);
		}
	}

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	struct property *prop;
	const char *mbox_name;
	int index = 0;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!of_get_property(np, "mbox-names", NULL)) {
		dev_err(cl->dev,
			"%s() requires an \"mbox-names\" property\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
		if (!strncmp(name, mbox_name, strlen(name)))
			break;
		index++;
	}

	return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
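
/*
 * Illustrative sketch (not part of this file): the setup a controller driver
 * would typically do before calling mbox_controller_register(). The foo_*
 * callbacks, channel array and count are hypothetical; the mbox_controller
 * and mbox_chan_ops fields shown are the real ones consumed by this framework.
 *
 *	static const struct mbox_chan_ops foo_chan_ops = {
 *		.send_data	= foo_send_data,	// write message to hardware
 *		.startup	= foo_startup,
 *		.shutdown	= foo_shutdown,
 *		.last_tx_done	= foo_last_tx_done,	// required when txdone_poll
 *	};
 *
 *	mbox->dev = &pdev->dev;
 *	mbox->ops = &foo_chan_ops;
 *	mbox->chans = foo_chans;		// array of foo_num_chans channels
 *	mbox->num_chans = foo_num_chans;
 *	mbox->txdone_poll = true;		// poll last_tx_done() for completion
 *	mbox->txpoll_period = 5;		// every 5 ms
 *
 *	ret = mbox_controller_register(mbox);
 */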

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);

/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
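
/*
 * Illustrative sketch (not part of this file): most controller drivers use the
 * device-managed variant from their probe routine so unregistration is tied to
 * the device's lifetime. The foo_* names and private structure are hypothetical;
 * devm_mbox_controller_register() is the real API shown.
 *
 *	static int foo_mbox_probe(struct platform_device *pdev)
 *	{
 *		struct foo_mbox *priv = devm_kzalloc(&pdev->dev, sizeof(*priv),
 *						     GFP_KERNEL);
 *		...
 *		priv->controller.dev = &pdev->dev;
 *		...
 *		// unregistered automatically on probe failure or driver removal
 *		return devm_mbox_controller_register(&pdev->dev, &priv->controller);
 *	}
 */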