// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/spinlock.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;

	guard(spinlock_irqsave)(&chan->lock);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN)
		return -ENOBUFS;

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	void *data;
	int err = -EBUSY;

	scoped_guard(spinlock_irqsave, &chan->lock) {
		if (!chan->msg_count || chan->active_req)
			break;

		/* Pick the oldest queued message (FIFO order) */
		count = chan->msg_count;
		idx = chan->msg_free;
		if (idx >= count)
			idx -= count;
		else
			idx += MBOX_TX_QUEUE_LEN - count;

		data = chan->msg_data[idx];

		if (chan->cl->tx_prepare)
			chan->cl->tx_prepare(chan->cl, data);
		/* Try to submit a message to the MBOX controller */
		err = chan->mbox->ops->send_data(chan, data);
		if (!err) {
			chan->active_req = data;
			chan->msg_count--;
		}
	}

	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
			hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
	}
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	void *mssg;

	scoped_guard(spinlock_irqsave, &chan->lock) {
		mssg = chan->active_req;
		chan->active_req = NULL;
	}

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
			if (!hrtimer_is_queued(hrtimer))
				hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		}

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/**
 * mbox_chan_received_data - A way for controller driver to push data
 *			     received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* The received data is not buffered; it goes straight to the client */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *		      framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
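
/*
 * Illustrative sketch (not part of this file's API): a controller driver
 * with RX and TX-done interrupts would typically feed the two calls above
 * from its handler. All foo_* names and registers are hypothetical.
 *
 *	static irqreturn_t foo_mbox_irq(int irq, void *data)
 *	{
 *		struct mbox_chan *chan = data;
 *		struct foo_mbox_priv *priv = chan->con_priv;
 *		u32 stat = readl(priv->base + FOO_STAT);
 *		u32 msg;
 *
 *		if (stat & FOO_STAT_RX) {
 *			msg = readl(priv->base + FOO_RX_DATA);
 *			mbox_chan_received_data(chan, &msg);
 *			// ACK the RX only after the call above returns
 *			writel(FOO_STAT_RX, priv->base + FOO_ACK);
 *		}
 *
 *		if (stat & FOO_STAT_TX_DONE)
 *			mbox_chan_txdone(chan, 0); // valid only with txdone_irq
 *
 *		return IRQ_HANDLED;
 *	}
 */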

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);

/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			   received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to the controller driver to check for any received data.
 * The data itself is delivered to the client via
 * mbox_chan_received_data().
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if the controller has data pending and is going to
 *         push it to the client after this call.
 *         False, if the controller has no data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

/**
 * mbox_send_message - For client to submit a message to be
 *		       sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The message pointer must remain valid until it has been sent over
 * the chan, i.e., until tx_done() is invoked.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	   or transmission over chan (blocking mode).
 *	   Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
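
/*
 * Illustrative sketch: a client driver sending one message and blocking
 * until the remote has received it. The foo_* names are hypothetical;
 * see mbox_request_channel() below for how 'chan' is obtained from DT.
 *
 *	struct mbox_client cl = {
 *		.dev		= &pdev->dev,
 *		.tx_block	= true,
 *		.tx_tout	= 500,		// milliseconds
 *		.knows_txdone	= false,
 *		.rx_callback	= foo_rx_callback,
 *	};
 *
 *	chan = mbox_request_channel(&cl, 0);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	// 'msg' must stay valid until the transfer completes
 *	ret = mbox_send_message(chan, &msg);
 *	if (ret < 0)
 *		dev_err(&pdev->dev, "message not sent: %d\n", ret);
 */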

/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);

static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_err(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	scoped_guard(spinlock_irqsave, &chan->lock) {
		chan->msg_free = 0;
		chan->msg_count = 0;
		chan->active_req = NULL;
		chan->cl = cl;
		init_completion(&chan->tx_complete);

		if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
			chan->txdone_method = TXDONE_BY_ACK;
	}

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}

/**
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *	   <0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	guard(mutex)(&con_mutex);

	return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);

/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	   ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
					 index, &spec);
	if (ret) {
		dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		return ERR_PTR(ret);
	}

	scoped_guard(mutex, &con_mutex) {
		chan = ERR_PTR(-EPROBE_DEFER);
		list_for_each_entry(mbox, &mbox_cons, node)
			if (mbox->dev->of_node == spec.np) {
				chan = mbox->of_xlate(mbox, &spec);
				if (!IS_ERR(chan))
					break;
			}

		of_node_put(spec.np);

		if (IS_ERR(chan))
			return chan;

		ret = __mbox_bind_client(chan, cl);
		if (ret)
			chan = ERR_PTR(ret);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

/**
 * mbox_request_channel_byname - Request a mailbox channel by name.
 * @cl: Identity of the client requesting the channel.
 * @name: Name of the channel in the 'mbox-names' property.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	   ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	int index;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	index = of_property_match_string(np, "mbox-names", name);
	if (index < 0) {
		dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
			__func__, name);
		return ERR_PTR(index);
	}
	return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
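
/*
 * Illustrative DT fragment (hypothetical nodes): with the binding below,
 * mbox_request_channel_byname(cl, "tx") matches index 1 of 'mbox-names',
 * so index 1 of 'mboxes' is parsed and channel 2 of the controller is
 * returned by the default index xlate.
 *
 *	mbox: mailbox@4a000000 {
 *		#mbox-cells = <1>;
 *	};
 *
 *	client@4b000000 {
 *		mboxes = <&mbox 0>, <&mbox 2>;
 *		mbox-names = "rx", "tx";
 *	};
 */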

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *		       channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	scoped_guard(spinlock_irqsave, &chan->lock) {
		chan->cl = NULL;
		chan->active_req = NULL;
		if (chan->txdone_method == TXDONE_BY_ACK)
			chan->txdone_method = TXDONE_BY_POLL;
	}

	module_put(chan->mbox->dev->driver->owner);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}
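
/*
 * Controllers whose 'mboxes' specifier carries more than a bare index can
 * supply their own of_xlate instead of the default above. An illustrative
 * sketch for a two-cell specifier (<channel priority>), with hypothetical
 * foo_* names:
 *
 *	static struct mbox_chan *
 *	foo_mbox_xlate(struct mbox_controller *mbox,
 *		       const struct of_phandle_args *sp)
 *	{
 *		struct mbox_chan *chan;
 *		struct foo_chan_priv *priv;
 *
 *		if (sp->args_count != 2 || sp->args[0] >= mbox->num_chans)
 *			return ERR_PTR(-EINVAL);
 *
 *		chan = &mbox->chans[sp->args[0]];
 *		priv = chan->con_priv;
 *		priv->prio = sp->args[1];
 *		return chan;
 *	}
 */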

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels with the
 * framework so that clients can request them.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {
		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	scoped_guard(mutex, &con_mutex)
		list_add_tail(&mbox->node, &mbox_cons);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	scoped_guard(mutex, &con_mutex) {
		list_del(&mbox->node);

		for (i = 0; i < mbox->num_chans; i++)
			mbox_free_channel(&mbox->chans[i]);

		if (mbox->txdone_poll)
			hrtimer_cancel(&mbox->poll_hrt);
	}
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
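
/*
 * Illustrative sketch: a controller driver's probe would typically fill in
 * the mbox_controller and register it with the managed helper above. The
 * foo_* names are hypothetical.
 *
 *	static int foo_mbox_probe(struct platform_device *pdev)
 *	{
 *		struct mbox_controller *mbox;
 *
 *		mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
 *		if (!mbox)
 *			return -ENOMEM;
 *
 *		mbox->dev = &pdev->dev;
 *		mbox->ops = &foo_mbox_ops;	// must provide send_data()
 *		mbox->chans = foo_chans;	// array of num_chans channels
 *		mbox->num_chans = ARRAY_SIZE(foo_chans);
 *		mbox->txdone_irq = true;	// or txdone_poll + last_tx_done()
 *
 *		return devm_mbox_controller_register(&pdev->dev, mbox);
 *	}
 */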