// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/spinlock.h>

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;

	guard(spinlock_irqsave)(&chan->lock);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN)
		return -ENOBUFS;

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	void *data;
	int err = -EBUSY;

	scoped_guard(spinlock_irqsave, &chan->lock) {
		if (!chan->msg_count || chan->active_req != MBOX_NO_MSG)
			break;

		count = chan->msg_count;
		idx = chan->msg_free;
		if (idx >= count)
			idx -= count;
		else
			idx += MBOX_TX_QUEUE_LEN - count;

		data = chan->msg_data[idx];

		if (chan->cl->tx_prepare)
			chan->cl->tx_prepare(chan->cl, data);
		/* Try to submit a message to the MBOX controller */
		err = chan->mbox->ops->send_data(chan, data);
		if (!err) {
			chan->active_req = data;
			chan->msg_count--;
		}
	}

	if (!err && (chan->txdone_method & MBOX_TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
			hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
	}
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	void *mssg;

	scoped_guard(spinlock_irqsave, &chan->lock) {
		mssg = chan->active_req;
		chan->active_req = MBOX_NO_MSG;
	}

	/* Submit next message */
	msg_submit(chan);

	if (mssg == MBOX_NO_MSG)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req != MBOX_NO_MSG && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
			if (!hrtimer_is_queued(hrtimer))
				hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		}

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
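/*
 * Illustrative sketch, not part of the original file: how a controller
 * driver might opt into the polled TX-done path serviced by
 * txdone_hrtimer() above. my_mbox_last_tx_done(), my_mbox_send_data()
 * and my_mbox_tx_fifo_empty() are hypothetical names; txdone_poll,
 * txpoll_period, last_tx_done and mbox_controller_register() are the
 * real framework interfaces involved.
 *
 *	static bool my_mbox_last_tx_done(struct mbox_chan *chan)
 *	{
 *		// e.g. test a "TX register empty" status bit; must not sleep
 *		return my_mbox_tx_fifo_empty(chan);
 *	}
 *
 *	static const struct mbox_chan_ops my_mbox_ops = {
 *		.send_data	= my_mbox_send_data,
 *		.last_tx_done	= my_mbox_last_tx_done,
 *	};
 *
 *	mbox->ops = &my_mbox_ops;
 *	mbox->txdone_poll = true;
 *	mbox->txpoll_period = 5;	// re-check last_tx_done() every 5 ms
 *	ret = mbox_controller_register(mbox);
 */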
/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & MBOX_TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol has received an 'ACK' packet and notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & MBOX_TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);

/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to the controller driver for any received data.
 * The data is actually passed on to the client via
 * mbox_chan_received_data().
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *         some data.
 *         False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);
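/*
 * Illustrative sketch, not part of the original file: a client whose
 * protocol carries its own ACK can set 'knows_txdone' and drive the TX
 * state machine itself via mbox_client_txdone(). my_handle_ack() and
 * struct my_dev are hypothetical names.
 *
 *	mydev->client.knows_txdone = true;	// channel switches to TXDONE_BY_ACK
 *
 *	static void my_handle_ack(struct my_dev *mydev, bool ok)
 *	{
 *		// the remote acknowledged the last request; tick the TX machinery
 *		mbox_client_txdone(mydev->chan, ok ? 0 : -EIO);
 *	}
 */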
/**
 * mbox_chan_tx_slots_available - Query the number of available TX queue slots.
 * @chan: Mailbox channel to query.
 *
 * Clients may call this to check how many messages can be queued via
 * mbox_send_message() before the channel's TX queue is full. This helps
 * clients avoid the -ENOBUFS error without needing to increase
 * MBOX_TX_QUEUE_LEN.
 * This can be called from atomic context.
 *
 * Return: Number of available slots in the channel's TX queue.
 */
unsigned int mbox_chan_tx_slots_available(struct mbox_chan *chan)
{
	unsigned int ret;

	guard(spinlock_irqsave)(&chan->lock);
	ret = MBOX_TX_QUEUE_LEN - chan->msg_count;

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_chan_tx_slots_available);

/**
 * mbox_send_message -	For client to submit a message to be
 *			sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to the message should be preserved until it is sent
 * over the chan, i.e., until tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl || mssg == MBOX_NO_MSG)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
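/*
 * Illustrative usage sketch, not part of the original file: queueing a
 * message in blocking mode. struct my_msg, mydev and MY_CMD_PING are
 * hypothetical; tx_block, tx_tout and mbox_send_message() are the real
 * client-facing interfaces.
 *
 *	mydev->client.tx_block = true;
 *	mydev->client.tx_tout = 500;		// give up after 500 ms
 *
 *	struct my_msg msg = { .cmd = MY_CMD_PING };
 *	ret = mbox_send_message(mydev->chan, &msg);
 *	if (ret < 0)
 *		dev_err(dev, "mailbox TX failed: %d\n", ret);
 *	// 'msg' must stay valid until tx_done(); with tx_block set, the
 *	// transmission (or timeout) has already completed at this point.
 */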
/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);

static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_err(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	scoped_guard(spinlock_irqsave, &chan->lock) {
		chan->msg_free = 0;
		chan->msg_count = 0;
		chan->active_req = MBOX_NO_MSG;
		chan->cl = cl;
		init_completion(&chan->tx_complete);

		if (chan->txdone_method == MBOX_TXDONE_BY_POLL && cl->knows_txdone)
			chan->txdone_method = MBOX_TXDONE_BY_ACK;
	}

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}

/**
 * mbox_bind_client - Bind client to a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *	   <0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	guard(mutex)(&con_mutex);

	return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
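/*
 * Illustrative sketch, not part of the original file: a typical
 * mbox_client setup before requesting or binding a channel. mydev,
 * my_rx_callback() and my_tx_done() are hypothetical; the mbox_client
 * fields and mbox_request_channel()/mbox_bind_client() are real.
 * The client structure must outlive the channel, so it normally lives
 * in the driver's private data rather than on the stack.
 *
 *	mydev->client.dev = &pdev->dev;
 *	mydev->client.rx_callback = my_rx_callback;	// runs in atomic context
 *	mydev->client.tx_done = my_tx_done;
 *	mydev->client.tx_block = false;
 *
 *	mydev->chan = mbox_request_channel(&mydev->client, 0);
 *	if (IS_ERR(mydev->chan))
 *		return PTR_ERR(mydev->chan);
 */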
/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *		ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct fwnode_reference_args fwspec;
	struct fwnode_handle *fwnode;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	struct device *dev;
	unsigned int i;
	int ret;

	dev = cl->dev;
	if (!dev) {
		pr_debug("No owner device\n");
		return ERR_PTR(-ENODEV);
	}

	fwnode = dev_fwnode(dev);
	if (!fwnode) {
		dev_dbg(dev, "No owner fwnode\n");
		return ERR_PTR(-ENODEV);
	}

	ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
						 0, index, &fwspec);
	if (ret) {
		dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
		return ERR_PTR(ret);
	}

	spec.np = to_of_node(fwspec.fwnode);
	spec.args_count = fwspec.nargs;
	for (i = 0; i < spec.args_count; i++)
		spec.args[i] = fwspec.args[i];

	scoped_guard(mutex, &con_mutex) {
		chan = ERR_PTR(-EPROBE_DEFER);
		list_for_each_entry(mbox, &mbox_cons, node) {
			if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
				if (mbox->fw_xlate) {
					chan = mbox->fw_xlate(mbox, &fwspec);
					if (!IS_ERR(chan))
						break;
				} else if (mbox->of_xlate) {
					chan = mbox->of_xlate(mbox, &spec);
					if (!IS_ERR(chan))
						break;
				}
			}
		}

		fwnode_handle_put(fwspec.fwnode);

		if (IS_ERR(chan))
			return chan;

		ret = __mbox_bind_client(chan, cl);
		if (ret)
			chan = ERR_PTR(ret);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	int index = device_property_match_string(cl->dev, "mbox-names", name);

	if (index < 0) {
		dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
			__func__, name);
		return ERR_PTR(index);
	}
	return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	scoped_guard(spinlock_irqsave, &chan->lock) {
		chan->cl = NULL;
		chan->active_req = MBOX_NO_MSG;
		if (chan->txdone_method == MBOX_TXDONE_BY_ACK)
			chan->txdone_method = MBOX_TXDONE_BY_POLL;
	}

	module_put(chan->mbox->dev->driver->owner);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
					     const struct fwnode_reference_args *sp)
{
	if (sp->nargs < 1 || sp->args[0] >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[sp->args[0]];
}
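/*
 * Illustrative sketch, not part of the original file: a controller whose
 * 'mboxes' specifier carries more than a bare channel index can supply
 * its own translation instead of the default fw_mbox_index_xlate()
 * above. my_fw_xlate() and the two-cell layout are hypothetical.
 *
 *	static struct mbox_chan *my_fw_xlate(struct mbox_controller *mbox,
 *					     const struct fwnode_reference_args *sp)
 *	{
 *		// assumed layout: cell 0 = channel index, cell 1 = doorbell bit
 *		if (sp->nargs < 2 || sp->args[0] >= mbox->num_chans)
 *			return ERR_PTR(-EINVAL);
 *
 *		return &mbox->chans[sp->args[0]];
 *	}
 *
 *	mbox->fw_xlate = my_fw_xlate;	// set before mbox_controller_register()
 */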
/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	if (!mbox || !mbox->dev || !mbox->ops || !mbox->chans || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = MBOX_TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = MBOX_TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = MBOX_TXDONE_BY_ACK;

	if (txdone == MBOX_TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->active_req = MBOX_NO_MSG;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->fw_xlate && !mbox->of_xlate)
		mbox->fw_xlate = fw_mbox_index_xlate;

	scoped_guard(mutex, &con_mutex)
		list_add_tail(&mbox->node, &mbox_cons);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	scoped_guard(mutex, &con_mutex) {
		list_del(&mbox->node);

		for (i = 0; i < mbox->num_chans; i++)
			mbox_free_channel(&mbox->chans[i]);

		if (mbox->txdone_poll)
			hrtimer_cancel(&mbox->poll_hrt);
	}
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
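/*
 * Illustrative sketch, not part of the original file: typical probe-time
 * registration of a controller using the device-managed helper above, so
 * that unregistration happens automatically on unbind. my_mbox_probe(),
 * my_mbox_chan_ops and my_chans are hypothetical names.
 *
 *	static int my_mbox_probe(struct platform_device *pdev)
 *	{
 *		struct mbox_controller *mbox;
 *
 *		mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
 *		if (!mbox)
 *			return -ENOMEM;
 *
 *		mbox->dev = &pdev->dev;
 *		mbox->ops = &my_mbox_chan_ops;		// hypothetical ops table
 *		mbox->chans = my_chans;			// hypothetical channel array
 *		mbox->num_chans = ARRAY_SIZE(my_chans);
 *		mbox->txdone_irq = true;		// ISR calls mbox_chan_txdone()
 *
 *		return devm_mbox_controller_register(&pdev->dev, mbox);
 *	}
 */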