drivers/mailbox/mailbox.c: lines matching "tx", "mailbox" and "count"

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 */

#include "mailbox.h"
In add_to_rbuf():

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left in the TX queue */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);
In msg_submit():

	unsigned count, idx;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	/* Find the index of the oldest queued message */
	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit the message to the controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Kick the polling timer right away if TX-done is polled */
	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
	}
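The index arithmetic above treats msg_data[] as a ring buffer: msg_free is the next free slot, so with count messages queued the oldest one sits count slots behind it, modulo the queue length. A stand-alone illustration of the same computation (plain user-space C; QUEUE_LEN stands in for MBOX_TX_QUEUE_LEN and the sample values are chosen only for the example):

#include <assert.h>

#define QUEUE_LEN 20	/* stand-in for MBOX_TX_QUEUE_LEN */

/* Index of the oldest queued message, given the next-free slot and the
 * number of currently queued messages. */
static unsigned oldest_idx(unsigned msg_free, unsigned count)
{
	return (msg_free + QUEUE_LEN - count) % QUEUE_LEN;
}

int main(void)
{
	/* 3 messages queued at slots 18, 19, 0; next free slot is 1 */
	assert(oldest_idx(1, 3) == 18);
	/* 2 messages queued at slots 4 and 5; next free slot is 6 */
	assert(oldest_idx(6, 2) == 4);
	return 0;
}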
In tx_tick():

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Notify the client of the TX result */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	/* Unblock a client waiting in mbox_send_message(), unless it timed out */
	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
In txdone_hrtimer():

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			/* ... */
		}
	}

	spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
	hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
	spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
 * mbox_chan_received_data - A way for controller driver to push data
 *	received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.

In mbox_chan_received_data():
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
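Controller drivers typically call mbox_chan_received_data() from their RX interrupt handler after pulling the message out of the hardware. A minimal sketch, assuming a hypothetical register layout; my_mbox, MY_MBOX_RX_DATA and the IRQ wiring (the channel pointer passed as dev_id) are made up, not from any real driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>

struct my_mbox {
	void __iomem *regs;		/* hypothetical MMIO window */
};

#define MY_MBOX_RX_DATA		0x08	/* hypothetical RX data register */

static irqreturn_t my_mbox_rx_irq(int irq, void *data)
{
	struct mbox_chan *chan = data;		/* chan was the IRQ's dev_id */
	struct my_mbox *priv = chan->con_priv;
	u32 msg = readl(priv->regs + MY_MBOX_RX_DATA);

	/* Hand the message to the client's rx_callback; it runs synchronously
	 * and atomically, so passing a stack pointer is fine here. */
	mbox_chan_received_data(chan, &msg);

	return IRQ_HANDLED;
}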
 * mbox_chan_txdone - A way for controller driver to notify the
 *	framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.

In mbox_chan_txdone():
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
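A controller whose hardware interrupts on TX acknowledgement would set txdone_irq = true and tick the state machine from that handler. A minimal sketch, assuming the same includes as the RX sketch above; my_mbox_ack_irq is a made-up name:

/* Hypothetical TX-ACK interrupt handler; the controller was registered
 * with txdone_irq = true and chan was passed as the IRQ's dev_id. */
static irqreturn_t my_mbox_ack_irq(int irq, void *data)
{
	struct mbox_chan *chan = data;

	/* Report that the last send_data() completed successfully */
	mbox_chan_txdone(chan, 0);

	return IRQ_HANDLED;
}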
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 *
 * Used when the client learns of TX completion from its own protocol (an
 * 'ACK' from the remote); this only works if the controller can't sense TX-Done.

In mbox_client_txdone():
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}
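On the client side, the mirror image is a client that registered with knows_txdone = true because its own protocol carries the acknowledgement. A minimal sketch; my_client_handle_ack is a made-up name:

#include <linux/mailbox_client.h>

/* Called from the client's own RX path once the remote has ACKed the
 * previous message (the channel is then using TXDONE_BY_ACK). */
static void my_client_handle_ack(struct mbox_chan *chan, bool ok)
{
	mbox_client_txdone(chan, ok ? 0 : -EIO);
}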
 * mbox_client_peek_data - A way for client driver to pull data
 *	received over the RX channel.
 * @chan: Mailbox channel assigned to this client.

In mbox_client_peek_data():
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
 * mbox_send_message - For client to submit a message to be
 *	sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 *
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is accepted, then upon completion of the TX the API calls 'tx_done'
 * from atomic context, from which the client must not sleep.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode); negative value denotes failure.

In mbox_send_message():
	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0)
			t = -ETIME;
	}
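Putting the client side together: a hedged sketch of a platform driver that requests the first 'mboxes' channel of its DT node, sends one message and blocks until TX-done. All names and values (my_client_probe, the payload, the 500 ms timeout) are illustrative; because tx_block is set, this path must not run in atomic context.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>

static void my_rx_callback(struct mbox_client *cl, void *mssg)
{
	/* Runs in atomic context: just note the reply, don't sleep here */
	dev_info(cl->dev, "got reply: 0x%x\n", *(u32 *)mssg);
}

static int my_client_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	u32 msg = 0xdeadbeef;	/* illustrative payload */
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	cl->dev = &pdev->dev;
	cl->rx_callback = my_rx_callback;
	cl->tx_block = true;		/* mbox_send_message() sleeps ... */
	cl->tx_tout = 500;		/* ... for at most 500 ms */

	/* First 'mboxes' entry in this device's DT node */
	chan = mbox_request_channel(cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	if (ret < 0)
		dev_err(&pdev->dev, "message not delivered: %d\n", ret);

	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}

With tx_block left false, the same call would return immediately with a queue token and completion would instead be reported through cl->tx_done.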
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * Clients call it after mbox_send_message() to forcibly
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.

In mbox_flush():
	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
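For clients that must hand off a message with interrupts disabled (a shutdown or panic path, say), the usual pattern is to queue it and then busy-wait through the controller's ->flush(). A minimal sketch; my_atomic_send and the 100 ms budget are illustrative, and the client is assumed to have tx_block = false so mbox_send_message() does not sleep:

#include <linux/mailbox_client.h>

/* Send from atomic context: queue the message, then poll the controller
 * until it has gone out (requires the controller to implement ->flush()). */
static int my_atomic_send(struct mbox_chan *chan, void *msg)
{
	int ret;

	ret = mbox_send_message(chan, msg);
	if (ret < 0)
		return ret;

	return mbox_flush(chan, 100);	/* up to 100 ms of busy-waiting */
}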
In __mbox_bind_client():

	struct device *dev = cl->dev;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	/* Let the client ACK transmissions if the controller can't sense TX-done */
	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);
		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
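mbox_bind_client() serves clients that already hold a struct mbox_chan pointer (handed over by a glue or parent driver, for instance) rather than looking one up through DT. A minimal sketch; my_bind is a made-up helper:

#include <linux/mailbox_client.h>

/* Bind a prepared client to a channel obtained out-of-band (not via DT). */
static int my_bind(struct mbox_chan *chan, struct mbox_client *cl)
{
	int ret;

	ret = mbox_bind_client(chan, cl);	/* may sleep; starts up the channel */
	if (ret)
		pr_err("failed to bind mailbox channel: %d\n", ret);

	return ret;
}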
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.

In mbox_request_channel():
	struct device *dev = cl->dev;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	/* Look up the index'th entry of the client's 'mboxes' DT property */
	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec))
		return ERR_PTR(-ENODEV);

	/* Defer until a matching controller has registered */
	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}
In mbox_request_channel_byname():
	struct device_node *np = cl->dev->of_node;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Map the name to an index via the 'mbox-names' DT property */
	index = of_property_match_string(np, "mbox-names", name);
	if (index < 0) {
		dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
			__func__, name);
		return ERR_PTR(-EINVAL);
	}
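The name lookup relies on two properties of the client's DT node, mirrored here inside the comment of a hedged C sketch (node and channel names are illustrative):

#include <linux/mailbox_client.h>

/*
 * Illustrative client DT node (names are made up):
 *
 *	my-client {
 *		mboxes = <&mbox_ctrl 0>, <&mbox_ctrl 1>;
 *		mbox-names = "tx", "rx";
 *	};
 */
static struct mbox_chan *my_get_tx_chan(struct mbox_client *cl)
{
	/* Resolves "tx" to index 0 of 'mboxes' and requests that channel */
	return mbox_request_channel_byname(cl, "tx");
}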
 * mbox_free_channel - The client relinquishes control of a mailbox
 *	channel by this call.
 * @chan: The mailbox channel to be freed.

In mbox_free_channel():
	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
In of_mbox_index_xlate():
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
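Controllers whose '#mbox-cells' encodes more than a bare index can install their own of_xlate instead of this default. A hedged sketch for a hypothetical two-cell specifier <bank channel>, with two banks of four channels each; my_mbox_of_xlate and the layout are made up:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/mailbox_controller.h>

/* Hypothetical translation for a two-cell specifier: <bank channel>,
 * where each of the 2 banks provides 4 channels (num_chans = 8). */
static struct mbox_chan *my_mbox_of_xlate(struct mbox_controller *mbox,
					  const struct of_phandle_args *sp)
{
	unsigned int bank = sp->args[0];
	unsigned int chan = sp->args[1];

	if (sp->args_count != 2 || bank >= 2 || chan >= 4)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[bank * 4 + chan];
}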
 * mbox_controller_register - Register the mailbox controller
 * @mbox: Pointer to the mailbox controller.

In mbox_controller_register():
	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {
		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	/* Controllers are kept on a global list, protected by con_mutex */
	list_add_tail(&mbox->node, &mbox_cons);
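A hedged sketch of the controller side: a platform driver filling in struct mbox_controller and registering it, using polled TX-done. my_mbox, the register offsets and the 5 ms poll period are made-up names and values, not a real driver:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>

#define MY_NUM_CHANS	2
#define MY_MBOX_MSG	0x00	/* hypothetical message register */
#define MY_MBOX_STAT	0x04	/* hypothetical status register */
#define MY_STAT_BUSY	BIT(0)

struct my_mbox {
	void __iomem *regs;
	struct mbox_controller controller;
	struct mbox_chan chans[MY_NUM_CHANS];
};

static int my_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct my_mbox *priv = dev_get_drvdata(chan->mbox->dev);

	writel(*(u32 *)data, priv->regs + MY_MBOX_MSG);
	return 0;
}

static bool my_mbox_last_tx_done(struct mbox_chan *chan)
{
	struct my_mbox *priv = dev_get_drvdata(chan->mbox->dev);

	/* TX is done once the hardware clears its busy bit */
	return !(readl(priv->regs + MY_MBOX_STAT) & MY_STAT_BUSY);
}

static const struct mbox_chan_ops my_mbox_ops = {
	.send_data	= my_mbox_send_data,
	.last_tx_done	= my_mbox_last_tx_done,
};

static int my_mbox_probe(struct platform_device *pdev)
{
	struct my_mbox *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->controller.dev = &pdev->dev;
	priv->controller.ops = &my_mbox_ops;
	priv->controller.chans = priv->chans;
	priv->controller.num_chans = MY_NUM_CHANS;
	priv->controller.txdone_poll = true;	/* use the poll hrtimer */
	priv->controller.txpoll_period = 5;	/* poll every 5 ms */

	platform_set_drvdata(pdev, priv);

	return mbox_controller_register(&priv->controller);
}

A matching remove() would call mbox_controller_unregister(&priv->controller); the devm_ variant shown further down removes that boilerplate.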
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox: Pointer to the mailbox controller.

In mbox_controller_unregister():
	list_del(&mbox->node);

	/* Force-release any channels still bound to clients */
	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, is unregistered along with the rest of the
 * device-managed resources upon driver probe failure or driver removal.

In devm_mbox_controller_register():
	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
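With the managed variant, the hypothetical my_mbox_probe() above can end like this and needs no remove() callback, since the devres core unregisters the controller when the device is unbound:

	/* Replaces the mbox_controller_register() call in my_mbox_probe();
	 * unregistration now happens automatically on driver detach. */
	return devm_mbox_controller_register(&pdev->dev, &priv->controller);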
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver detach.