1 // SPDX-License-Identifier: GPL-2.0
13 * little-endian format.
17 * currently configured on the system under test.
21 * the transport configured on the system.
27 * originated from a test-suite and the normal operations of the SCMI drivers,
40 * `-- 0
41 * |-- atomic_threshold_us
42 * |-- instance_name
43 * |-- raw
44 * | |-- channels
45 * | | |-- 0x10
46 * | | | |-- message
47 * | | | `-- message_async
48 * | | `-- 0x13
49 * | | |-- message
50 * | | `-- message_async
51 * | |-- errors
52 * | |-- message
53 * | |-- message_async
54 * | |-- notification
55 * | `-- reset
56 * `-- transport
57 * |-- is_atomic
58 * |-- max_msg_size
59 * |-- max_rx_timeout_ms
60 * |-- rx_max_msg
61 * |-- tx_max_msg
62 * `-- type
66 * - errors: used to read back timed-out and unexpected replies
67 * - message*: used to send sync/async commands and read back immediate and
69 * - notification: used to read any notification being emitted by the system
71 * - reset: used to flush the queues of messages (of any kind) still pending
72 * to be read; this is useful at test-suite start/stop to get
75 * with the per-channel entries rooted at /channels being present only on a
78 * Such per-channel entries can be used to explicitly choose a specific channel
81 * performed based on the protocol embedded in the injected message and on how the
82 * transport is configured on the system.
89 * and sent while the replies or delayed response are read back from those same
93 * on replies and properly choosing SCMI sequence numbers for the outgoing
96 * Injection of multiple in-flight requests is supported as long as the user
100 * parallelism attainable in such a scenario depends on the characteristics
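/*
 * Example: a minimal userspace sketch (not part of this file) injecting a
 * Base protocol PROTOCOL_VERSION command through the raw/message entry shown
 * in the tree above and reading back the synchronous reply. The debugfs mount
 * point, the instance directory "0" and the chosen command are illustrative
 * assumptions; the 32-bit header layout (msg_id[7:0], type[9:8],
 * protocol_id[17:10], token[27:18]) follows the SCMI specification and is
 * expected in little-endian byte order.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* PROTOCOL_VERSION (0x0) of the Base protocol (0x10), COMMAND type (0) */
	uint32_t hdr = (0x0 << 0) | (0x0 << 8) | (0x10 << 10) | (0x0 << 18);
	uint8_t reply[128];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
	if (fd < 0)
		return 1;

	/* The whole message (header + payload) is written; no payload here */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* The reply is read back as header + status + return values */
	n = read(fd, reply, sizeof(reply));
	if (n > 0)
		printf("reply: %zd bytes\n", n);

	close(fd);
	return 0;
}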
133 * struct scmi_raw_queue - Generic Raw queue descriptor
137 * @msg_q: A listhead to a queue of snooped messages waiting to be read out
139 * @wq: A waitqueue used to wait and poll on related @msg_q
152 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
157 * @tx_max_msg: Maximum number of concurrent TX in-flight messages
159 * @chans_q: An XArray mapping optional additional per-channel queues
193 * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
200 * pointed at by xfer->async_done.
212 * struct scmi_raw_buffer - Structure to hold a full SCMI message
226 * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
230 * selected based on protocol.
232 * @tx: A message buffer used to collect TX message on write.
235 * @rx: A message buffer to collect RX message on read.
253 return raw->q[idx]; in scmi_raw_queue_select()
255 return xa_load(&raw->chans_q, chan_id); in scmi_raw_queue_select()
262 struct list_head *head = &q->free_bufs; in scmi_raw_buffer_get()
264 spin_lock_irqsave(&q->free_bufs_lock, flags); in scmi_raw_buffer_get()
267 list_del_init(&rb->node); in scmi_raw_buffer_get()
269 spin_unlock_irqrestore(&q->free_bufs_lock, flags); in scmi_raw_buffer_get()
280 rb->msg.len = rb->max_len; in scmi_raw_buffer_put()
282 spin_lock_irqsave(&q->free_bufs_lock, flags); in scmi_raw_buffer_put()
283 list_add_tail(&rb->node, &q->free_bufs); in scmi_raw_buffer_put()
284 spin_unlock_irqrestore(&q->free_bufs_lock, flags); in scmi_raw_buffer_put()
292 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_buffer_enqueue()
293 list_add_tail(&rb->node, &q->msg_q); in scmi_raw_buffer_enqueue()
294 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_buffer_enqueue()
296 wake_up_interruptible(&q->wq); in scmi_raw_buffer_enqueue()
304 if (!list_empty(&q->msg_q)) { in scmi_raw_buffer_dequeue_unlocked()
305 rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node); in scmi_raw_buffer_dequeue_unlocked()
306 list_del_init(&rb->node); in scmi_raw_buffer_dequeue_unlocked()
317 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_buffer_dequeue()
319 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_buffer_dequeue()
341 mutex_lock(&raw->free_mtx); in scmi_xfer_raw_waiter_get()
342 if (!list_empty(&raw->free_waiters)) { in scmi_xfer_raw_waiter_get()
343 rw = list_first_entry(&raw->free_waiters, in scmi_xfer_raw_waiter_get()
345 list_del_init(&rw->node); in scmi_xfer_raw_waiter_get()
348 reinit_completion(&rw->async_response); in scmi_xfer_raw_waiter_get()
349 xfer->async_done = &rw->async_response; in scmi_xfer_raw_waiter_get()
352 rw->cinfo = cinfo; in scmi_xfer_raw_waiter_get()
353 rw->xfer = xfer; in scmi_xfer_raw_waiter_get()
355 mutex_unlock(&raw->free_mtx); in scmi_xfer_raw_waiter_get()
363 if (rw->xfer) { in scmi_xfer_raw_waiter_put()
364 rw->xfer->async_done = NULL; in scmi_xfer_raw_waiter_put()
365 rw->xfer = NULL; in scmi_xfer_raw_waiter_put()
368 mutex_lock(&raw->free_mtx); in scmi_xfer_raw_waiter_put()
369 list_add_tail(&rw->node, &raw->free_waiters); in scmi_xfer_raw_waiter_put()
370 mutex_unlock(&raw->free_mtx); in scmi_xfer_raw_waiter_put()
377 rw->start_jiffies = jiffies; in scmi_xfer_raw_waiter_enqueue()
379 trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id, in scmi_xfer_raw_waiter_enqueue()
380 rw->xfer->hdr.protocol_id, in scmi_xfer_raw_waiter_enqueue()
381 rw->xfer->hdr.seq, in scmi_xfer_raw_waiter_enqueue()
382 raw->desc->max_rx_timeout_ms, in scmi_xfer_raw_waiter_enqueue()
383 rw->xfer->hdr.poll_completion); in scmi_xfer_raw_waiter_enqueue()
385 mutex_lock(&raw->active_mtx); in scmi_xfer_raw_waiter_enqueue()
386 list_add_tail(&rw->node, &raw->active_waiters); in scmi_xfer_raw_waiter_enqueue()
387 mutex_unlock(&raw->active_mtx); in scmi_xfer_raw_waiter_enqueue()
390 queue_work(raw->wait_wq, &raw->waiters_work); in scmi_xfer_raw_waiter_enqueue()
398 mutex_lock(&raw->active_mtx); in scmi_xfer_raw_waiter_dequeue()
399 if (!list_empty(&raw->active_waiters)) { in scmi_xfer_raw_waiter_dequeue()
400 rw = list_first_entry(&raw->active_waiters, in scmi_xfer_raw_waiter_dequeue()
402 list_del_init(&rw->node); in scmi_xfer_raw_waiter_dequeue()
404 mutex_unlock(&raw->active_mtx); in scmi_xfer_raw_waiter_dequeue()
410 * scmi_xfer_raw_worker - Work function to wait for Raw xfer completions
414 * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
415 * cannot wait to receive its response (if any) in the context of the injection
418 * Userspace should and will poll/wait instead on the read syscalls which will
422 * on the RX path, nonetheless we have to properly wait for their completion as
439 dev = raw->handle->dev; in scmi_xfer_raw_worker()
440 max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms); in scmi_xfer_raw_worker()
454 cinfo = rw->cinfo; in scmi_xfer_raw_worker()
455 xfer = rw->xfer; in scmi_xfer_raw_worker()
457 * Waiters are queued by wait-deadline at the end, so some of in scmi_xfer_raw_worker()
461 * fine and we'll have to wait for the asynchronous part (if in scmi_xfer_raw_worker()
465 aging = jiffies - rw->start_jiffies; in scmi_xfer_raw_worker()
467 jiffies_to_msecs(max_tmo - aging) : 1; in scmi_xfer_raw_worker()
471 if (!ret && xfer->hdr.status) in scmi_xfer_raw_worker()
472 ret = scmi_to_linux_errno(xfer->hdr.status); in scmi_xfer_raw_worker()
474 if (raw->desc->ops->mark_txdone) in scmi_xfer_raw_worker()
475 raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer); in scmi_xfer_raw_worker()
477 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in scmi_xfer_raw_worker()
478 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_xfer_raw_worker()
479 ret, scmi_inflight_count(raw->handle)); in scmi_xfer_raw_worker()
481 /* Wait also for an async delayed response if needed */ in scmi_xfer_raw_worker()
482 if (!ret && xfer->async_done) { in scmi_xfer_raw_worker()
485 if (!wait_for_completion_timeout(xfer->async_done, tmo)) in scmi_xfer_raw_worker()
487 "timed out in RAW delayed resp - HDR:%08X\n", in scmi_xfer_raw_worker()
488 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_worker()
492 scmi_xfer_raw_put(raw->handle, xfer); in scmi_xfer_raw_worker()
501 dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n"); in scmi_xfer_raw_reset()
504 scmi_raw_buffer_queue_flush(raw->q[i]); in scmi_xfer_raw_reset()
508 * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
513 * header) in little-endian binary format.
522 * sequence-numbers between successive SCMI messages such registration could
524 * had still not released; in such a case we just wait and retry.
526 * Return: 0 on Success
535 struct device *dev = raw->handle->dev; in scmi_xfer_raw_get_init()
538 return -EINVAL; in scmi_xfer_raw_get_init()
540 tx_size = len - sizeof(u32); in scmi_xfer_raw_get_init()
542 if (tx_size > raw->desc->max_msg_size) in scmi_xfer_raw_get_init()
543 return -ERANGE; in scmi_xfer_raw_get_init()
545 xfer = scmi_xfer_raw_get(raw->handle); in scmi_xfer_raw_get_init()
547 dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n"); in scmi_xfer_raw_get_init()
553 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_xfer_raw_get_init()
554 xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr); in scmi_xfer_raw_get_init()
556 xfer->hdr.poll_completion = false; in scmi_xfer_raw_get_init()
557 xfer->hdr.status = SCMI_SUCCESS; in scmi_xfer_raw_get_init()
558 xfer->tx.len = tx_size; in scmi_xfer_raw_get_init()
559 xfer->rx.len = raw->desc->max_msg_size; in scmi_xfer_raw_get_init()
561 memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size); in scmi_xfer_raw_get_init()
562 if (xfer->tx.len) in scmi_xfer_raw_get_init()
563 memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len); in scmi_xfer_raw_get_init()
573 ret = scmi_xfer_raw_inflight_register(raw->handle, xfer); in scmi_xfer_raw_get_init()
578 msleep(raw->desc->max_rx_timeout_ms / in scmi_xfer_raw_get_init()
581 } while (ret && --retry); in scmi_xfer_raw_get_init()
585 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n", in scmi_xfer_raw_get_init()
586 xfer->hdr.seq, msg_hdr); in scmi_xfer_raw_get_init()
587 scmi_xfer_raw_put(raw->handle, xfer); in scmi_xfer_raw_get_init()
594 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
599 * selected based on the protocol used.
608 * NOT a common header-flag stating if the command is asynchronous or not)
610 * Return: 0 on Success
619 struct device *dev = raw->handle->dev; in scmi_do_xfer_raw_start()
622 chan_id = xfer->hdr.protocol_id; in scmi_do_xfer_raw_start()
624 xfer->flags |= SCMI_XFER_FLAG_CHAN_SET; in scmi_do_xfer_raw_start()
626 cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id); in scmi_do_xfer_raw_start()
632 dev_warn(dev, "RAW - Cannot get a free waiter !\n"); in scmi_do_xfer_raw_start()
633 return -ENOMEM; in scmi_do_xfer_raw_start()
637 if (is_polling_enabled(cinfo, raw->desc)) in scmi_do_xfer_raw_start()
638 xfer->hdr.poll_completion = true; in scmi_do_xfer_raw_start()
640 reinit_completion(&xfer->done); in scmi_do_xfer_raw_start()
642 smp_store_mb(xfer->state, SCMI_XFER_SENT_OK); in scmi_do_xfer_raw_start()
644 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in scmi_do_xfer_raw_start()
645 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_do_xfer_raw_start()
646 xfer->hdr.poll_completion, in scmi_do_xfer_raw_start()
647 scmi_inflight_count(raw->handle)); in scmi_do_xfer_raw_start()
649 ret = raw->desc->ops->send_message(rw->cinfo, xfer); in scmi_do_xfer_raw_start()
656 trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id, in scmi_do_xfer_raw_start()
657 xfer->hdr.id, "cmnd", xfer->hdr.seq, in scmi_do_xfer_raw_start()
658 xfer->hdr.status, in scmi_do_xfer_raw_start()
659 xfer->tx.buf, xfer->tx.len); in scmi_do_xfer_raw_start()
667 * scmi_raw_message_send - A helper to build and send an SCMI command using
672 * header) in little-endian binary format.
678 * Return: 0 on Success
692 if (is_transport_polling_capable(raw->desc)) { in scmi_raw_message_send()
693 xfer->hdr.poll_completion = true; in scmi_raw_message_send()
695 dev_err(raw->handle->dev, in scmi_raw_message_send()
696 "Failed to send RAW message - Polling NOT supported\n"); in scmi_raw_message_send()
697 return -EINVAL; in scmi_raw_message_send()
703 scmi_xfer_raw_put(raw->handle, xfer); in scmi_raw_message_send()
714 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
715 while (list_empty(&q->msg_q)) { in scmi_raw_message_dequeue()
716 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
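/*
 * Note: the queue lock is dropped before possibly sleeping in
 * wait_event_interruptible() and re-taken once woken; the enclosing while
 * loop then re-checks emptiness under the lock, so a buffer enqueued in the
 * meantime is not missed and the dequeue itself always happens with the
 * lock held.
 */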
719 return ERR_PTR(-EAGAIN); in scmi_raw_message_dequeue()
721 if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q))) in scmi_raw_message_dequeue()
722 return ERR_PTR(-ERESTARTSYS); in scmi_raw_message_dequeue()
724 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
729 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
735 * scmi_raw_message_receive - A helper to dequeue and report the next
740 * in little-endian binary format.
745 * @o_nonblock: A flag to request a non-blocking message dequeue.
747 * Return: 0 on Success
760 return -ENODEV; in scmi_raw_message_receive()
764 dev_dbg(raw->handle->dev, "RAW - No message available!\n"); in scmi_raw_message_receive()
768 if (rb->msg.len <= len) { in scmi_raw_message_receive()
769 memcpy(buf, rb->msg.buf, rb->msg.len); in scmi_raw_message_receive()
770 *size = rb->msg.len; in scmi_raw_message_receive()
772 ret = -ENOSPC; in scmi_raw_message_receive()
788 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_common_read()
790 if (!rd->rx_size) { in scmi_dbg_raw_mode_common_read()
793 ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len, in scmi_dbg_raw_mode_common_read()
794 &rd->rx_size, idx, rd->chan_id, in scmi_dbg_raw_mode_common_read()
795 filp->f_flags & O_NONBLOCK); in scmi_dbg_raw_mode_common_read()
797 rd->rx_size = 0; in scmi_dbg_raw_mode_common_read()
803 } else if (*ppos == rd->rx_size) { in scmi_dbg_raw_mode_common_read()
804 /* Return EOF once the whole message has been read out */ in scmi_dbg_raw_mode_common_read()
805 rd->rx_size = 0; in scmi_dbg_raw_mode_common_read()
810 rd->rx.buf, rd->rx_size); in scmi_dbg_raw_mode_common_read()
821 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_common_write()
823 if (count > rd->tx.len - rd->tx_size) in scmi_dbg_raw_mode_common_write()
824 return -ENOSPC; in scmi_dbg_raw_mode_common_write()
826 /* On the first write attempt @count carries the full message size. */ in scmi_dbg_raw_mode_common_write()
827 if (!rd->tx_size) in scmi_dbg_raw_mode_common_write()
828 rd->tx_req_size = count; in scmi_dbg_raw_mode_common_write()
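/*
 * Writes shorter than the full message are accumulated into rd->tx via
 * simple_write_to_buffer() below until tx_req_size bytes have been gathered;
 * only then is the complete message handed to scmi_raw_message_send().
 */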
834 if (rd->tx_size < rd->tx_req_size) { in scmi_dbg_raw_mode_common_write()
837 cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos, in scmi_dbg_raw_mode_common_write()
842 rd->tx_size += cnt; in scmi_dbg_raw_mode_common_write()
847 ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size, in scmi_dbg_raw_mode_common_write()
848 rd->chan_id, async, poll); in scmi_dbg_raw_mode_common_write()
851 rd->tx_size = 0; in scmi_dbg_raw_mode_common_write()
858 struct poll_table_struct *wait, in scmi_test_dbg_raw_common_poll() argument
862 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_test_dbg_raw_common_poll()
866 q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id); in scmi_test_dbg_raw_common_poll()
870 poll_wait(filp, &q->wq, wait); in scmi_test_dbg_raw_common_poll()
872 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_test_dbg_raw_common_poll()
873 if (!list_empty(&q->msg_q)) in scmi_test_dbg_raw_common_poll()
875 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_test_dbg_raw_common_poll()
897 struct poll_table_struct *wait) in scmi_dbg_raw_mode_message_poll() argument
899 return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE); in scmi_dbg_raw_mode_message_poll()
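/*
 * A userspace sketch (same assumptions as the earlier example) of using the
 * poll support implemented here: wait for a snooped message on a descriptor
 * opened with O_NONBLOCK, then read it back in one go.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static ssize_t read_one_message(int fd, void *buf, size_t len, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret;

	ret = poll(&pfd, 1, timeout_ms);
	if (ret <= 0)
		return ret ? -errno : 0;	/* poll error or timeout */

	/* With O_NONBLOCK the read fails with EAGAIN if no message is queued */
	return read(fd, buf, len);
}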
907 if (!inode->i_private) in scmi_dbg_raw_mode_open()
908 return -ENODEV; in scmi_dbg_raw_mode_open()
910 raw = inode->i_private; in scmi_dbg_raw_mode_open()
913 return -ENOMEM; in scmi_dbg_raw_mode_open()
915 rd->rx.len = raw->desc->max_msg_size + sizeof(u32); in scmi_dbg_raw_mode_open()
916 rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL); in scmi_dbg_raw_mode_open()
917 if (!rd->rx.buf) { in scmi_dbg_raw_mode_open()
919 return -ENOMEM; in scmi_dbg_raw_mode_open()
922 rd->tx.len = raw->desc->max_msg_size + sizeof(u32); in scmi_dbg_raw_mode_open()
923 rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL); in scmi_dbg_raw_mode_open()
924 if (!rd->tx.buf) { in scmi_dbg_raw_mode_open()
925 kfree(rd->rx.buf); in scmi_dbg_raw_mode_open()
927 return -ENOMEM; in scmi_dbg_raw_mode_open()
931 /* not set - reassign the 0 we already had after kzalloc() */ in scmi_dbg_raw_mode_open()
932 rd->chan_id = debugfs_get_aux_num(filp); in scmi_dbg_raw_mode_open()
934 rd->raw = raw; in scmi_dbg_raw_mode_open()
935 filp->private_data = rd; in scmi_dbg_raw_mode_open()
942 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_release()
944 kfree(rd->rx.buf); in scmi_dbg_raw_mode_release()
945 kfree(rd->tx.buf); in scmi_dbg_raw_mode_release()
955 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_reset_write()
957 scmi_xfer_raw_reset(rd->raw); in scmi_dbg_raw_mode_reset_write()
972 .read = scmi_dbg_raw_mode_message_read,
989 .read = scmi_dbg_raw_mode_message_read,
1006 .read = scmi_dbg_raw_mode_message_read,
1023 .read = scmi_dbg_raw_mode_message_read,
1039 struct poll_table_struct *wait) in scmi_test_dbg_raw_mode_notif_poll() argument
1041 return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE); in scmi_test_dbg_raw_mode_notif_poll()
1047 .read = scmi_test_dbg_raw_mode_notif_read,
1062 struct poll_table_struct *wait) in scmi_test_dbg_raw_mode_errors_poll() argument
1064 return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE); in scmi_test_dbg_raw_mode_errors_poll()
1070 .read = scmi_test_dbg_raw_mode_errors_read,
1080 struct device *dev = raw->handle->dev; in scmi_raw_queue_init()
1085 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1087 rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL); in scmi_raw_queue_init()
1089 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1091 spin_lock_init(&q->free_bufs_lock); in scmi_raw_queue_init()
1092 INIT_LIST_HEAD(&q->free_bufs); in scmi_raw_queue_init()
1093 for (i = 0; i < raw->tx_max_msg; i++, rb++) { in scmi_raw_queue_init()
1094 rb->max_len = raw->desc->max_msg_size + sizeof(u32); in scmi_raw_queue_init()
1095 rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL); in scmi_raw_queue_init()
1096 if (!rb->msg.buf) in scmi_raw_queue_init()
1097 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1101 spin_lock_init(&q->msg_q_lock); in scmi_raw_queue_init()
1102 INIT_LIST_HEAD(&q->msg_q); in scmi_raw_queue_init()
1103 init_waitqueue_head(&q->wq); in scmi_raw_queue_init()
1112 struct device *dev = raw->handle->dev; in scmi_xfer_raw_worker_init()
1114 rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL); in scmi_xfer_raw_worker_init()
1116 return -ENOMEM; in scmi_xfer_raw_worker_init()
1118 raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d", in scmi_xfer_raw_worker_init()
1120 WQ_HIGHPRI | WQ_SYSFS, 0, raw->id); in scmi_xfer_raw_worker_init()
1121 if (!raw->wait_wq) in scmi_xfer_raw_worker_init()
1122 return -ENOMEM; in scmi_xfer_raw_worker_init()
1124 mutex_init(&raw->free_mtx); in scmi_xfer_raw_worker_init()
1125 INIT_LIST_HEAD(&raw->free_waiters); in scmi_xfer_raw_worker_init()
1126 mutex_init(&raw->active_mtx); in scmi_xfer_raw_worker_init()
1127 INIT_LIST_HEAD(&raw->active_waiters); in scmi_xfer_raw_worker_init()
1129 for (i = 0; i < raw->tx_max_msg; i++, rw++) { in scmi_xfer_raw_worker_init()
1130 init_completion(&rw->async_response); in scmi_xfer_raw_worker_init()
1133 INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker); in scmi_xfer_raw_worker_init()
1143 struct device *dev = raw->handle->dev; in scmi_raw_mode_setup()
1147 return -ENOMEM; in scmi_raw_mode_setup()
1150 raw->q[idx] = scmi_raw_queue_init(raw); in scmi_raw_mode_setup()
1151 if (IS_ERR(raw->q[idx])) { in scmi_raw_mode_setup()
1152 ret = PTR_ERR(raw->q[idx]); in scmi_raw_mode_setup()
1157 xa_init(&raw->chans_q); in scmi_raw_mode_setup()
1170 ret = xa_insert(&raw->chans_q, channels[i], q, in scmi_raw_mode_setup()
1186 raw->gid = gid; in scmi_raw_mode_setup()
1191 xa_destroy(&raw->chans_q); in scmi_raw_mode_setup()
1198 * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
1207 * @tx_max_msg: Max number of in-flight messages allowed by the transport
1211 * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
1223 return ERR_PTR(-EINVAL); in scmi_raw_mode_init()
1225 dev = handle->dev; in scmi_raw_mode_init()
1228 return ERR_PTR(-ENOMEM); in scmi_raw_mode_init()
1230 raw->handle = handle; in scmi_raw_mode_init()
1231 raw->desc = desc; in scmi_raw_mode_init()
1232 raw->tx_max_msg = tx_max_msg; in scmi_raw_mode_init()
1233 raw->id = instance_id; in scmi_raw_mode_init()
1241 raw->dentry = debugfs_create_dir("raw", top_dentry); in scmi_raw_mode_init()
1243 debugfs_create_file("reset", 0200, raw->dentry, raw, in scmi_raw_mode_init()
1246 debugfs_create_file("message", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1249 debugfs_create_file("message_async", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1252 debugfs_create_file("message_poll", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1255 debugfs_create_file("message_poll_async", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1258 debugfs_create_file("notification", 0400, raw->dentry, raw, in scmi_raw_mode_init()
1261 debugfs_create_file("errors", 0400, raw->dentry, raw, in scmi_raw_mode_init()
1265 * Expose per-channel entries if multiple channels available. in scmi_raw_mode_init()
1273 top_chans = debugfs_create_dir("channels", raw->dentry); in scmi_raw_mode_init()
1300 dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id); in scmi_raw_mode_init()
1306 * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
1317 debugfs_remove_recursive(raw->dentry); in scmi_raw_mode_cleanup()
1319 cancel_work_sync(&raw->waiters_work); in scmi_raw_mode_cleanup()
1320 destroy_workqueue(raw->wait_wq); in scmi_raw_mode_cleanup()
1321 xa_destroy(&raw->chans_q); in scmi_raw_mode_cleanup()
1331 return -EINVAL; in scmi_xfer_raw_collect()
1334 msg_size = xfer->rx.len + sizeof(u32); in scmi_xfer_raw_collect()
1336 if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) in scmi_xfer_raw_collect()
1340 return -ENOSPC; in scmi_xfer_raw_collect()
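/*
 * The collected message, as it will be read back by userspace, is laid out
 * in little-endian as:
 *
 *   | 32-bit header | 32-bit status (replies only) | return payload |
 */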
1343 *m = cpu_to_le32(pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_collect()
1344 if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) in scmi_xfer_raw_collect()
1345 *++m = cpu_to_le32(xfer->hdr.status); in scmi_xfer_raw_collect()
1347 memcpy(++m, xfer->rx.buf, xfer->rx.len); in scmi_xfer_raw_collect()
1355 * scmi_raw_message_report - Helper to report back valid responses/notifications
1363 * If Raw mode is enabled, this is called from the SCMI core on the regular RX
1368 * user can read back the raw message payload at its own pace (if ever) without
1384 dev = raw->handle->dev; in scmi_raw_message_report()
1389 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n", in scmi_raw_message_report()
1397 * buffer to use from the oldest one enqueued and still unread on this in scmi_raw_message_report()
1403 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_report()
1408 * commands MUST be read back from userspace to free the buffers: in scmi_raw_message_report()
1413 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1415 "RAW[%d] - Buffers exhausted. Dropping report.\n", in scmi_raw_message_report()
1433 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1438 rb->msg.len = rb->max_len; in scmi_raw_message_report()
1441 "RAW[%d] - Buffers exhausted. Re-using oldest.\n", in scmi_raw_message_report()
1444 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1446 ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer); in scmi_raw_message_report()
1448 dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n"); in scmi_raw_message_report()
1461 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_xfer_raw_fill()
1462 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_xfer_raw_fill()
1464 memset(xfer->rx.buf, 0x00, xfer->rx.len); in scmi_xfer_raw_fill()
1466 raw->desc->ops->fetch_response(cinfo, xfer); in scmi_xfer_raw_fill()
1470 * scmi_raw_error_report - Helper to report back timed-out or generally
1478 * If Raw mode is enabled, this is called from the SCMI core on the RX path in
1496 xfer.rx.len = raw->desc->max_msg_size; in scmi_raw_error_report()
1499 dev_info(raw->handle->dev, in scmi_raw_error_report()
1500 "Cannot report Raw error for HDR:0x%X - ENOMEM\n", in scmi_raw_error_report()
1505 /* Any transport-provided priv must be passed back down to transport */ in scmi_raw_error_report()