Lines Matching full:raw

3  * System Control and Management Interface (SCMI) Raw mode support
10 * When enabled, the SCMI Raw mode support exposes a userspace API which allows
26 * In order to avoid possible interference between the SCMI Raw transactions
28 * when Raw mode is enabled, by default, all the regular SCMI drivers are
36 * All SCMI Raw entries are rooted under a common top-level /raw debugfs directory
43 * |-- raw
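As a rough illustration of the userspace API these comment fragments describe, the sketch below injects one raw SCMI command through the debugfs message entry and reads back the queued reply. The debugfs mount point, the instance number "0" and the command chosen are assumptions for the example, not taken from this file.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/*
	 * 32-bit SCMI header: message ID in bits [7:0], protocol ID in
	 * bits [17:10].  Illustrative choice: BASE (0x10) PROTOCOL_VERSION.
	 */
	uint32_t hdr = (0x10u << 10) | 0x0;
	uint8_t reply[128];
	ssize_t n;
	int fd;

	/* Assumed debugfs location; adjust to the actual mount/instance */
	fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
	if (fd < 0)
		return 1;

	/* One write() carries one whole raw message: header plus payload */
	if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	/* Replies are queued by the kernel and handed back one per read() */
	n = read(fd, reply, sizeof(reply));
	printf("received %zd reply bytes\n", n);

	close(fd);
	return 0;
}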
133 * struct scmi_raw_queue - Generic Raw queue descriptor
135 * @free_bufs: A free-list list_head used to keep unused raw buffers
152 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
154 * @id: Sequential Raw instance ID.
158 * @q: An array of Raw queue descriptors
166 * @dentry: Top debugfs root dentry for SCMI Raw
169 * Note that this descriptor is passed back to the core after SCMI Raw is
170 * initialized as an opaque handle to be used by subsequent SCMI Raw call hooks.
231 * @raw: A reference to the Raw instance.
240 struct scmi_raw_mode_info *raw;
249 scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
253 return raw->q[idx];
255 return xa_load(&raw->chans_q, chan_id);
336 scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
341 mutex_lock(&raw->free_mtx);
342 if (!list_empty(&raw->free_waiters)) {
343 rw = list_first_entry(&raw->free_waiters,
355 mutex_unlock(&raw->free_mtx);
360 static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
368 mutex_lock(&raw->free_mtx);
369 list_add_tail(&rw->node, &raw->free_waiters);
370 mutex_unlock(&raw->free_mtx);
373 static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
382 raw->desc->max_rx_timeout_ms,
385 mutex_lock(&raw->active_mtx);
386 list_add_tail(&rw->node, &raw->active_waiters);
387 mutex_unlock(&raw->active_mtx);
390 queue_work(raw->wait_wq, &raw->waiters_work);
394 scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
398 mutex_lock(&raw->active_mtx);
399 if (!list_empty(&raw->active_waiters)) {
400 rw = list_first_entry(&raw->active_waiters,
404 mutex_unlock(&raw->active_mtx);
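The waiter get/put/enqueue/dequeue fragments above all rely on the same mutex-protected free/active list pattern. The following self-contained sketch condenses it under a single lock (the driver uses separate free and active mutexes); all demo_* names are illustrative, not the driver's.

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_waiter {
	struct list_head node;
};

struct demo_pool {
	struct mutex lock;
	struct list_head free;
	struct list_head active;
};

static void demo_pool_init(struct demo_pool *p)
{
	mutex_init(&p->lock);
	INIT_LIST_HEAD(&p->free);
	INIT_LIST_HEAD(&p->active);
}

/* Take one idle waiter off the free list, or NULL if exhausted */
static struct demo_waiter *demo_waiter_get(struct demo_pool *p)
{
	struct demo_waiter *w = NULL;

	mutex_lock(&p->lock);
	if (!list_empty(&p->free)) {
		w = list_first_entry(&p->free, struct demo_waiter, node);
		list_del_init(&w->node);
	}
	mutex_unlock(&p->lock);

	return w;
}

/* Park a waiter on the active list so a worker can pick it up in order */
static void demo_waiter_enqueue(struct demo_pool *p, struct demo_waiter *w)
{
	mutex_lock(&p->lock);
	list_add_tail(&w->node, &p->active);
	mutex_unlock(&p->lock);
}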
410 * scmi_xfer_raw_worker - Work function to wait for Raw xfer completions
414 * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
421 * Even though reply messages are collected and reported into the SCMI Raw layer
434 struct scmi_raw_mode_info *raw;
438 raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
439 dev = raw->handle->dev;
440 max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);
450 rw = scmi_xfer_raw_waiter_dequeue(raw);
474 if (raw->desc->ops->mark_txdone)
475 raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);
486 "timed out in RAW delayed resp - HDR:%08X\n",
491 scmi_xfer_raw_put(raw->handle, xfer);
492 scmi_xfer_raw_waiter_put(raw, rw);
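At the heart of the worker fragments above is a bounded wait on each dequeued xfer's completion before marking the transmission done and recycling the xfer and waiter. A minimal sketch of that wait step, assuming a struct completion signalled by the RX path; names are illustrative.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait for a reply with a bounded timeout, as the worker does per waiter */
static int demo_wait_reply(struct completion *done, unsigned int timeout_ms)
{
	unsigned long max_tmo = msecs_to_jiffies(timeout_ms);

	if (!wait_for_completion_timeout(done, max_tmo))
		return -ETIMEDOUT;	/* no reply within the RX timeout */

	return 0;
}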
496 static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
500 dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");
503 scmi_raw_buffer_queue_flush(raw->q[i]);
510 * @raw: A reference to the Raw instance.
514 * @p: A pointer to return the initialized Raw xfer.
527 static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
534 struct device *dev = raw->handle->dev;
541 if (tx_size > raw->desc->max_msg_size)
544 xfer = scmi_xfer_raw_get(raw->handle);
546 dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
558 xfer->rx.len = raw->desc->max_msg_size;
560 memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
566 * In-flight registration can temporarily fail in case of Raw messages
568 * sequence numbers since, in Raw mode, the xfer (and the token) is
572 ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
577 msleep(raw->desc->max_rx_timeout_ms /
584 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
586 scmi_xfer_raw_put(raw->handle, xfer);
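The in-flight registration above is retried with short sleeps before the driver gives up and releases the xfer. A generic sketch of such a bounded retry loop follows; the retry count and the sleep split are assumptions, the real values live in the elided lines.

#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_RETRIES	5	/* illustrative bound, not the driver's */

/* Retry a registration-style operation that can transiently collide */
static int demo_register_retry(int (*try_register)(void *ctx), void *ctx,
			       unsigned int total_wait_ms)
{
	int i, ret = -EBUSY;

	for (i = 0; i < DEMO_RETRIES; i++) {
		ret = try_register(ctx);
		if (!ret)
			break;
		msleep(total_wait_ms / DEMO_RETRIES);
	}

	return ret;
}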
593 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
595 * @raw: A reference to the Raw instance.
601 * This function sends a previously built raw xfer using an appropriate channel
611 static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
618 struct device *dev = raw->handle->dev;
625 cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
629 rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
631 dev_warn(dev, "RAW - Cannot get a free waiter !\n");
636 if (is_polling_enabled(cinfo, raw->desc))
647 ret = raw->desc->ops->send_message(rw->cinfo, xfer);
649 dev_err(dev, "Failed to send RAW message %d\n", ret);
650 scmi_xfer_raw_waiter_put(raw, rw);
654 trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
659 scmi_xfer_raw_waiter_enqueue(raw, rw);
668 * @raw: A reference to the Raw instance.
677 static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
683 ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
687 ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
689 scmi_xfer_raw_put(raw->handle, xfer);
722 * available enqueued raw message payload that has been collected.
724 * @raw: A reference to the Raw instance.
735 static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
744 q = scmi_raw_queue_select(raw, idx, chan_id);
750 dev_dbg(raw->handle->dev, "RAW - No message available!\n");
766 /* SCMI Raw debugfs helpers */
779 ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
818 * before sending it with a single RAW xfer.
833 ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
852 q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
889 struct scmi_raw_mode_info *raw;
895 raw = inode->i_private;
900 rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
907 rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
919 rd->raw = raw;
942 scmi_xfer_raw_reset(rd->raw);
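The debugfs open and reset fragments above use the usual idiom of stashing the instance pointer in inode->i_private when the file is created and recovering it at open time into a per-file-descriptor context. A stripped-down sketch of that idiom, with illustrative demo_* names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_instance {
	int id;
};

struct demo_fd_data {
	struct demo_instance *inst;
};

static int demo_open(struct inode *inode, struct file *filp)
{
	/* The pointer that was passed as 'data' to debugfs_create_file() */
	struct demo_instance *inst = inode->i_private;
	struct demo_fd_data *fd;

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	fd->inst = inst;
	filp->private_data = fd;

	return 0;
}

static int demo_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations demo_fops = {
	.open = demo_open,
	.release = demo_release,
	.owner = THIS_MODULE,
};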
1026 scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
1030 struct device *dev = raw->handle->dev;
1037 rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
1043 for (i = 0; i < raw->tx_max_msg; i++, rb++) {
1044 rb->max_len = raw->desc->max_msg_size + sizeof(u32);
1058 static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
1062 struct device *dev = raw->handle->dev;
1064 rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
1068 raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
1070 WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
1071 if (!raw->wait_wq)
1074 mutex_init(&raw->free_mtx);
1075 INIT_LIST_HEAD(&raw->free_waiters);
1076 mutex_init(&raw->active_mtx);
1077 INIT_LIST_HEAD(&raw->active_waiters);
1079 for (i = 0; i < raw->tx_max_msg; i++, rw++) {
1081 scmi_xfer_raw_waiter_put(raw, rw);
1083 INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);
1088 static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
1093 struct device *dev = raw->handle->dev;
1100 raw->q[idx] = scmi_raw_queue_init(raw);
1101 if (IS_ERR(raw->q[idx])) {
1102 ret = PTR_ERR(raw->q[idx]);
1107 xa_init(&raw->chans_q);
1114 q = scmi_raw_queue_init(raw);
1120 ret = xa_insert(&raw->chans_q, channels[i], q,
1124 "Fail to allocate Raw queue 0x%02X\n",
1131 ret = scmi_xfer_raw_worker_init(raw);
1136 raw->gid = gid;
1141 xa_destroy(&raw->chans_q);
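The per-channel queue bookkeeping above, together with the earlier xa_load() in scmi_raw_queue_select(), is an XArray keyed by channel identifier. A self-contained sketch of that pattern (demo_* names are illustrative; the driver itself uses devm allocations, so its error path is simpler):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_queue {
	unsigned int chan_id;
};

/* Register one queue per channel, keyed by the channel identifier */
static int demo_chans_setup(struct xarray *chans_q, const u8 *channels,
			    int num_chans)
{
	struct demo_queue *q;
	unsigned long idx;
	int i, ret;

	xa_init(chans_q);

	for (i = 0; i < num_chans; i++) {
		q = kzalloc(sizeof(*q), GFP_KERNEL);
		if (!q) {
			ret = -ENOMEM;
			goto err;
		}

		q->chan_id = channels[i];
		/* xa_insert() fails with -EBUSY on duplicate channel IDs */
		ret = xa_insert(chans_q, channels[i], q, GFP_KERNEL);
		if (ret) {
			kfree(q);
			goto err;
		}
	}

	return 0;

err:
	/* Free what was inserted so far; xa_destroy() drops only the index */
	xa_for_each(chans_q, idx, q)
		kfree(q);
	xa_destroy(chans_q);
	return ret;
}

/* Lookup mirrors scmi_raw_queue_select(): NULL if the channel is unknown */
static struct demo_queue *demo_chan_queue_get(struct xarray *chans_q,
					      unsigned int chan_id)
{
	return xa_load(chans_q, chan_id);
}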
1148 * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
1151 * @top_dentry: A reference to the top Raw debugfs dentry
1153 * this Raw instance
1159 * This function prepares the SCMI Raw stack and creates the debugfs API.
1161 * Return: An opaque handle to the Raw instance on success, an ERR_PTR otherwise
1169 struct scmi_raw_mode_info *raw;
1176 raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
1177 if (!raw)
1180 raw->handle = handle;
1181 raw->desc = desc;
1182 raw->tx_max_msg = tx_max_msg;
1183 raw->id = instance_id;
1185 ret = scmi_raw_mode_setup(raw, channels, num_chans);
1187 devm_kfree(dev, raw);
1191 raw->dentry = debugfs_create_dir("raw", top_dentry);
1193 debugfs_create_file("reset", 0200, raw->dentry, raw,
1196 debugfs_create_file("message", 0600, raw->dentry, raw,
1199 debugfs_create_file("message_async", 0600, raw->dentry, raw,
1202 debugfs_create_file("notification", 0400, raw->dentry, raw,
1205 debugfs_create_file("errors", 0400, raw->dentry, raw,
1211 * anyway already have working core Raw support.
1217 top_chans = debugfs_create_dir("channels", raw->dentry);
1227 raw, channels[i],
1231 raw, channels[i],
1236 dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);
1238 return raw;
1242 * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
1244 * @r: An opaque handle to an initialized SCMI Raw instance
1248 struct scmi_raw_mode_info *raw = r;
1250 if (!raw)
1253 debugfs_remove_recursive(raw->dentry);
1255 cancel_work_sync(&raw->waiters_work);
1256 destroy_workqueue(raw->wait_wq);
1257 xa_destroy(&raw->chans_q);
1292 * to raw message requests.
1294 * @r: An opaque reference to the raw instance configuration
1299 * If Raw mode is enabled, this is called from the SCMI core on the regular RX
1304 * user can read back the raw message payload at its own pace (if ever) without
1315 struct scmi_raw_mode_info *raw = r;
1317 if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
1320 dev = raw->handle->dev;
1321 q = scmi_raw_queue_select(raw, idx,
1325 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
1343 * Immediate and delayed replies to previously injected Raw
1351 "RAW[%d] - Buffers exhausted. Dropping report.\n",
1362 * by Raw requests cannot be distinguished from normal ones, so
1363 * your Raw buffer queues risk being flooded and depleted by
1377 "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
1384 dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
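The two exhaustion policies spelled out above, dropping the report for replies to injected requests and recycling the oldest unread entry for notification floods, boil down to how the next buffer is picked. A condensed sketch under a single lock, with illustrative demo_* names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_buf {
	struct list_head node;
};

struct demo_queue {
	spinlock_t lock;
	struct list_head free_bufs;	/* unused buffers */
	struct list_head msgs;		/* queued, not yet read, reports */
};

/*
 * Pick a buffer for an incoming report: prefer a free one; if the queue
 * is exhausted, optionally cannibalize the oldest still-unread report
 * (the notification-queue policy), otherwise give up (the reply-queue
 * policy, where dropping the report is acceptable).
 */
static struct demo_buf *demo_buf_get(struct demo_queue *q, bool reuse_oldest)
{
	struct demo_buf *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->free_bufs)) {
		b = list_first_entry(&q->free_bufs, struct demo_buf, node);
		list_del_init(&b->node);
	} else if (reuse_oldest && !list_empty(&q->msgs)) {
		b = list_first_entry(&q->msgs, struct demo_buf, node);
		list_del_init(&b->node);
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return b;
}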
1392 static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
1402 raw->desc->ops->fetch_response(cinfo, xfer);
1409 * @r: An opaque reference to the raw instance configuration
1414 * If Raw mode is enabled, this is called from the SCMI core on the RX path in
1427 struct scmi_raw_mode_info *raw = r;
1429 if (!raw)
1432 xfer.rx.len = raw->desc->max_msg_size;
1435 dev_info(raw->handle->dev,
1436 "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
1446 scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
1447 scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);