// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Raw mode support
 *
 * Copyright (C) 2022 ARM Ltd.
 */
/**
 * DOC: Theory of operation
 *
 * When enabled, the SCMI Raw mode support exposes a userspace API which
 * allows a user application to send and receive SCMI commands, replies and
 * notifications through injection and snooping of bare SCMI messages in
 * binary little-endian format.
 *
 * Such injected SCMI transactions will then be routed through the SCMI core
 * stack towards the SCMI backend server using whatever SCMI transport is
 * currently configured on the system under test.
 *
 * It is meant to help in running any sort of SCMI backend server testing, no
 * matter where the server is placed, as long as it is normally reachable via
 * the transport configured on the system.
 *
 * It is activated by a Kernel configuration option since it is NOT meant to
 * be used in production but only during development and in CI deployments.
 *
 * In order to avoid possible interferences between the SCMI Raw transactions
 * originated from a test-suite and the normal operations of the SCMI drivers,
 * when Raw mode is enabled, by default, all the regular SCMI drivers are
 * inhibited, unless CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX is enabled: in this
 * latter case the regular SCMI stack drivers will be loaded as usual and it is
 * up to the user of this interface to take care of manually inhibiting the
 * regular SCMI drivers in order to avoid interferences during the test runs.
 *
 * The exposed API is as follows.
 *
 * All SCMI Raw entries are rooted under a common /raw debugfs top directory
 * which in turn is rooted under the corresponding underlying SCMI instance.
 *
 * /sys/kernel/debug/scmi/
 * `-- 0
 *     |-- atomic_threshold_us
 *     |-- instance_name
 *     |-- raw
 *     |   |-- channels
 *     |   |   |-- 0x10
 *     |   |   |   |-- message
 *     |   |   |   `-- message_async
 *     |   |   `-- 0x13
 *     |   |       |-- message
 *     |   |       `-- message_async
 *     |   |-- errors
 *     |   |-- message
 *     |   |-- message_async
 *     |   |-- notification
 *     |   `-- reset
 *     `-- transport
 *         |-- is_atomic
 *         |-- max_msg_size
 *         |-- max_rx_timeout_ms
 *         |-- rx_max_msg
 *         |-- tx_max_msg
 *         `-- type
 *
 * where:
 *
 * - errors: used to read back timed-out and unexpected replies
 * - message*: used to send sync/async commands and read back immediate and
 *	       delayed responses (if any)
 * - notification: used to read any notification being emitted by the system
 *		   (if previously enabled by the user app)
 * - reset: used to flush the queues of messages (of any kind) still pending
 *	    to be read; this is useful at test-suite start/stop to get
 *	    rid of any unread messages from the previous run.
 *
 * with the per-channel entries rooted at /channels being present only on a
 * system where multiple transport channels have been configured.
 *
 * Such per-channel entries can be used to explicitly choose a specific channel
 * for SCMI bare message injection, in contrast with the general entries above
 * where, instead, the selection of the proper channel to use is automatically
 * performed based on the protocol embedded in the injected message and on how
 * the transport is configured on the system.
 *
 * Note that other common general entries are available under transport/ to let
 * the user applications properly make up their expectations in terms of
 * timeouts and message characteristics.
 *
 * Each write to the message* entries causes one command request to be built
 * and sent while the replies or delayed response are read back from those same
 * entries one message at a time (receiving an EOF at each message boundary).
 *
 * The user application running the test is in charge of handling timeouts
 * on replies and properly choosing SCMI sequence numbers for the outgoing
 * requests (using the same sequence number is supported but discouraged).
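 *
 * As a quick, illustrative sketch of the expected interaction (paths, header
 * values and buffer sizes here are just examples; error handling and reply
 * timeouts are omitted), a synchronous command could be injected and its
 * reply read back as follows:
 *
 *	int fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
 *	// 4-byte LE bare header: Base protocol PROTOCOL_VERSION, token 1
 *	uint8_t cmd[4] = { 0x00, 0x40, 0x04, 0x00 };
 *	uint8_t rsp[128];
 *	ssize_t n;
 *
 *	write(fd, cmd, sizeof(cmd));
 *	n = read(fd, rsp, sizeof(rsp));	// reply: header, status, payload
 *	n = read(fd, rsp, sizeof(rsp));	// returns 0: EOF, message boundary
 *
 * The same read pattern applies to the notification and errors entries, and
 * poll() can be used on the read side to wait for messages to become
 * available.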
 *
 * Injection of multiple in-flight requests is supported as long as the user
 * application uses properly distinct sequence numbers for concurrent requests
 * and takes care to properly manage all the related issues about concurrency
 * and command/reply pairing. Keep in mind that, anyway, the real level of
 * parallelism attainable in such a scenario is dependent on the
 * characteristics of the underlying transport being used.
 *
 * Since the SCMI core regular stack is partially used to deliver and collect
 * the messages, late replies arriving after timeouts and any other sort of
 * unexpected message can be identified by the SCMI core as usual and they will
 * be reported as messages under "errors" for later analysis.
 */

#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "common.h"

#include "raw_mode.h"

#include <trace/events/scmi.h>

#define SCMI_XFER_RAW_MAX_RETRIES	10

/**
 * struct scmi_raw_queue - Generic Raw queue descriptor
 *
 * @free_bufs: A freelist listhead used to keep unused raw buffers
 * @free_bufs_lock: Spinlock used to protect access to @free_bufs
 * @msg_q: A listhead to a queue of snooped messages waiting to be read out
 * @msg_q_lock: Spinlock used to protect access to @msg_q
 * @wq: A waitqueue used to wait and poll on the related @msg_q
 */
struct scmi_raw_queue {
	struct list_head free_bufs;
	/* Protect free_bufs[] lists */
	spinlock_t free_bufs_lock;
	struct list_head msg_q;
	/* Protect msg_q[] lists */
	spinlock_t msg_q_lock;
	wait_queue_head_t wq;
};

/**
 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
 *
 * @id: Sequential Raw instance ID.
 * @handle: Pointer to the SCMI entity handle to use
 * @desc: Pointer to the transport descriptor to use
 * @tx_max_msg: Maximum number of concurrent TX in-flight messages
 * @q: An array of Raw queue descriptors
 * @chans_q: An XArray mapping optional additional per-channel queues
 * @free_waiters: Head of the freelist for unused waiters
 * @free_mtx: A mutex to protect the waiters freelist
 * @active_waiters: Head of the list of currently active and used waiters
 * @active_mtx: A mutex to protect the active waiters list
 * @waiters_work: A work descriptor to be used with the workqueue machinery
 * @wait_wq: A workqueue reference to the created workqueue
 * @dentry: Top debugfs root dentry for SCMI Raw
 * @gid: A group ID used for devres accounting
 *
 * Note that this descriptor is passed back to the core after SCMI Raw is
 * initialized as an opaque handle to be used by subsequent SCMI Raw call
 * hooks.
 */
struct scmi_raw_mode_info {
	unsigned int id;
	const struct scmi_handle *handle;
	const struct scmi_desc *desc;
	int tx_max_msg;
	struct scmi_raw_queue *q[SCMI_RAW_MAX_QUEUE];
	struct xarray chans_q;
	struct list_head free_waiters;
	/* Protect free_waiters list */
	struct mutex free_mtx;
	struct list_head active_waiters;
	/* Protect active_waiters list */
	struct mutex active_mtx;
	struct work_struct waiters_work;
	struct workqueue_struct *wait_wq;
	struct dentry *dentry;
	void *gid;
};

/**
 * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
 *
 * @start_jiffies: The timestamp in jiffies of when this structure was queued.
 * @cinfo: A reference to the channel to use for this transaction
 * @xfer: A reference to the xfer to be waited for
 * @async_response: A completion to be, optionally, used for async waits: it
 *		    will be set up by @scmi_do_xfer_raw_start, if needed, to be
 *		    pointed at by xfer->async_done.
 * @node: A list node.
 */
struct scmi_xfer_raw_waiter {
	unsigned long start_jiffies;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer *xfer;
	struct completion async_response;
	struct list_head node;
};

/**
 * struct scmi_raw_buffer - Structure to hold a full SCMI message
 *
 * @max_len: The maximum allowed message size (header included) that can be
 *	     stored into @msg
 * @msg: A message buffer used to collect a full message grabbed from an xfer.
 * @node: A list node.
 */
struct scmi_raw_buffer {
	size_t max_len;
	struct scmi_msg msg;
	struct list_head node;
};

/**
 * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
 * layer
 *
 * @chan_id: The preferred channel to use: if zero the channel is automatically
 *	     selected based on protocol.
 * @raw: A reference to the Raw instance.
 * @tx: A message buffer used to collect TX message on write.
 * @tx_size: The effective size of the TX message.
 * @tx_req_size: The final expected size of the complete TX message.
 * @rx: A message buffer to collect RX message on read.
 * @rx_size: The effective size of the RX message.
 */
struct scmi_dbg_raw_data {
	u8 chan_id;
	struct scmi_raw_mode_info *raw;
	struct scmi_msg tx;
	size_t tx_size;
	size_t tx_req_size;
	struct scmi_msg rx;
	size_t rx_size;
};

static struct scmi_raw_queue *
scmi_raw_queue_select(struct scmi_raw_mode_info *raw, unsigned int idx,
		      unsigned int chan_id)
{
	if (!chan_id)
		return raw->q[idx];

	return xa_load(&raw->chans_q, chan_id);
}

static struct scmi_raw_buffer *scmi_raw_buffer_get(struct scmi_raw_queue *q)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb = NULL;
	struct list_head *head = &q->free_bufs;

	spin_lock_irqsave(&q->free_bufs_lock, flags);
	if (!list_empty(head)) {
		rb = list_first_entry(head, struct scmi_raw_buffer, node);
		list_del_init(&rb->node);
	}
	spin_unlock_irqrestore(&q->free_bufs_lock, flags);

	return rb;
}

static void scmi_raw_buffer_put(struct scmi_raw_queue *q,
				struct scmi_raw_buffer *rb)
{
	unsigned long flags;

	/* Reset to full buffer length */
	rb->msg.len = rb->max_len;

	spin_lock_irqsave(&q->free_bufs_lock, flags);
	list_add_tail(&rb->node, &q->free_bufs);
	spin_unlock_irqrestore(&q->free_bufs_lock, flags);
}

static void scmi_raw_buffer_enqueue(struct scmi_raw_queue *q,
				    struct scmi_raw_buffer *rb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	list_add_tail(&rb->node, &q->msg_q);
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	wake_up_interruptible(&q->wq);
}

static struct scmi_raw_buffer *
scmi_raw_buffer_dequeue_unlocked(struct scmi_raw_queue *q)
{
	struct scmi_raw_buffer *rb = NULL;

	if (!list_empty(&q->msg_q)) {
		rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node);
		list_del_init(&rb->node);
	}

	return rb;
}

static struct scmi_raw_buffer *scmi_raw_buffer_dequeue(struct scmi_raw_queue *q)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	rb = scmi_raw_buffer_dequeue_unlocked(q);
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return rb;
}

static void scmi_raw_buffer_queue_flush(struct scmi_raw_queue *q)
{
	struct scmi_raw_buffer *rb;

	do {
		rb = scmi_raw_buffer_dequeue(q);
		if (rb)
			scmi_raw_buffer_put(q, rb);
	} while (rb);
}

static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_get(struct scmi_raw_mode_info *raw, struct scmi_xfer *xfer,
			 struct scmi_chan_info *cinfo, bool async)
{
	struct scmi_xfer_raw_waiter *rw = NULL;

	mutex_lock(&raw->free_mtx);
	if (!list_empty(&raw->free_waiters)) {
		rw = list_first_entry(&raw->free_waiters,
				      struct scmi_xfer_raw_waiter, node);
		list_del_init(&rw->node);

		if (async) {
			reinit_completion(&rw->async_response);
			xfer->async_done = &rw->async_response;
		}

		rw->cinfo = cinfo;
		rw->xfer = xfer;
	}
	mutex_unlock(&raw->free_mtx);

	return rw;
}

static void scmi_xfer_raw_waiter_put(struct scmi_raw_mode_info *raw,
				     struct scmi_xfer_raw_waiter *rw)
{
	if (rw->xfer) {
		rw->xfer->async_done = NULL;
		rw->xfer = NULL;
	}

	mutex_lock(&raw->free_mtx);
	list_add_tail(&rw->node, &raw->free_waiters);
	mutex_unlock(&raw->free_mtx);
}

static void scmi_xfer_raw_waiter_enqueue(struct scmi_raw_mode_info *raw,
					 struct scmi_xfer_raw_waiter *rw)
{
	/* A timestamp for the deferred worker to know how much this has aged */
	rw->start_jiffies = jiffies;

	trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id,
				      rw->xfer->hdr.protocol_id,
				      rw->xfer->hdr.seq,
				      raw->desc->max_rx_timeout_ms,
				      rw->xfer->hdr.poll_completion);

	mutex_lock(&raw->active_mtx);
	list_add_tail(&rw->node, &raw->active_waiters);
	mutex_unlock(&raw->active_mtx);

	/* kick waiter work */
	queue_work(raw->wait_wq, &raw->waiters_work);
}

static struct scmi_xfer_raw_waiter *
scmi_xfer_raw_waiter_dequeue(struct scmi_raw_mode_info *raw)
{
	struct scmi_xfer_raw_waiter *rw = NULL;

	mutex_lock(&raw->active_mtx);
	if (!list_empty(&raw->active_waiters)) {
		rw = list_first_entry(&raw->active_waiters,
				      struct scmi_xfer_raw_waiter, node);
		list_del_init(&rw->node);
	}
	mutex_unlock(&raw->active_mtx);

	return rw;
}

/**
 * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
 *
 * @work: A reference to the work.
 *
 * In SCMI Raw mode, once a user-provided injected SCMI message is sent, we
 * cannot wait to receive its response (if any) in the context of the injection
 * routines so as not to leave the userspace write syscall, which delivered the
 * SCMI message to send, pending till eventually a reply is received.
 * Userspace should and will poll/wait instead on the read syscalls which will
 * be in charge of reading a received reply (if any).
 *
 * Even though reply messages are collected and reported into the SCMI Raw
 * layer on the RX path, we still have to properly wait for their completion
 * as usual (and for the async completion too, if needed) in order to properly
 * release the xfer structure at the end: to do this out of the context of the
 * write/send, these waiting jobs are delegated to this deferred worker.
 *
 * Any sent xfer, to be waited for, is timestamped and queued for later
 * consumption by this worker: queue aging is accounted for while choosing a
 * timeout for the completion, BUT we do not really care here if we end up
 * accidentally waiting for a bit too long.
 */
static void scmi_xfer_raw_worker(struct work_struct *work)
{
	struct scmi_raw_mode_info *raw;
	struct device *dev;
	unsigned long max_tmo;

	raw = container_of(work, struct scmi_raw_mode_info, waiters_work);
	dev = raw->handle->dev;
	max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms);

	do {
		int ret = 0;
		unsigned int timeout_ms;
		unsigned long aging;
		struct scmi_xfer *xfer;
		struct scmi_xfer_raw_waiter *rw;
		struct scmi_chan_info *cinfo;

		rw = scmi_xfer_raw_waiter_dequeue(raw);
		if (!rw)
			return;

		cinfo = rw->cinfo;
		xfer = rw->xfer;
		/*
		 * Waiters are queued by wait-deadline at the end, so some of
		 * them could have been already expired when processed, BUT we
		 * have to check the completion status anyway just in case a
		 * virtually expired (aged) transaction was indeed completed
		 * fine and we'll have to wait for the asynchronous part (if
		 * any): for this reason a 1 ms timeout is used for already
		 * expired/aged xfers.
		 */
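		/*
		 * Worked example (illustrative numbers, not taken from the
		 * code): with max_rx_timeout_ms == 30, a waiter that sat
		 * queued for 12ms gets a residual ~18ms wait below, while one
		 * aged past the full 30ms still gets the token 1ms wait so
		 * that an already arrived completion is picked up anyway.
		 */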
		aging = jiffies - rw->start_jiffies;
		timeout_ms = max_tmo > aging ?
			jiffies_to_msecs(max_tmo - aging) : 1;

		ret = scmi_xfer_raw_wait_for_message_response(cinfo, xfer,
							      timeout_ms);
		if (!ret && xfer->hdr.status)
			ret = scmi_to_linux_errno(xfer->hdr.status);

		if (raw->desc->ops->mark_txdone)
			raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer);

		trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
				    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

		/* Wait also for an async delayed response if needed */
		if (!ret && xfer->async_done) {
			unsigned long tmo = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);

			if (!wait_for_completion_timeout(xfer->async_done, tmo))
				dev_err(dev,
					"timed out in RAW delayed resp - HDR:%08X\n",
					pack_scmi_header(&xfer->hdr));
		}

		/* Release waiter and xfer */
		scmi_xfer_raw_put(raw->handle, xfer);
		scmi_xfer_raw_waiter_put(raw, rw);
	} while (1);
}

static void scmi_xfer_raw_reset(struct scmi_raw_mode_info *raw)
{
	int i;

	dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n");

	for (i = 0; i < SCMI_RAW_MAX_QUEUE; i++)
		scmi_raw_buffer_queue_flush(raw->q[i]);
}
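/*
 * For reference, the bare message layout that scmi_xfer_raw_get_init() below
 * expects in @buf is, in little-endian order (example values, sketched here
 * to match the header fields unpacked by unpack_scmi_header()):
 *
 *   bytes 0..3 - the 32bit SCMI message header, carrying the token (sequence
 *                number) in bits [27:18], the protocol ID in bits [17:10],
 *                the message type in bits [9:8] and the message ID in
 *                bits [7:0]
 *   bytes 4..N - the optional command payload
 *
 * e.g. a Base protocol PROTOCOL_VERSION command using token 1 would be the
 * 4-byte sequence { 0x00, 0x40, 0x04, 0x00 }.
 */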
/**
 * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
 * bare SCMI message.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @p: A pointer to return the initialized Raw xfer.
 *
 * After an xfer is picked from the TX pool and filled in with the message
 * content, the xfer is registered as pending with the core in the usual way
 * using the original sequence number provided by the user with the message.
 *
 * Note that, in case the testing user application is NOT using distinct
 * sequence numbers between successive SCMI messages, such registration could
 * fail temporarily if the previous message using the same sequence number has
 * not been released yet; in such a case we just wait and retry.
 *
 * Return: 0 on Success
 */
static int scmi_xfer_raw_get_init(struct scmi_raw_mode_info *raw, void *buf,
				  size_t len, struct scmi_xfer **p)
{
	u32 msg_hdr;
	size_t tx_size;
	struct scmi_xfer *xfer;
	int ret, retry = SCMI_XFER_RAW_MAX_RETRIES;
	struct device *dev = raw->handle->dev;

	if (!buf || len < sizeof(u32))
		return -EINVAL;

	tx_size = len - sizeof(u32);
	/* Ensure we have sane transfer sizes */
	if (tx_size > raw->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_raw_get(raw->handle);
	if (IS_ERR(xfer)) {
		dev_warn(dev, "RAW - Cannot get a free RAW xfer !\n");
		return PTR_ERR(xfer);
	}

	/* Build xfer from the provided SCMI bare LE message */
	msg_hdr = le32_to_cpu(*((__le32 *)buf));
	unpack_scmi_header(msg_hdr, &xfer->hdr);
	xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr);
	/* Polling not supported */
	xfer->hdr.poll_completion = false;
	xfer->hdr.status = SCMI_SUCCESS;
	xfer->tx.len = tx_size;
	xfer->rx.len = raw->desc->max_msg_size;
	/* Clear the whole TX buffer */
	memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size);
	if (xfer->tx.len)
		memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len);
	*p = xfer;

	/*
	 * In flight registration can temporarily fail in case of Raw messages
	 * if the user injects messages without using monotonically increasing
	 * sequence numbers since, in Raw mode, the xfer (and the token) is
	 * finally released later by a deferred worker. Just retry for a while.
	 */
	do {
		ret = scmi_xfer_raw_inflight_register(raw->handle, xfer);
		if (ret) {
			dev_dbg(dev,
				"...retrying[%d] inflight registration\n",
				retry);
			msleep(raw->desc->max_rx_timeout_ms /
			       SCMI_XFER_RAW_MAX_RETRIES);
		}
	} while (ret && --retry);

	if (ret) {
		dev_warn(dev,
			 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n",
			 xfer->hdr.seq, msg_hdr);
		scmi_xfer_raw_put(raw->handle, xfer);
	}

	return ret;
}

/**
 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
 *
 * @raw: A reference to the Raw instance.
 * @xfer: The xfer to send
 * @chan_id: The channel ID to use, if zero the channel is automatically
 *	     selected based on the protocol used.
 * @async: A flag stating if an asynchronous command is required.
 *
 * This function sends a previously built raw xfer using an appropriate channel
 * and queues the related waiting work.
 *
 * Note that we need to know explicitly if the required command is meant to be
 * asynchronous in kind since we have to properly setup the waiter.
 * (Deducing this from the payload is weak and does not scale, given there is
 * NOT a common header-flag stating if the command is asynchronous or not.)
 *
 * Return: 0 on Success
 */
static int scmi_do_xfer_raw_start(struct scmi_raw_mode_info *raw,
				  struct scmi_xfer *xfer, u8 chan_id,
				  bool async)
{
	int ret;
	struct scmi_chan_info *cinfo;
	struct scmi_xfer_raw_waiter *rw;
	struct device *dev = raw->handle->dev;

	if (!chan_id)
		chan_id = xfer->hdr.protocol_id;
	else
		xfer->flags |= SCMI_XFER_FLAG_CHAN_SET;

	cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id);
	if (IS_ERR(cinfo))
		return PTR_ERR(cinfo);

	rw = scmi_xfer_raw_waiter_get(raw, xfer, cinfo, async);
	if (!rw) {
		dev_warn(dev, "RAW - Cannot get a free waiter !\n");
		return -ENOMEM;
	}

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, raw->desc))
		xfer->hdr.poll_completion = true;

	reinit_completion(&xfer->done);
	/* Make sure xfer state update is visible before sending */
	smp_store_mb(xfer->state, SCMI_XFER_SENT_OK);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = raw->desc->ops->send_message(rw->cinfo, xfer);
	if (ret) {
		dev_err(dev, "Failed to send RAW message %d\n", ret);
		scmi_xfer_raw_waiter_put(raw, rw);
		return ret;
	}

	trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id,
			    xfer->hdr.id, "cmnd", xfer->hdr.seq,
			    xfer->hdr.status,
			    xfer->tx.buf, xfer->tx.len);

	scmi_xfer_raw_waiter_enqueue(raw, rw);

	return ret;
}

/**
 * scmi_raw_message_send - A helper to build and send an SCMI command using
 * the provided SCMI bare message buffer
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer containing the whole SCMI message to send (including the
 *	 header) in little-endian binary format.
 * @len: Length of the message in @buf.
 * @chan_id: The channel ID to use.
 * @async: A flag stating if an asynchronous command is required.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_send(struct scmi_raw_mode_info *raw,
				 void *buf, size_t len, u8 chan_id, bool async)
{
	int ret;
	struct scmi_xfer *xfer;

	ret = scmi_xfer_raw_get_init(raw, buf, len, &xfer);
	if (ret)
		return ret;

	ret = scmi_do_xfer_raw_start(raw, xfer, chan_id, async);
	if (ret)
		scmi_xfer_raw_put(raw->handle, xfer);

	return ret;
}

static struct scmi_raw_buffer *
scmi_raw_message_dequeue(struct scmi_raw_queue *q, bool o_nonblock)
{
	unsigned long flags;
	struct scmi_raw_buffer *rb;

	spin_lock_irqsave(&q->msg_q_lock, flags);
	while (list_empty(&q->msg_q)) {
		spin_unlock_irqrestore(&q->msg_q_lock, flags);

		if (o_nonblock)
			return ERR_PTR(-EAGAIN);

		if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q)))
			return ERR_PTR(-ERESTARTSYS);

		spin_lock_irqsave(&q->msg_q_lock, flags);
	}

	rb = scmi_raw_buffer_dequeue_unlocked(q);

	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return rb;
}

/**
 * scmi_raw_message_receive - A helper to dequeue and report the next
 * available enqueued raw message payload that has been collected.
 *
 * @raw: A reference to the Raw instance.
 * @buf: A buffer to get hold of the whole SCMI message received and
 *	 represented in little-endian binary format.
 * @len: Length of @buf.
 * @size: The effective size of the message copied into @buf
 * @idx: The index of the queue to pick the next queued message from.
 * @chan_id: The channel ID to use.
 * @o_nonblock: A flag to request a non-blocking message dequeue.
 *
 * Return: 0 on Success
 */
static int scmi_raw_message_receive(struct scmi_raw_mode_info *raw,
				    void *buf, size_t len, size_t *size,
				    unsigned int idx, unsigned int chan_id,
				    bool o_nonblock)
{
	int ret = 0;
	struct scmi_raw_buffer *rb;
	struct scmi_raw_queue *q;

	q = scmi_raw_queue_select(raw, idx, chan_id);
	if (!q)
		return -ENODEV;

	rb = scmi_raw_message_dequeue(q, o_nonblock);
	if (IS_ERR(rb)) {
		dev_dbg(raw->handle->dev, "RAW - No message available!\n");
		return PTR_ERR(rb);
	}

	if (rb->msg.len <= len) {
		memcpy(buf, rb->msg.buf, rb->msg.len);
		*size = rb->msg.len;
	} else {
		ret = -ENOSPC;
	}

	scmi_raw_buffer_put(q, rb);

	return ret;
}

/* SCMI Raw debugfs helpers */

static ssize_t scmi_dbg_raw_mode_common_read(struct file *filp,
					     char __user *buf,
					     size_t count, loff_t *ppos,
					     unsigned int idx)
{
	ssize_t cnt;
	struct scmi_dbg_raw_data *rd = filp->private_data;

	if (!rd->rx_size) {
		int ret;

		ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len,
					       &rd->rx_size, idx, rd->chan_id,
					       filp->f_flags & O_NONBLOCK);
		if (ret) {
			rd->rx_size = 0;
			return ret;
		}

		/* Reset any previous filepos change, including writes */
		*ppos = 0;
	} else if (*ppos == rd->rx_size) {
		/* Return EOF once the whole message has been read out */
		rd->rx_size = 0;
		return 0;
	}

	cnt = simple_read_from_buffer(buf, count, ppos,
				      rd->rx.buf, rd->rx_size);

	return cnt;
}

static ssize_t scmi_dbg_raw_mode_common_write(struct file *filp,
					      const char __user *buf,
					      size_t count, loff_t *ppos,
					      bool async)
{
	int ret;
	struct scmi_dbg_raw_data *rd = filp->private_data;

	if (count > rd->tx.len - rd->tx_size)
		return -ENOSPC;

	/* On first write attempt @count carries the total full message size. */
	if (!rd->tx_size)
		rd->tx_req_size = count;

	/*
	 * Gather a full message, possibly across multiple interrupted writes,
	 * before sending it with a single RAW xfer.
	 */
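	/*
	 * Worked example (sizes are illustrative): for a 16-byte message, if
	 * the first write() manages to copy only 10 bytes, it returns 10 with
	 * tx_req_size == 16 and tx_size == 10, and nothing is sent yet; once
	 * the application writes the remaining 6 bytes, the full message is
	 * finally handed to scmi_raw_message_send() below.
	 */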
	if (rd->tx_size < rd->tx_req_size) {
		ssize_t cnt;

		cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos,
					     buf, count);
		if (cnt < 0)
			return cnt;

		rd->tx_size += cnt;
		if (cnt < count)
			return cnt;
	}

	ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size,
				    rd->chan_id, async);

	/* Reset ppos for next message ... */
	rd->tx_size = 0;
	*ppos = 0;

	return ret ?: count;
}

static __poll_t scmi_test_dbg_raw_common_poll(struct file *filp,
					      struct poll_table_struct *wait,
					      unsigned int idx)
{
	unsigned long flags;
	struct scmi_dbg_raw_data *rd = filp->private_data;
	struct scmi_raw_queue *q;
	__poll_t mask = 0;

	q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id);
	if (!q)
		return mask;

	poll_wait(filp, &q->wq, wait);

	spin_lock_irqsave(&q->msg_q_lock, flags);
	if (!list_empty(&q->msg_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	return mask;
}

static ssize_t scmi_dbg_raw_mode_message_read(struct file *filp,
					      char __user *buf,
					      size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_REPLY_QUEUE);
}

static ssize_t scmi_dbg_raw_mode_message_write(struct file *filp,
					       const char __user *buf,
					       size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false);
}

static __poll_t scmi_dbg_raw_mode_message_poll(struct file *filp,
					       struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_REPLY_QUEUE);
}

static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
{
	u8 id;
	struct scmi_raw_mode_info *raw;
	struct scmi_dbg_raw_data *rd;
	const char *id_str = filp->f_path.dentry->d_parent->d_name.name;

	if (!inode->i_private)
		return -ENODEV;

	raw = inode->i_private;
	rd = kzalloc(sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->rx.len = raw->desc->max_msg_size + sizeof(u32);
	rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL);
	if (!rd->rx.buf) {
		kfree(rd);
		return -ENOMEM;
	}

	rd->tx.len = raw->desc->max_msg_size + sizeof(u32);
	rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL);
	if (!rd->tx.buf) {
		kfree(rd->rx.buf);
		kfree(rd);
		return -ENOMEM;
	}

	/* Grab channel ID from debugfs entry naming if any */
	if (!kstrtou8(id_str, 16, &id))
		rd->chan_id = id;

	rd->raw = raw;
	filp->private_data = rd;

	return nonseekable_open(inode, filp);
}

static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
{
	struct scmi_dbg_raw_data *rd = filp->private_data;

	kfree(rd->rx.buf);
	kfree(rd->tx.buf);
	kfree(rd);

	return 0;
}

static ssize_t scmi_dbg_raw_mode_reset_write(struct file *filp,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	struct scmi_dbg_raw_data *rd = filp->private_data;

	scmi_xfer_raw_reset(rd->raw);

	return count;
}

static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.write = scmi_dbg_raw_mode_reset_write,
	.owner = THIS_MODULE,
};

static const struct file_operations scmi_dbg_raw_mode_message_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_dbg_raw_mode_message_read,
	.write = scmi_dbg_raw_mode_message_write,
	.poll = scmi_dbg_raw_mode_message_poll,
	.owner = THIS_MODULE,
};

static ssize_t scmi_dbg_raw_mode_message_async_write(struct file *filp,
						     const char __user *buf,
						     size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true);
}

static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_dbg_raw_mode_message_read,
	.write = scmi_dbg_raw_mode_message_async_write,
	.poll = scmi_dbg_raw_mode_message_poll,
	.owner = THIS_MODULE,
};

static ssize_t scmi_test_dbg_raw_mode_notif_read(struct file *filp,
						 char __user *buf,
						 size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_NOTIF_QUEUE);
}

static __poll_t
scmi_test_dbg_raw_mode_notif_poll(struct file *filp,
				  struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_NOTIF_QUEUE);
}

static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_test_dbg_raw_mode_notif_read,
	.poll = scmi_test_dbg_raw_mode_notif_poll,
	.owner = THIS_MODULE,
};

static ssize_t scmi_test_dbg_raw_mode_errors_read(struct file *filp,
						  char __user *buf,
						  size_t count, loff_t *ppos)
{
	return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos,
					     SCMI_RAW_ERRS_QUEUE);
}

static __poll_t
scmi_test_dbg_raw_mode_errors_poll(struct file *filp,
				   struct poll_table_struct *wait)
{
	return scmi_test_dbg_raw_common_poll(filp, wait, SCMI_RAW_ERRS_QUEUE);
}

static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
	.open = scmi_dbg_raw_mode_open,
	.release = scmi_dbg_raw_mode_release,
	.read = scmi_test_dbg_raw_mode_errors_read,
	.poll = scmi_test_dbg_raw_mode_errors_poll,
	.owner = THIS_MODULE,
};

static struct scmi_raw_queue *
scmi_raw_queue_init(struct scmi_raw_mode_info *raw)
{
	int i;
	struct scmi_raw_buffer *rb;
	struct device *dev = raw->handle->dev;
	struct scmi_raw_queue *q;

	q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL);
	if (!rb)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->free_bufs_lock);
	INIT_LIST_HEAD(&q->free_bufs);
	for (i = 0; i < raw->tx_max_msg; i++, rb++) {
		rb->max_len = raw->desc->max_msg_size + sizeof(u32);
		rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL);
		if (!rb->msg.buf)
			return ERR_PTR(-ENOMEM);
		scmi_raw_buffer_put(q, rb);
	}

	spin_lock_init(&q->msg_q_lock);
	INIT_LIST_HEAD(&q->msg_q);
	init_waitqueue_head(&q->wq);

	return q;
}

static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
{
	int i;
	struct scmi_xfer_raw_waiter *rw;
	struct device *dev = raw->handle->dev;

	rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL);
	if (!rw)
		return -ENOMEM;

	raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
				       WQ_UNBOUND | WQ_FREEZABLE |
				       WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
	if (!raw->wait_wq)
		return -ENOMEM;

	mutex_init(&raw->free_mtx);
	INIT_LIST_HEAD(&raw->free_waiters);
	mutex_init(&raw->active_mtx);
	INIT_LIST_HEAD(&raw->active_waiters);

	for (i = 0; i < raw->tx_max_msg; i++, rw++) {
		init_completion(&rw->async_response);
		scmi_xfer_raw_waiter_put(raw, rw);
	}

	INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker);

	return 0;
}

static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
			       u8 *channels, int num_chans)
{
	int ret, idx;
	void *gid;
	struct device *dev = raw->handle->dev;

	gid = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	for (idx = 0; idx < SCMI_RAW_MAX_QUEUE; idx++) {
		raw->q[idx] = scmi_raw_queue_init(raw);
		if (IS_ERR(raw->q[idx])) {
			ret = PTR_ERR(raw->q[idx]);
			goto err;
		}
	}

	xa_init(&raw->chans_q);
	if (num_chans > 1) {
		int i;

		for (i = 0; i < num_chans; i++) {
			struct scmi_raw_queue *q;

			q = scmi_raw_queue_init(raw);
			if (IS_ERR(q)) {
				ret = PTR_ERR(q);
				goto err_xa;
			}

			ret = xa_insert(&raw->chans_q, channels[i], q,
					GFP_KERNEL);
			if (ret) {
				dev_err(dev,
					"Fail to allocate Raw queue 0x%02X\n",
					channels[i]);
				goto err_xa;
			}
		}
	}

	ret = scmi_xfer_raw_worker_init(raw);
	if (ret)
		goto err_xa;

	devres_close_group(dev, gid);
	raw->gid = gid;

	return 0;

err_xa:
	xa_destroy(&raw->chans_q);
err:
	devres_release_group(dev, gid);
	return ret;
}

/**
 * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
 *
 * @handle: Pointer to SCMI entity handle
 * @top_dentry: A reference to the top Raw debugfs dentry
 * @instance_id: The ID of the underlying SCMI platform instance represented by
 *		 this Raw instance
 * @channels: The list of the existing channels
 * @num_chans: The number of entries in @channels
 * @desc: Reference to the transport operations
 * @tx_max_msg: Max number of in-flight messages allowed by the transport
 *
 * This function prepares the SCMI Raw stack and creates the debugfs API.
 *
 * Return: An opaque handle to the Raw instance on Success, an ERR_PTR otherwise
 */
void *scmi_raw_mode_init(const struct scmi_handle *handle,
			 struct dentry *top_dentry, int instance_id,
			 u8 *channels, int num_chans,
			 const struct scmi_desc *desc, int tx_max_msg)
{
	int ret;
	struct scmi_raw_mode_info *raw;
	struct device *dev;

	if (!handle || !desc)
		return ERR_PTR(-EINVAL);

	dev = handle->dev;
	raw = devm_kzalloc(dev, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return ERR_PTR(-ENOMEM);

	raw->handle = handle;
	raw->desc = desc;
	raw->tx_max_msg = tx_max_msg;
	raw->id = instance_id;

	ret = scmi_raw_mode_setup(raw, channels, num_chans);
	if (ret) {
		devm_kfree(dev, raw);
		return ERR_PTR(ret);
	}

	raw->dentry = debugfs_create_dir("raw", top_dentry);

	debugfs_create_file("reset", 0200, raw->dentry, raw,
			    &scmi_dbg_raw_mode_reset_fops);

	debugfs_create_file("message", 0600, raw->dentry, raw,
			    &scmi_dbg_raw_mode_message_fops);

	debugfs_create_file("message_async", 0600, raw->dentry, raw,
			    &scmi_dbg_raw_mode_message_async_fops);

	debugfs_create_file("notification", 0400, raw->dentry, raw,
			    &scmi_dbg_raw_mode_notification_fops);

	debugfs_create_file("errors", 0400, raw->dentry, raw,
			    &scmi_dbg_raw_mode_errors_fops);

	/*
	 * Expose per-channel entries if multiple channels are available.
	 * Just ignore errors while setting up these interfaces since we
	 * have anyway already a working core Raw support.
	 */
	if (num_chans > 1) {
		int i;
		struct dentry *top_chans;

		top_chans = debugfs_create_dir("channels", raw->dentry);

		for (i = 0; i < num_chans; i++) {
			char cdir[8];
			struct dentry *chd;

			snprintf(cdir, 8, "0x%02X", channels[i]);
			chd = debugfs_create_dir(cdir, top_chans);

			debugfs_create_file("message", 0600, chd, raw,
					    &scmi_dbg_raw_mode_message_fops);

			debugfs_create_file("message_async", 0600, chd, raw,
					    &scmi_dbg_raw_mode_message_async_fops);
		}
	}

	dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id);

	return raw;
}

/**
 * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
 *
 * @r: An opaque handle to an initialized SCMI Raw instance
 */
void scmi_raw_mode_cleanup(void *r)
{
	struct scmi_raw_mode_info *raw = r;

	if (!raw)
		return;

	debugfs_remove_recursive(raw->dentry);

	cancel_work_sync(&raw->waiters_work);
	destroy_workqueue(raw->wait_wq);
	xa_destroy(&raw->chans_q);
}

static int scmi_xfer_raw_collect(void *msg, size_t *msg_len,
				 struct scmi_xfer *xfer)
{
	__le32 *m;
	size_t msg_size;

	if (!xfer || !msg || !msg_len)
		return -EINVAL;

	/* Account for hdr ... */
	msg_size = xfer->rx.len + sizeof(u32);
	/* ... and status if needed */
	if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
		msg_size += sizeof(u32);

	if (msg_size > *msg_len)
		return -ENOSPC;

	m = msg;
	*m = cpu_to_le32(pack_scmi_header(&xfer->hdr));
	if (xfer->hdr.type != MSG_TYPE_NOTIFICATION)
		*++m = cpu_to_le32(xfer->hdr.status);

	memcpy(++m, xfer->rx.buf, xfer->rx.len);

	*msg_len = msg_size;

	return 0;
}

/**
 * scmi_raw_message_report - Helper to report back valid responses/notifications
 * to raw message requests.
 *
 * @r: An opaque reference to the raw instance configuration
 * @xfer: The xfer containing the message to be reported
 * @idx: The index of the queue.
 * @chan_id: The channel ID to use.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the regular RX
 * path to save and enqueue the response/notification payload carried by this
 * xfer into a dedicated scmi_raw_buffer for later consumption by the user.
 *
 * This way the caller can free the related xfer immediately afterwards and the
 * user can read back the raw message payload at its own pace (if ever) without
 * holding an xfer for too long.
 */
void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
			     unsigned int idx, unsigned int chan_id)
{
	int ret;
	unsigned long flags;
	struct scmi_raw_buffer *rb;
	struct device *dev;
	struct scmi_raw_queue *q;
	struct scmi_raw_mode_info *raw = r;

	if (!raw || (idx == SCMI_RAW_REPLY_QUEUE && !SCMI_XFER_IS_RAW(xfer)))
		return;

	dev = raw->handle->dev;
	q = scmi_raw_queue_select(raw, idx,
				  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
	if (!q) {
		dev_warn(dev,
			 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
			 idx, chan_id);
		return;
	}

	/*
	 * Grab the msg_q_lock upfront to avoid a possible race between
	 * realizing the free list was empty and effectively picking the next
	 * buffer to use from the oldest one enqueued and still unread on this
	 * msg_q.
	 *
	 * Note that nowhere else are these locks taken together, so there is
	 * no risk of deadlocks due to inversion.
	 */
	spin_lock_irqsave(&q->msg_q_lock, flags);
	rb = scmi_raw_buffer_get(q);
	if (!rb) {
		/*
		 * Immediate and delayed replies to previously injected Raw
		 * commands MUST be read back from userspace to free the
		 * buffers: if this is not happening something is seriously
		 * broken and must be fixed at the application level: complain
		 * loudly.
		 */
		if (idx == SCMI_RAW_REPLY_QUEUE) {
			spin_unlock_irqrestore(&q->msg_q_lock, flags);
			dev_warn(dev,
				 "RAW[%d] - Buffers exhausted. Dropping report.\n",
				 idx);
			return;
		}

		/*
		 * Notifications and errors queues are instead handled in a
		 * circular manner: unread old buffers are just overwritten by
		 * newer ones.
		 *
		 * The main reason for this is that notifications originated
		 * by Raw requests cannot be distinguished from normal ones, so
		 * the Raw buffer queues risk being flooded and depleted by
		 * notifications if these were mistakenly left enabled, or when
		 * running in coexistence mode.
		 */
		rb = scmi_raw_buffer_dequeue_unlocked(q);
		if (WARN_ON(!rb)) {
			spin_unlock_irqrestore(&q->msg_q_lock, flags);
			return;
		}

		/* Reset to full buffer length */
		rb->msg.len = rb->max_len;

		dev_warn_once(dev,
			      "RAW[%d] - Buffers exhausted. Re-using oldest.\n",
			      idx);
	}
	spin_unlock_irqrestore(&q->msg_q_lock, flags);

	ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer);
	if (ret) {
		dev_warn(dev, "RAW - Cannot collect xfer into buffer !\n");
		scmi_raw_buffer_put(q, rb);
		return;
	}

	scmi_raw_buffer_enqueue(q, rb);
}

static void scmi_xfer_raw_fill(struct scmi_raw_mode_info *raw,
			       struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer, u32 msg_hdr)
{
	/* Unpack received HDR as it is */
	unpack_scmi_header(msg_hdr, &xfer->hdr);
	xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);

	memset(xfer->rx.buf, 0x00, xfer->rx.len);

	raw->desc->ops->fetch_response(cinfo, xfer);
}

/**
 * scmi_raw_error_report - Helper to report back timed-out or generally
 * unexpected replies.
 *
 * @r: An opaque reference to the raw instance configuration
 * @cinfo: A reference to the channel to use to retrieve the broken xfer
 * @msg_hdr: The SCMI message header of the message to fetch and report
 * @priv: Any private data related to the xfer.
 *
 * If Raw mode is enabled, this is called from the SCMI core on the RX path in
 * case of errors to save and enqueue the bad message payload carried by the
 * message that has just been received.
 *
 * Note that we have to manually fetch any available payload into a temporary
 * xfer to be able to save and enqueue the message, since the regular RX error
 * path which had called this would not have fetched the message payload,
 * having classified it as an error.
 */
void scmi_raw_error_report(void *r, struct scmi_chan_info *cinfo,
			   u32 msg_hdr, void *priv)
{
	struct scmi_xfer xfer;
	struct scmi_raw_mode_info *raw = r;

	if (!raw)
		return;

	xfer.rx.len = raw->desc->max_msg_size;
	xfer.rx.buf = kzalloc(xfer.rx.len, GFP_ATOMIC);
	if (!xfer.rx.buf) {
		dev_info(raw->handle->dev,
			 "Cannot report Raw error for HDR:0x%X - ENOMEM\n",
			 msg_hdr);
		return;
	}

	/* Any transport-provided priv must be passed back down to transport */
	if (priv)
		/* Ensure priv is visible */
		smp_store_mb(xfer.priv, priv);

	scmi_xfer_raw_fill(raw, cinfo, &xfer, msg_hdr);
	scmi_raw_message_report(raw, &xfer, SCMI_RAW_ERRS_QUEUE, 0);

	kfree(xfer.rx.buf);
}