// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2024 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "../common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE	128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE \
	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
#define DESCRIPTORS_PER_TX_MSG		2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list.
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message's users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};
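
/*
 * A summary of the @poll_status lifecycle, as implemented by the code below
 * (a reading aid, not a formal state machine):
 *
 *   VIO_MSG_NOT_POLLED --[virtio_send_message(), polled xfer]--> VIO_MSG_POLLING
 *   VIO_MSG_POLLING ----[dequeued by virtio_poll_done()]-------> VIO_MSG_POLL_DONE
 *   VIO_MSG_POLLING ----[timeout in virtio_mark_txdone()]------> VIO_MSG_POLL_TIMEOUT
 *
 * Every message picked from the free list restarts from VIO_MSG_NOT_POLLED.
 */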

static struct scmi_transport_core_operations *core;

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* Cannot be kicked anymore after this... */
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

/* Assumed to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumed to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}
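
/*
 * A minimal usage sketch for the refcounting helpers above: every path that
 * operates on a channel brackets its work in an acquire/release pair, e.g.:
 *
 *	if (!scmi_vio_channel_acquire(vioch))
 *		return;
 *	...operate on vioch and its messages...
 *	scmi_vio_channel_release(vioch);
 *
 * so that scmi_vio_channel_cleanup_sync() can wait for the last user to drop
 * out before completing the shutdown.
 */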

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assumed to be called with the channel already acquired, or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			core->rx_callback(vioch->cinfo,
					  core->msg->read_header(msg->input),
					  msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}
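
/*
 * A note on the loop in scmi_vio_complete_cb() above: the
 * virtqueue_disable_cb()/virtqueue_get_buf()/virtqueue_enable_cb() sequence
 * is the usual virtio idiom to avoid losing late buffers: callbacks are
 * re-enabled only once the queue looks empty and, if virtqueue_enable_cb()
 * reports that more buffers have arrived in the meantime, the loop goes
 * around once more with callbacks disabled again.
 */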

static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge of
	 * processing the valid non-expired messages and of finally freeing all
	 * of them anyway.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of messages possibly pre-fetched during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no longer processed elsewhere, so no poll_lock is needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			core->rx_callback(vioch->cinfo,
					  core->msg->read_header(msg->input),
					  msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static struct virtqueue_info scmi_vio_vqs_info[] = {
	{ "tx", scmi_vio_complete_cb },
	{ "rx", scmi_vio_complete_cb },
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static bool virtio_chan_available(struct device_node *of_node, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}

static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		int ret;

		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
					       vioch->deferred_tx_wq);
		if (ret)
			return ret;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		if (tx) {
			msg->request = devm_kzalloc(dev,
						    VIRTIO_SCMI_MAX_PDU_SIZE,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE,
					  GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}
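
/*
 * Note that all the struct scmi_vio_msg instances and their buffers above are
 * devm-allocated against the device passed in by the SCMI core, so they are
 * torn down automatically with that device: virtio_chan_free() below only
 * needs to stop traffic and synchronize on channel shutdown.
 */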

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break device to inhibit further traffic flowing while shutting down
	 * the channels: doing it later while holding vioch->lock creates
	 * unsafe locking dependency chains as reported by LOCKDEP.
	 */
	virtio_break_device(vioch->vqueue->vdev);
	scmi_vio_channel_cleanup_sync(vioch);

	return 0;
}

static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	core->msg->tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer));
	sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		core->msg->fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		core->msg->fetch_notification(msg->input, msg->rx_len,
					      max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers
 * are still effectively owned by the SCMI platform VirtIO device even after
 * timeout expiration: forcibly freeing and reusing them before they had been
 * returned explicitly by the SCMI platform could lead to subtle bugs due to
 * message corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that had been somehow replied (only if not by chance already processed on
 * the IRQ path - the initial scmi_vio_msg_release() takes care of this) and
 * also any timed-out polled message if that indeed appears to have been at
 * least dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed
 * since such messages won't be freed elsewhere. Any other polled message is
 * marked as VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker
 * if dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages if they are still inflight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}
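
/*
 * A compact view of the decision taken above for a polled message, assuming
 * it was not already released on the IRQ path:
 *
 *	ret != -ETIMEDOUT		-> release (replied in time)
 *	ret == -ETIMEDOUT, POLL_DONE	-> release (dequeued, just too late)
 *	ret == -ETIMEDOUT, POLLING	-> mark VIO_MSG_POLL_TIMEOUT and keep
 *					   it; a late-reply path will release
 *					   it eventually.
 */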

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence it can happen to dequeue something different from the
 * buffer we were poll-waiting for: if that is the case such early fetched
 * buffers are then added to the @pending_cmds_list list for later processing
 * by a dedicated deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or
 * until we have 'seemingly' emptied the virtqueue; if some buffers are still
 * pending in the vqueue at the end of the polling loop (possible due to
 * inherent races in virtqueues handling mechanisms), we similarly kick the
 * deferred worker and let it process those, to avoid indefinitely looping in
 * the .poll_done busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final free of any
 * timed-out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification
 * mechanism, the message we are polling for could be alternatively delivered
 * via the usual IRQ callbacks on another core which happened to have IRQs
 * enabled while we are actively polling for it here: in such a case it will
 * be handled as such by rx_callback() and the polling loop in the SCMI Core
 * TX path will be transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by another polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid acquiring poll_lock since poll_status can be changed in a
	 * relevant manner only later in this same thread of execution: any
	 * other possible changes made concurrently by other polling loops or
	 * by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race, but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has the cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that it can be properly freed even on timeout in
		 * mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is this the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * If the polling loop has terminated successfully and something else
	 * was queued in the meantime, it will be served by the deferred
	 * worker OR by the normal IRQ/callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

static const struct scmi_transport_ops scmi_virtio_ops = {
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};

static struct scmi_desc scmi_virtio_desc = {
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi-virtio" },
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_virtio, scmi_virtio_driver, scmi_virtio_desc,
			     scmi_of_match, core);

static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_vqs_info, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;

	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	ret = platform_driver_register(&scmi_virtio_driver);
	if (ret) {
		vdev->priv = NULL;
		vdev->config->del_vqs(vdev);
		/* Ensure NULLified scmi_vdev is visible */
		smp_store_mb(scmi_vdev, NULL);

		return ret;
	}

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	platform_driver_unregister(&scmi_virtio_driver);

	/*
	 * Once we get here, virtio_chan_free() will have already been called
	 * by the SCMI core for any existing channel and, as a consequence,
	 * all the virtio channels will have been already marked NOT ready,
	 * causing any outstanding message on any vqueue to be ignored by
	 * complete_cb: now we can just stop processing buffers and destroy
	 * the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

module_virtio_driver(virtio_scmi_driver);

MODULE_AUTHOR("Igor Skalkin <igor.skalkin@opensynergy.com>");
MODULE_AUTHOR("Peter Hilber <peter.hilber@opensynergy.com>");
MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>");
MODULE_DESCRIPTION("SCMI VirtIO Transport driver");
MODULE_LICENSE("GPL");