// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *	reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
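/*
 * Usage sketch (illustrative only, not driver code): the me client
 * list is protected by me_clients_rwsem and the entries are
 * refcounted, so a traversal takes a reference before dropping the
 * lock; 'dev' is an assumed struct mei_device pointer, and the
 * property tested is just an example.
 *
 *	struct mei_me_client *me_cl, *found = NULL;
 *
 *	down_read(&dev->me_clients_rwsem);
 *	list_for_each_entry(me_cl, &dev->me_clients, list) {
 *		if (me_cl->props.fixed_address) {
 *			found = mei_me_cl_get(me_cl);
 *			break;
 *		}
 *	}
 *	up_read(&dev->me_clients_rwsem);
 *
 *	if (found) {
 *		(use found outside the lock)
 *		mei_me_cl_put(found);
 *	}
 */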
/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}


/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove me client matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
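/*
 * Usage sketch (illustrative only): every successful lookup returns a
 * referenced me client that must be balanced with mei_me_cl_put();
 * 'dev' and 'my_uuid' are assumed to be supplied by the caller.
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &my_uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *
 *	(inspect me_cl->props and me_cl->client_id)
 *
 *	mei_me_cl_put(me_cl);
 */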
/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cl_cb struct and its data buffer
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}
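/*
 * Usage sketch (illustrative only): mei_io_cb_init() pairs with
 * mei_io_cb_free(), which also unlinks the cb from whatever list it
 * sits on and releases the data buffer, so no extra cleanup is
 * needed; 'cl' and 'fp' are assumed to exist in the caller.
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_io_cb_init(cl, MEI_FOP_READ, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	...
 *	mei_io_cb_free(cb);
 */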
/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *	and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *	for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}
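/*
 * Usage sketch (illustrative only): a reader consumes a completed cb
 * from cl->rd_completed, where cb->buf_idx holds the number of bytes
 * actually received; 'user_len' and 'out_buf' are hypothetical caller
 * variables, and device_lock is assumed to be held as elsewhere here.
 *
 *	struct mei_cl_cb *cb;
 *	size_t len;
 *
 *	cb = mei_cl_read_cb(cl, fp);
 *	if (cb) {
 *		len = min_t(size_t, user_len, cb->buf_idx);
 *		memcpy(out_buf, cb->buf.data, len);
 *		mei_io_cb_free(cb);
 *	}
 */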
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}
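/*
 * Usage sketch (illustrative only): a bare allocation is linked so it
 * owns a host id, and is simply freed if linking fails;
 * mei_cl_alloc_linked() further below wraps exactly this pattern.
 *
 *	struct mei_cl *cl;
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *
 *	if (mei_cl_link(cl)) {
 *		kfree(cl);
 *		return -EMFILE;
 *	}
 */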
/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d\n", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d\n",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client\n");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - mark the device enabled and kick off
 *	a rescan of the me clients bus
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}
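/*
 * Wait-side sketch (illustrative only): mei_cl_wake_all() pairs with
 * waiters that sleep on the client wait queues with device_lock
 * dropped, e.g. a blocking reader:
 *
 *	mutex_unlock(&dev->device_lock);
 *	rets = wait_event_interruptible(cl->rx_wait,
 *			!list_empty(&cl->rd_completed) ||
 *			!mei_cl_is_connected(cl));
 *	mutex_lock(&dev->device_lock);
 *
 * The bare waitqueue_active() checks above are safe only because both
 * the waiter and the waker run under the device mutex, as the inline
 * comments note.
 */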
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - set connecting state and bind to the me client
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is no longer present,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}
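/*
 * Worked example (illustrative only): assuming mei_hbm2slots() rounds
 * the request plus the 4-byte message header up to 4-byte slots, the
 * 4-byte hbm_client_connect_request needs two slots. If
 * mei_hbuf_empty_slots() reports fewer, -EMSGSIZE leaves the cb
 * queued so the interrupt thread retries once the host buffer drains.
 */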
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *	internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting\n");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}
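/*
 * Usage sketch (illustrative only, roughly the character device
 * release path): disconnect, flush and unlink under device_lock,
 * then free the host client.
 *
 *	mutex_lock(&dev->device_lock);
 *	rets = mei_cl_disconnect(cl);
 *	mei_cl_flush_queues(cl, fp);
 *	mei_cl_unlink(cl);
 *	mutex_unlock(&dev->device_lock);
 *	kfree(cl);
 */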
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}
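/*
 * Usage sketch (illustrative only, roughly the connect-ioctl flow):
 * resolve the me client by uuid, connect the linked host client, and
 * drop the lookup reference regardless of the outcome.
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *
 *	rets = mei_cl_connect(cl, me_cl, fp);
 *
 *	mei_me_cl_put(me_cl);
 */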
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise, -EINVAL on an
 *	invalid client
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *	for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start/stop file operation type
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}
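/*
 * Credit pairing sketch (illustrative only): a tx path checks credits
 * before writing and reduces them only once a chunk has actually been
 * handed to the hardware, mirroring mei_cl_irq_write() below.
 *
 *	if (mei_cl_tx_flow_ctrl_creds(cl) <= 0)
 *		return 0;	(wait for a flow control message)
 *
 *	(write the message)
 *
 *	mei_cl_tx_flow_ctrl_creds_reduce(cl);
 */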
/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event\n");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}
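/*
 * Usage sketch (illustrative only, roughly what the notify set/get
 * ioctl handlers do): enable notifications, then block until an
 * event arrives.
 *
 *	bool notify_ev;
 *
 *	rets = mei_cl_notify_request(cl, fp, 1);
 *	if (rets)
 *		return rets;
 *
 *	rets = mei_cl_notify_get(cl, true, &notify_ev);
 *	if (!rets && notify_ev)
 *		(handle the event)
 */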
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - start reading a message from a client
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_msg_hdr_init - initialize mei message header
 *
 * @mei_hdr: mei message header
 * @cb: message callback structure
 */
static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
{
	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->length = 0;
	mei_hdr->reserved = 0;
	mei_hdr->msg_complete = 0;
	mei_hdr->dma_ring = 0;
	mei_hdr->internal = cb->internal;
}
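/*
 * Header sketch (illustrative only): for a payload that fits the host
 * buffer, the tx paths below emit a single header with length set to
 * the payload size and msg_complete = 1; larger payloads go out as a
 * series of headers where only the last one has msg_complete set.
 */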
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len;
	size_t hbuf_len, dr_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credits: not sending.\n");
		return 0;
	}

	len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_msg_hdr_init(&mei_hdr, cb);

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr.dma_ring = 1;
		if (len > dr_len)
			len = dr_len;
		else
			mei_hdr.msg_complete = 1;

		mei_hdr.length = sizeof(dma_len);
		dma_len = len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		len = hbuf_len - hdr_len;
		mei_hdr.length = len;
	} else {
		return 0;
	}

	if (mei_hdr.dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);

	rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	return 0;

err:
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
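/*
 * Worked example (illustrative only, sizes assumed): with 4-byte
 * slots, a 4-byte header and 32 empty slots, hbuf_len is 128 bytes.
 * A 300-byte message does not fit (300 + 4 > 128) and, without dma
 * ring space, is sent only when the buffer is completely empty: a
 * 124-byte chunk goes out with msg_complete = 0, further chunks
 * follow from subsequent interrupts, and the last one fits entirely
 * and carries msg_complete = 1.
 */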
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len, hbuf_len, dr_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	len = buf->size;

	cl_dbg(dev, cl, "len=%zd\n", len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_msg_hdr_init(&mei_hdr, cb);

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credits: not sending.\n");
		rets = len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		mei_hdr.dma_ring = 1;
		if (len > dr_len)
			len = dr_len;
		else
			mei_hdr.msg_complete = 1;

		mei_hdr.length = sizeof(dma_len);
		dma_len = len;
		data = &dma_len;
	} else {
		len = hbuf_len - hdr_len;
		mei_hdr.length = len;
	}

	if (mei_hdr.dma_ring)
		mei_dma_ring_write(dev, buf->data, len);

	rets = mei_write_message(dev, &mei_hdr, hdr_len,
				 data, mei_hdr.length);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = len;
	/* restore return value */
	len = buf->size;

out:
	if (mei_hdr.msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}
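/*
 * Usage sketch (illustrative only, roughly the write() path): fill a
 * tx cb and hand it off; mei_cl_write() consumes the cb on success
 * and on failure alike, so the caller must not free it.
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *
 *	memcpy(cb->buf.data, data, length);
 *	cb->blocking = true;
 *
 *	rets = mei_cl_write(cl, cb);
 */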
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		WARN(1, "unexpected fop type %d\n", cb->fop_type);
	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}