/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
        INIT_LIST_HEAD(&me_cl->list);
        kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
        if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
                return me_cl;

        return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
        struct mei_me_client *me_cl =
                container_of(ref, struct mei_me_client, refcnt);

        kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
        if (me_cl)
                kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
        if (!me_cl)
                return;

        list_del_init(&me_cl->list);
        mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
        down_write(&dev->me_clients_rwsem);
        __mei_me_cl_del(dev, me_cl);
        up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
        down_write(&dev->me_clients_rwsem);
        list_add(&me_cl->list, &dev->me_clients);
        up_write(&dev->me_clients_rwsem);
}

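/*
 * A minimal registration sketch for the helpers above (illustrative only;
 * the 'res' enumeration response is hypothetical -- in the driver the
 * properties come from the HBM client enumeration flow in hbm.c):
 *
 *        struct mei_me_client *me_cl;
 *
 *        me_cl = kzalloc(sizeof(*me_cl), GFP_KERNEL);
 *        if (!me_cl)
 *                return -ENOMEM;
 *
 *        mei_me_cl_init(me_cl);             refcount starts at 1
 *        me_cl->client_id = res->me_addr;
 *        me_cl->props = res->client_properties;
 *        mei_me_cl_add(dev, me_cl);         the list owns this reference
 *
 * From here on the client is destroyed by whichever mei_me_cl_put() drops
 * the last reference, never by a direct kfree().
 */
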
/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
                                        const uuid_le *uuid)
{
        struct mei_me_client *me_cl;
        const uuid_le *pn;

        WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

        list_for_each_entry(me_cl, &dev->me_clients, list) {
                pn = &me_cl->props.protocol_name;
                if (uuid_le_cmp(*uuid, *pn) == 0)
                        return mei_me_cl_get(me_cl);
        }

        return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
                                        const uuid_le *uuid)
{
        struct mei_me_client *me_cl;

        down_read(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid(dev, uuid);
        up_read(&dev->me_clients_rwsem);

        return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

        struct mei_me_client *__me_cl, *me_cl = NULL;

        down_read(&dev->me_clients_rwsem);
        list_for_each_entry(__me_cl, &dev->me_clients, list) {
                if (__me_cl->client_id == client_id) {
                        me_cl = mei_me_cl_get(__me_cl);
                        break;
                }
        }
        up_read(&dev->me_clients_rwsem);

        return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
                                        const uuid_le *uuid, u8 client_id)
{
        struct mei_me_client *me_cl;
        const uuid_le *pn;

        WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

        list_for_each_entry(me_cl, &dev->me_clients, list) {
                pn = &me_cl->props.protocol_name;
                if (uuid_le_cmp(*uuid, *pn) == 0 &&
                    me_cl->client_id == client_id)
                        return mei_me_cl_get(me_cl);
        }

        return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
                                        const uuid_le *uuid, u8 client_id)
{
        struct mei_me_client *me_cl;

        down_read(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
        up_read(&dev->me_clients_rwsem);

        return me_cl;
}

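/*
 * All of the lookup helpers above return the client with an extra
 * reference taken, so every successful lookup must be balanced with
 * mei_me_cl_put(). A minimal sketch (the amthif uuid stands in for any
 * client uuid):
 *
 *        struct mei_me_client *me_cl;
 *
 *        me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
 *        if (!me_cl)
 *                return -ENOTTY;
 *        ... use me_cl->client_id, me_cl->props ...
 *        mei_me_cl_put(me_cl);
 */
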
/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
        struct mei_me_client *me_cl;

        dev_dbg(dev->dev, "remove %pUl\n", uuid);

        down_write(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid(dev, uuid);
        __mei_me_cl_del(dev, me_cl);
        up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove me client matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
        struct mei_me_client *me_cl;

        dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

        down_write(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
        __mei_me_cl_del(dev, me_cl);
        up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
        struct mei_me_client *me_cl, *next;

        down_write(&dev->me_clients_rwsem);
        list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
                __mei_me_cl_del(dev, me_cl);
        up_write(&dev->me_clients_rwsem);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true - if the clients have the same host and me ids
 *	   false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
                                 const struct mei_cl *cl2)
{
        return cl1 && cl2 &&
                (cl1->host_client_id == cl2->host_client_id) &&
                (mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
        if (cb == NULL)
                return;

        list_del(&cb->list);
        kfree(cb->buf.data);
        kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
                                 const struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
        if (!cb)
                return NULL;

        INIT_LIST_HEAD(&cb->list);
        cb->fp = fp;
        cb->cl = cl;
        cb->buf_idx = 0;
        cb->fop_type = type;
        return cb;
}

/**
 * __mei_io_list_flush - removes and frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
                                struct mei_cl *cl, bool free)
{
        struct mei_cl_cb *cb, *next;

        /* enable removing everything if no cl is specified */
        list_for_each_entry_safe(cb, next, &list->list, list) {
                if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
                        list_del_init(&cb->list);
                        if (free)
                                mei_io_cb_free(cb);
                }
        }
}

/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, false);
}

/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_alloc_buf - allocate callback buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * Return: 0 on success
 *	   -EINVAL if cb is NULL
 *	   -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
{
        if (!cb)
                return -EINVAL;

        if (length == 0)
                return 0;

        cb->buf.data = kmalloc(length, GFP_KERNEL);
        if (!cb->buf.data)
                return -ENOMEM;
        cb->buf.size = length;
        return 0;
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
                                  enum mei_cb_file_ops type,
                                  const struct file *fp)
{
        struct mei_cl_cb *cb;

        cb = mei_io_cb_init(cl, type, fp);
        if (!cb)
                return NULL;

        if (mei_io_cb_alloc_buf(cb, length)) {
                mei_io_cb_free(cb);
                return NULL;
        }

        return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &cl->rd_completed, list)
                if (!fp || fp == cb->fp)
                        return cb;

        return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *   for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
        struct mei_cl_cb *cb, *next;

        list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
                if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);

        list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
                if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);
}

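/*
 * A typical allocate/consume pairing for the cb helpers above, as the
 * read path later in this file uses them (sketch; error handling elided):
 *
 *        struct mei_cl_cb *cb;
 *
 *        cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
 *        if (!cb)
 *                return -ENOMEM;
 *        list_add_tail(&cb->list, &cl->rd_pending);
 *        ... once completed and consumed ...
 *        mei_io_cb_free(cb);    unlinks the cb and frees its buffer
 */
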
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
        struct mei_device *dev;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        cl_dbg(dev, cl, "remove list entry belonging to cl\n");
        mei_io_list_free(&cl->dev->write_list, cl);
        mei_io_list_free(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);

        mei_cl_read_cb_flush(cl, fp);

        return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
        memset(cl, 0, sizeof(struct mei_cl));
        init_waitqueue_head(&cl->wait);
        init_waitqueue_head(&cl->rx_wait);
        init_waitqueue_head(&cl->tx_wait);
        init_waitqueue_head(&cl->ev_wait);
        INIT_LIST_HEAD(&cl->rd_completed);
        INIT_LIST_HEAD(&cl->rd_pending);
        INIT_LIST_HEAD(&cl->link);
        cl->writing_state = MEI_IDLE;
        cl->state = MEI_FILE_INITIALIZING;
        cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: the allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
        struct mei_cl *cl;

        cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
        if (!cl)
                return NULL;

        mei_cl_init(cl, dev);

        return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
        struct mei_device *dev;
        long open_handle_count;

        if (WARN_ON(!cl || !cl->dev))
                return -EINVAL;

        dev = cl->dev;

        /* If Id is not assigned get one */
        if (id == MEI_HOST_CLIENT_ID_ANY)
                id = find_first_zero_bit(dev->host_clients_map,
                                         MEI_CLIENTS_MAX);

        if (id >= MEI_CLIENTS_MAX) {
                dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
                return -EMFILE;
        }

        open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
        if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
                dev_err(dev->dev, "open_handle_count exceeded %d",
                        MEI_MAX_OPEN_HANDLE_COUNT);
                return -EMFILE;
        }

        dev->open_handle_count++;

        cl->host_client_id = id;
        list_add_tail(&cl->link, &dev->file_list);

        set_bit(id, dev->host_clients_map);

        cl->state = MEI_FILE_INITIALIZING;

        cl_dbg(dev, cl, "link cl\n");
        return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
        struct mei_device *dev;

        /* don't shout on error exit path */
        if (!cl)
                return 0;

        /* amthif might not be initialized */
        if (!cl->dev)
                return 0;

        dev = cl->dev;

        cl_dbg(dev, cl, "unlink client");

        if (dev->open_handle_count > 0)
                dev->open_handle_count--;

        /* never clear the 0 bit */
        if (cl->host_client_id)
                clear_bit(cl->host_client_id, dev->host_clients_map);

        list_del_init(&cl->link);

        cl->state = MEI_FILE_INITIALIZING;

        return 0;
}

/**
 * mei_host_client_init - host client initialization work function
 *
 * @work: the init_work item embedded in the device structure
 */
void mei_host_client_init(struct work_struct *work)
{
        struct mei_device *dev =
                container_of(work, struct mei_device, init_work);
        struct mei_me_client *me_cl;

        mutex_lock(&dev->device_lock);

        me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
        if (me_cl)
                mei_amthif_host_init(dev, me_cl);
        mei_me_cl_put(me_cl);

        dev->dev_state = MEI_DEV_ENABLED;
        dev->reset_count = 0;
        mutex_unlock(&dev->device_lock);

        mei_cl_bus_rescan(dev);

        pm_runtime_mark_last_busy(dev->dev);
        dev_dbg(dev->dev, "rpm: autosuspend\n");
        pm_runtime_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
        if (mei_pg_state(dev) == MEI_PG_ON ||
            mei_pg_in_transition(dev)) {
                dev_dbg(dev->dev, "device is in pg\n");
                return false;
        }

        if (!dev->hbuf_is_ready) {
                dev_dbg(dev->dev, "hbuf is not ready\n");
                return false;
        }

        dev->hbuf_is_ready = false;

        return true;
}

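/*
 * mei_hbuf_acquire() implements a single-writer convention over the host
 * buffer: a successful call hands the (empty) buffer to the caller for
 * exactly one message, and hbuf_is_ready is raised again from the
 * interrupt path once the hardware drains it. The idiom used throughout
 * this file is therefore "send now or queue for the interrupt thread":
 *
 *        if (mei_hbuf_acquire(dev)) {
 *                rets = mei_cl_send_connect(cl, cb);    write immediately
 *                ...
 *        } else {
 *                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 *        }
 */
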
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
void mei_cl_set_disconnected(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;

        if (cl->state == MEI_FILE_DISCONNECTED ||
            cl->state == MEI_FILE_INITIALIZING)
                return;

        cl->state = MEI_FILE_DISCONNECTED;
        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
        cl->mei_flow_ctrl_creds = 0;
        cl->timer_count = 0;

        if (!cl->me_cl)
                return;

        if (!WARN_ON(cl->me_cl->connect_count == 0))
                cl->me_cl->connect_count--;

        if (cl->me_cl->connect_count == 0)
                cl->me_cl->mei_flow_ctrl_creds = 0;

        mei_me_cl_put(cl->me_cl);
        cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - set connecting state and bind cl to the me client
 *
 * @cl: host client
 * @me_cl: me client to connect to
 *
 * Return: 0 on success, -ENOENT if the me client is no longer active,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
        if (!mei_me_cl_get(me_cl))
                return -ENOENT;

        /* only one connection is allowed for fixed address clients */
        if (me_cl->props.fixed_address) {
                if (me_cl->connect_count) {
                        mei_me_cl_put(me_cl);
                        return -EBUSY;
                }
        }

        cl->me_cl = me_cl;
        cl->state = MEI_FILE_CONNECTING;
        cl->me_cl->connect_count++;

        return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        struct mei_device *dev;
        int ret;

        dev = cl->dev;

        ret = mei_hbm_cl_disconnect_req(dev, cl);
        cl->status = ret;
        if (ret) {
                cl->state = MEI_FILE_DISCONNECT_REPLY;
                return ret;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;

        return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
                          struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_cl_send_disconnect(cl, cb);
        if (ret)
                list_move_tail(&cb->list, &cmpl_list->list);

        return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        dev = cl->dev;

        cl->state = MEI_FILE_DISCONNECTING;

        cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
        rets = cb ? 0 : -ENOMEM;
        if (rets)
                goto out;

        cl_dbg(dev, cl, "add disconnect cb to control write list\n");
        list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

        if (mei_hbuf_acquire(dev)) {
                rets = mei_cl_send_disconnect(cl, cb);
                if (rets) {
                        cl_err(dev, cl, "failed to disconnect.\n");
                        goto out;
                }
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);

        rets = cl->status;
        if (cl->state != MEI_FILE_DISCONNECT_REPLY) {
                cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
                rets = -ETIME;
        }

out:
        /* we disconnect also on error */
        mei_cl_set_disconnected(cl);
        if (!rets)
                cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

        mei_io_cb_free(cb);
        return rets;
}

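/*
 * Note the unlock/wait/lock idiom in __mei_cl_disconnect() above, which
 * recurs in the connect, notify and write paths below: device_lock must
 * be dropped around the wait because the reply that updates cl->state
 * (and wakes cl->wait) is processed by the interrupt thread, which takes
 * device_lock itself:
 *
 *        mutex_unlock(&dev->device_lock);
 *        wait_event_timeout(cl->wait, <condition>,
 *                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 *        mutex_lock(&dev->device_lock);
 */
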
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
        struct mei_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        cl_dbg(dev, cl, "disconnecting");

        if (!mei_cl_is_connected(cl))
                return 0;

        if (mei_cl_is_fixed_address(cl)) {
                mei_cl_set_disconnected(cl);
                return 0;
        }

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        rets = __mei_cl_disconnect(cl);

        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;

        dev = cl->dev;

        list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
                if (cb->fop_type == MEI_FOP_CONNECT &&
                    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
                        return true;
        }

        return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        struct mei_device *dev;
        int ret;

        dev = cl->dev;

        ret = mei_hbm_cl_connect_req(dev, cl);
        cl->status = ret;
        if (ret) {
                cl->state = MEI_FILE_DISCONNECT_REPLY;
                return ret;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
                       struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int rets;

        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
        slots = mei_hbuf_empty_slots(dev);

        if (mei_cl_is_other_connecting(cl))
                return 0;

        if (slots < msg_slots)
                return -EMSGSIZE;

        rets = mei_cl_send_connect(cl, cb);
        if (rets)
                list_move_tail(&cb->list, &cmpl_list->list);

        return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
                   const struct file *file)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        if (WARN_ON(!cl || !cl->dev || !me_cl))
                return -ENODEV;

        dev = cl->dev;

        rets = mei_cl_set_connecting(cl, me_cl);
        if (rets)
                return rets;

        if (mei_cl_is_fixed_address(cl)) {
                cl->state = MEI_FILE_CONNECTED;
                return 0;
        }

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                goto nortpm;
        }

        cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
        rets = cb ? 0 : -ENOMEM;
        if (rets)
                goto out;

        list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

        /* run hbuf acquire last so we don't have to undo */
        if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
                rets = mei_cl_send_connect(cl, cb);
                if (rets)
                        goto out;
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
                           (cl->state == MEI_FILE_CONNECTED ||
                            cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
                            cl->state == MEI_FILE_DISCONNECT_REPLY),
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);

        if (!mei_cl_is_connected(cl)) {
                if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
                        mei_io_list_flush(&dev->ctrl_rd_list, cl);
                        mei_io_list_flush(&dev->ctrl_wr_list, cl);
                        /* ignore disconnect return value;
                         * in case of failure reset will be invoked
                         */
                        __mei_cl_disconnect(cl);
                        rets = -EFAULT;
                        goto out;
                }

                /* timeout or something went really wrong */
                if (!cl->status)
                        cl->status = -EFAULT;
        }

        rets = cl->status;
out:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        mei_io_cb_free(cb);

nortpm:
        if (!mei_cl_is_connected(cl))
                mei_cl_set_disconnected(cl);

        return rets;
}

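/*
 * A minimal end-to-end connect sketch, roughly what a file-open path
 * does (error unwinding elided; 'uuid' is the caller's target client;
 * mei_cl_alloc_linked() is defined right below):
 *
 *        struct mei_me_client *me_cl;
 *        struct mei_cl *cl;
 *        int ret;
 *
 *        cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
 *        if (IS_ERR(cl))
 *                return PTR_ERR(cl);
 *        me_cl = mei_me_cl_by_uuid(dev, uuid);
 *        if (!me_cl)
 *                return -ENOTTY;
 *        ret = mei_cl_connect(cl, me_cl, file);
 *        mei_me_cl_put(me_cl);    connect took its own reference
 */
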
/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
{
        struct mei_cl *cl;
        int ret;

        cl = mei_cl_allocate(dev);
        if (!cl) {
                ret = -ENOMEM;
                goto err;
        }

        ret = mei_cl_link(cl, id);
        if (ret)
                goto err;

        return cl;
err:
        kfree(cl);
        return ERR_PTR(ret);
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
        int rets;

        if (WARN_ON(!cl || !cl->me_cl))
                return -EINVAL;

        if (cl->mei_flow_ctrl_creds > 0)
                return 1;

        if (mei_cl_is_fixed_address(cl)) {
                rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
                if (rets && rets != -EBUSY)
                        return rets;
                return 1;
        }

        if (mei_cl_is_single_recv_buf(cl)) {
                if (cl->me_cl->mei_flow_ctrl_creds > 0)
                        return 1;
        }
        return 0;
}

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
        if (WARN_ON(!cl || !cl->me_cl))
                return -EINVAL;

        if (mei_cl_is_fixed_address(cl))
                return 0;

        if (mei_cl_is_single_recv_buf(cl)) {
                if (WARN_ON(cl->me_cl->mei_flow_ctrl_creds <= 0))
                        return -EINVAL;
                cl->me_cl->mei_flow_ctrl_creds--;
        } else {
                if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
                        return -EINVAL;
                cl->mei_flow_ctrl_creds--;
        }
        return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
        if (fop == MEI_FOP_NOTIFY_START)
                return MEI_HBM_NOTIFICATION_START;
        else
                return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
        if (req == MEI_HBM_NOTIFICATION_START)
                return MEI_FOP_NOTIFY_START;
        else
                return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
                      struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;
        bool request;

        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        request = mei_cl_notify_fop2req(cb->fop_type);
        ret = mei_hbm_cl_notify_req(dev, cl, request);
        if (ret) {
                cl->status = ret;
                list_move_tail(&cb->list, &cmpl_list->list);
                return ret;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @file: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
                          const struct file *file, u8 request)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        enum mei_cb_file_ops fop_type;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (!dev->hbm_f_ev_supported) {
                cl_dbg(dev, cl, "notifications not supported\n");
                return -EOPNOTSUPP;
        }

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        fop_type = mei_cl_notify_req2fop(request);
        cb = mei_io_cb_init(cl, fop_type, file);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        if (mei_hbuf_acquire(dev)) {
                if (mei_hbm_cl_notify_req(dev, cl, request)) {
                        rets = -ENODEV;
                        goto out;
                }
                list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
        } else {
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }

        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait, cl->notify_en == request,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);

        if (cl->notify_en != request) {
                mei_io_list_flush(&dev->ctrl_rd_list, cl);
                mei_io_list_flush(&dev->ctrl_wr_list, cl);
                if (!cl->status)
                        cl->status = -EFAULT;
        }

        rets = cl->status;

out:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        mei_io_cb_free(cb);
        return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
        struct mei_device *dev;

        if (!cl || !cl->dev)
                return;

        dev = cl->dev;

        if (!cl->notify_en)
                return;

        cl_dbg(dev, cl, "notify event");
        cl->notify_ev = true;
        wake_up_interruptible_all(&cl->ev_wait);

        if (cl->ev_async)
                kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);

        mei_cl_bus_notify_event(cl);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
        struct mei_device *dev;
        int rets;

        *notify_ev = false;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        if (cl->notify_ev)
                goto out;

        if (!block)
                return -EAGAIN;

        mutex_unlock(&dev->device_lock);
        rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
        mutex_lock(&dev->device_lock);

        if (rets < 0)
                return rets;

out:
        *notify_ev = cl->notify_ev;
        cl->notify_ev = false;
        return 0;
}

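/*
 * A minimal notification round-trip using the two entry points above
 * (blocking variant, sketch only):
 *
 *        bool notify_ev;
 *        int ret;
 *
 *        ret = mei_cl_notify_request(cl, file, 1);       arm notifications
 *        if (ret)
 *                return ret;
 *        ret = mei_cl_notify_get(cl, true, &notify_ev);  wait for an event
 *        ...
 *        mei_cl_notify_request(cl, file, 0);             disarm
 */
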
/**
 * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
 *	for given host client
 *
 * @cl: host client
 *
 * Return: true, if found at least one cb.
 */
static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
                if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
                        return true;
        return false;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
        struct mei_device *dev;
        struct mei_cl_cb *cb;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        /* HW currently supports only one pending read */
        if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
                return -EBUSY;

        if (!mei_me_cl_is_active(cl->me_cl)) {
                cl_err(dev, cl, "no such me client\n");
                return -ENOTTY;
        }

        /* always allocate at least client max message */
        length = max_t(size_t, length, mei_cl_mtu(cl));
        cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
        if (!cb)
                return -ENOMEM;

        if (mei_cl_is_fixed_address(cl)) {
                list_add_tail(&cb->list, &cl->rd_pending);
                return 0;
        }

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                goto nortpm;
        }

        if (mei_hbuf_acquire(dev)) {
                rets = mei_hbm_cl_flow_control_req(dev, cl);
                if (rets < 0)
                        goto out;

                list_add_tail(&cb->list, &cl->rd_pending);
        } else {
                rets = 0;
                list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
        }

out:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
nortpm:
        if (rets)
                mei_io_cb_free(cb);

        return rets;
}

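/*
 * A minimal read flow built on mei_cl_read_start(), roughly what the
 * character device read path does (locking and error handling elided;
 * the wait must be done with device_lock dropped):
 *
 *        ret = mei_cl_read_start(cl, length, file);
 *        if (ret && ret != -EBUSY)
 *                return ret;
 *        wait_event_interruptible(cl->rx_wait,
 *                                 !list_empty(&cl->rd_completed) ||
 *                                 !mei_cl_is_connected(cl));
 *        cb = mei_cl_read_cb(cl, file);
 *        ... copy cb->buf_idx bytes from cb->buf.data to the caller ...
 *        mei_io_cb_free(cb);
 */
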
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                     struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
        struct mei_msg_hdr mei_hdr;
        size_t len;
        u32 msg_slots;
        int slots;
        int rets;
        bool first_chunk;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        buf = &cb->buf;

        first_chunk = cb->buf_idx == 0;

        rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
        if (rets < 0)
                return rets;

        if (rets == 0) {
                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
                return 0;
        }

        slots = mei_hbuf_empty_slots(dev);
        len = buf->size - cb->buf_idx;
        msg_slots = mei_data2slots(len);

        mei_hdr.host_addr = mei_cl_host_addr(cl);
        mei_hdr.me_addr = mei_cl_me_id(cl);
        mei_hdr.reserved = 0;
        mei_hdr.internal = cb->internal;

        if (slots >= msg_slots) {
                mei_hdr.length = len;
                mei_hdr.msg_complete = 1;
        /* Split the message only if we can write the whole host buffer */
        } else if (slots == dev->hbuf_depth) {
                msg_slots = slots;
                len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
                mei_hdr.length = len;
                mei_hdr.msg_complete = 0;
        } else {
                /* wait for next time the host buffer is empty */
                return 0;
        }

        cl_dbg(dev, cl, "buf: size = %zd idx = %zd\n",
               cb->buf.size, cb->buf_idx);

        rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
        if (rets) {
                cl->status = rets;
                list_move_tail(&cb->list, &cmpl_list->list);
                return rets;
        }

        cl->status = 0;
        cl->writing_state = MEI_WRITING;
        cb->buf_idx += mei_hdr.length;
        cb->completed = mei_hdr.msg_complete == 1;

        if (first_chunk) {
                if (mei_cl_flow_ctrl_reduce(cl))
                        return -EIO;
        }

        if (mei_hdr.msg_complete)
                list_move_tail(&cb->list, &dev->write_waiting_list.list);

        return 0;
}

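/*
 * For reference, the slot arithmetic above: a slot is one 32-bit word,
 * and mei_data2slots() (mei_dev.h) charges the 4-byte message header
 * plus the payload, rounded up to whole slots. For example, a 6-byte
 * payload costs
 *
 *        mei_data2slots(6) = DIV_ROUND_UP(4 + 6, 4) = 3 slots,
 *
 * and the split branch above fits
 * len = slots * sizeof(u32) - sizeof(struct mei_msg_hdr) payload bytes
 * into a completely empty buffer of 'slots' slots.
 */
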
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until completed
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
        struct mei_msg_hdr mei_hdr;
        int size;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        if (WARN_ON(!cb))
                return -EINVAL;

        dev = cl->dev;

        buf = &cb->buf;
        size = buf->size;

        cl_dbg(dev, cl, "size=%d\n", size);

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
                cl_err(dev, cl, "rpm: get failed %d\n", rets);
                return rets;
        }

        cb->buf_idx = 0;
        cl->writing_state = MEI_IDLE;

        mei_hdr.host_addr = mei_cl_host_addr(cl);
        mei_hdr.me_addr = mei_cl_me_id(cl);
        mei_hdr.reserved = 0;
        mei_hdr.msg_complete = 0;
        mei_hdr.internal = cb->internal;

        rets = mei_cl_flow_ctrl_creds(cl);
        if (rets < 0)
                goto err;

        if (rets == 0) {
                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
                rets = size;
                goto out;
        }
        if (!mei_hbuf_acquire(dev)) {
                cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
                rets = size;
                goto out;
        }

        /* Check for a maximum length */
        if (size > mei_hbuf_max_len(dev)) {
                mei_hdr.length = mei_hbuf_max_len(dev);
                mei_hdr.msg_complete = 0;
        } else {
                mei_hdr.length = size;
                mei_hdr.msg_complete = 1;
        }

        rets = mei_write_message(dev, &mei_hdr, buf->data);
        if (rets)
                goto err;

        rets = mei_cl_flow_ctrl_reduce(cl);
        if (rets)
                goto err;

        cl->writing_state = MEI_WRITING;
        cb->buf_idx = mei_hdr.length;
        cb->completed = mei_hdr.msg_complete == 1;

out:
        if (mei_hdr.msg_complete)
                list_add_tail(&cb->list, &dev->write_waiting_list.list);
        else
                list_add_tail(&cb->list, &dev->write_list.list);

        cb = NULL;
        if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE);
                mutex_lock(&dev->device_lock);
                /* wait_event_interruptible returns -ERESTARTSYS */
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto err;
                }
        }

        rets = size;
err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return rets;
}

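/*
 * A minimal blocking-write sketch for the function above (caller holds
 * device_lock; 'data'/'length' are the caller's payload; error unwinding
 * elided):
 *
 *        struct mei_cl_cb *cb;
 *        int ret;
 *
 *        cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
 *        if (!cb)
 *                return -ENOMEM;
 *        memcpy(cb->buf.data, data, length);
 *        ret = mei_cl_write(cl, cb, true);    returns 'length' on success
 */
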
/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
        struct mei_device *dev = cl->dev;

        switch (cb->fop_type) {
        case MEI_FOP_WRITE:
                mei_io_cb_free(cb);
                cl->writing_state = MEI_WRITE_COMPLETE;
                if (waitqueue_active(&cl->tx_wait)) {
                        wake_up_interruptible(&cl->tx_wait);
                } else {
                        pm_runtime_mark_last_busy(dev->dev);
                        pm_request_autosuspend(dev->dev);
                }
                break;

        case MEI_FOP_READ:
                list_add_tail(&cb->list, &cl->rd_completed);
                if (!mei_cl_bus_rx_event(cl))
                        wake_up_interruptible(&cl->rx_wait);
                break;

        case MEI_FOP_CONNECT:
        case MEI_FOP_DISCONNECT:
        case MEI_FOP_NOTIFY_STOP:
        case MEI_FOP_NOTIFY_START:
                if (waitqueue_active(&cl->wait))
                        wake_up(&cl->wait);

                break;
        default:
                BUG_ON(0);
        }
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
        struct mei_cl *cl;

        list_for_each_entry(cl, &dev->file_list, link)
                mei_cl_set_disconnected(cl);
}

/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be
 *	interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
        struct mei_cl *cl;

        list_for_each_entry(cl, &dev->file_list, link) {
                if (waitqueue_active(&cl->rx_wait)) {
                        cl_dbg(dev, cl, "Waking up reading client!\n");
                        wake_up_interruptible(&cl->rx_wait);
                }
                if (waitqueue_active(&cl->tx_wait)) {
                        cl_dbg(dev, cl, "Waking up writing client!\n");
                        wake_up_interruptible(&cl->tx_wait);
                }

                /* synchronized under device mutex */
                if (waitqueue_active(&cl->ev_wait)) {
                        cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
                        wake_up_interruptible(&cl->ev_wait);
                }
        }
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
        mei_io_list_free(&dev->write_list, NULL);
        mei_io_list_free(&dev->write_waiting_list, NULL);
}

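/*
 * The three mei_cl_all_* helpers above are meant to run back to back on
 * the device reset path, roughly in this order (a sketch of how the
 * reset code in init.c applies them):
 *
 *        mei_cl_all_disconnect(dev);     drop all connections
 *        mei_cl_all_wakeup(dev);         let sleeping waiters observe it
 *        mei_cl_all_write_clear(dev);    drop queued writes
 */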