// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * Sets up the list head and the initial reference count (1).
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * The get succeeds only if the refcount has not already dropped to
 * zero (kref_get_unless_zero), so a client in the middle of release
 * is never revived.
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * kref release callback: invoked when the last reference is dropped.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * NULL-tolerant; frees the client via mei_me_cl_release() when the
 * last reference is dropped.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * Drops the reference that the dev->me_clients list itself held.
 * NULL-tolerant.
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * Locked wrapper around __mei_me_cl_del().
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * The list takes over the caller's reference.
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem must be held by the caller
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *
increases ref count 175 * 176 * @dev: the device structure 177 * @client_id: me client id 178 * 179 * Return: me client or NULL if not found 180 * 181 * Locking: dev->me_clients_rwsem 182 */ 183 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) 184 { 185 186 struct mei_me_client *__me_cl, *me_cl = NULL; 187 188 down_read(&dev->me_clients_rwsem); 189 list_for_each_entry(__me_cl, &dev->me_clients, list) { 190 if (__me_cl->client_id == client_id) { 191 me_cl = mei_me_cl_get(__me_cl); 192 break; 193 } 194 } 195 up_read(&dev->me_clients_rwsem); 196 197 return me_cl; 198 } 199 200 /** 201 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid 202 * increases ref count 203 * 204 * @dev: the device structure 205 * @uuid: me client uuid 206 * @client_id: me client id 207 * 208 * Return: me client or null if not found 209 * 210 * Locking: dev->me_clients_rwsem 211 */ 212 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev, 213 const uuid_le *uuid, u8 client_id) 214 { 215 struct mei_me_client *me_cl; 216 const uuid_le *pn; 217 218 WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); 219 220 list_for_each_entry(me_cl, &dev->me_clients, list) { 221 pn = &me_cl->props.protocol_name; 222 if (uuid_le_cmp(*uuid, *pn) == 0 && 223 me_cl->client_id == client_id) 224 return mei_me_cl_get(me_cl); 225 } 226 227 return NULL; 228 } 229 230 231 /** 232 * mei_me_cl_by_uuid_id - locate me client by client id and uuid 233 * increases ref count 234 * 235 * @dev: the device structure 236 * @uuid: me client uuid 237 * @client_id: me client id 238 * 239 * Return: me client or null if not found 240 */ 241 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, 242 const uuid_le *uuid, u8 client_id) 243 { 244 struct mei_me_client *me_cl; 245 246 down_read(&dev->me_clients_rwsem); 247 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id); 248 up_read(&dev->me_clients_rwsem); 249 250 return me_cl; 251 } 252 253 /** 254 * 
mei_me_cl_rm_by_uuid - remove all me clients matching uuid 255 * 256 * @dev: the device structure 257 * @uuid: me client uuid 258 * 259 * Locking: called under "dev->device_lock" lock 260 */ 261 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) 262 { 263 struct mei_me_client *me_cl; 264 265 dev_dbg(dev->dev, "remove %pUl\n", uuid); 266 267 down_write(&dev->me_clients_rwsem); 268 me_cl = __mei_me_cl_by_uuid(dev, uuid); 269 __mei_me_cl_del(dev, me_cl); 270 mei_me_cl_put(me_cl); 271 up_write(&dev->me_clients_rwsem); 272 } 273 274 /** 275 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id 276 * 277 * @dev: the device structure 278 * @uuid: me client uuid 279 * @id: me client id 280 * 281 * Locking: called under "dev->device_lock" lock 282 */ 283 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) 284 { 285 struct mei_me_client *me_cl; 286 287 dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id); 288 289 down_write(&dev->me_clients_rwsem); 290 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id); 291 __mei_me_cl_del(dev, me_cl); 292 mei_me_cl_put(me_cl); 293 up_write(&dev->me_clients_rwsem); 294 } 295 296 /** 297 * mei_me_cl_rm_all - remove all me clients 298 * 299 * @dev: the device structure 300 * 301 * Locking: called under "dev->device_lock" lock 302 */ 303 void mei_me_cl_rm_all(struct mei_device *dev) 304 { 305 struct mei_me_client *me_cl, *next; 306 307 down_write(&dev->me_clients_rwsem); 308 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list) 309 __mei_me_cl_del(dev, me_cl); 310 up_write(&dev->me_clients_rwsem); 311 } 312 313 /** 314 * mei_io_cb_free - free mei_cb_private related memory 315 * 316 * @cb: mei callback struct 317 */ 318 void mei_io_cb_free(struct mei_cl_cb *cb) 319 { 320 if (cb == NULL) 321 return; 322 323 list_del(&cb->list); 324 kfree(cb->buf.data); 325 kfree(cb->ext_hdr); 326 kfree(cb); 327 } 328 329 /** 330 * mei_tx_cb_enqueue - queue tx callback 331 * 332 * Locking: called under 
"dev->device_lock" lock 333 * 334 * @cb: mei callback struct 335 * @head: an instance of list to queue on 336 */ 337 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb, 338 struct list_head *head) 339 { 340 list_add_tail(&cb->list, head); 341 cb->cl->tx_cb_queued++; 342 } 343 344 /** 345 * mei_tx_cb_dequeue - dequeue tx callback 346 * 347 * Locking: called under "dev->device_lock" lock 348 * 349 * @cb: mei callback struct to dequeue and free 350 */ 351 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) 352 { 353 if (!WARN_ON(cb->cl->tx_cb_queued == 0)) 354 cb->cl->tx_cb_queued--; 355 356 mei_io_cb_free(cb); 357 } 358 359 /** 360 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp 361 * 362 * Locking: called under "dev->device_lock" lock 363 * 364 * @cl: mei client 365 * @fp: pointer to file structure 366 */ 367 static void mei_cl_set_read_by_fp(const struct mei_cl *cl, 368 const struct file *fp) 369 { 370 struct mei_cl_vtag *cl_vtag; 371 372 list_for_each_entry(cl_vtag, &cl->vtag_map, list) { 373 if (cl_vtag->fp == fp) { 374 cl_vtag->pending_read = true; 375 return; 376 } 377 } 378 } 379 380 /** 381 * mei_io_cb_init - allocate and initialize io callback 382 * 383 * @cl: mei client 384 * @type: operation type 385 * @fp: pointer to file structure 386 * 387 * Return: mei_cl_cb pointer or NULL; 388 */ 389 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, 390 enum mei_cb_file_ops type, 391 const struct file *fp) 392 { 393 struct mei_cl_cb *cb; 394 395 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 396 if (!cb) 397 return NULL; 398 399 INIT_LIST_HEAD(&cb->list); 400 cb->fp = fp; 401 cb->cl = cl; 402 cb->buf_idx = 0; 403 cb->fop_type = type; 404 cb->vtag = 0; 405 cb->ext_hdr = NULL; 406 407 return cb; 408 } 409 410 /** 411 * mei_io_list_flush_cl - removes cbs belonging to the cl. 
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			/* only read cbs are freed here; others stay owned
			 * by their originator */
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl,
				   const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl && (!fp || fp == cb->fp))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_free_pending - free pending cb
 *
 * Frees only the first pending read cb, if any.
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	/* round the allocation up to full hardware slots */
	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
566 * 567 * @cl: host client 568 * @fp: file pointer (matching cb file object), may be NULL 569 * 570 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. 571 */ 572 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) 573 { 574 struct mei_device *dev; 575 576 if (WARN_ON(!cl || !cl->dev)) 577 return -EINVAL; 578 579 dev = cl->dev; 580 581 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 582 mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp); 583 mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp); 584 /* free pending and control cb only in final flush */ 585 if (!fp) { 586 mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl); 587 mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl); 588 mei_cl_free_pending(cl); 589 } 590 spin_lock(&cl->rd_completed_lock); 591 mei_io_list_free_fp(&cl->rd_completed, fp); 592 spin_unlock(&cl->rd_completed_lock); 593 594 return 0; 595 } 596 597 /** 598 * mei_cl_init - initializes cl. 599 * 600 * @cl: host client to be initialized 601 * @dev: mei device 602 */ 603 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) 604 { 605 memset(cl, 0, sizeof(*cl)); 606 init_waitqueue_head(&cl->wait); 607 init_waitqueue_head(&cl->rx_wait); 608 init_waitqueue_head(&cl->tx_wait); 609 init_waitqueue_head(&cl->ev_wait); 610 INIT_LIST_HEAD(&cl->vtag_map); 611 spin_lock_init(&cl->rd_completed_lock); 612 INIT_LIST_HEAD(&cl->rd_completed); 613 INIT_LIST_HEAD(&cl->rd_pending); 614 INIT_LIST_HEAD(&cl->link); 615 cl->writing_state = MEI_IDLE; 616 cl->state = MEI_FILE_UNINITIALIZED; 617 cl->dev = dev; 618 } 619 620 /** 621 * mei_cl_allocate - allocates cl structure and sets it up. 
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	/* a client that was never linked has nothing to undo */
	if (cl->state == MEI_FILE_UNINITIALIZED)
		return 0;

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

/**
 * mei_host_client_init - mark the device enabled and kick off
 *     client bus rescan
 *
 * @dev: mei device
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	/* no writes while power gating is on or in transition */
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* last connection gone - reset the me client's tx credits */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

/**
 * mei_cl_set_connecting - bind host client to me client and move it
 *     to the connecting state; takes a reference on @me_cl
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is going away,
 *	-EBUSY if a fixed address client is already connected
 */
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
859 */ 860 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) 861 { 862 struct mei_device *dev; 863 int ret; 864 865 dev = cl->dev; 866 867 ret = mei_hbm_cl_disconnect_req(dev, cl); 868 cl->status = ret; 869 if (ret) { 870 cl->state = MEI_FILE_DISCONNECT_REPLY; 871 return ret; 872 } 873 874 list_move_tail(&cb->list, &dev->ctrl_rd_list); 875 cl->timer_count = dev->timeouts.connect; 876 mei_schedule_stall_timer(dev); 877 878 return 0; 879 } 880 881 /** 882 * mei_cl_irq_disconnect - processes close related operation from 883 * interrupt thread context - send disconnect request 884 * 885 * @cl: client 886 * @cb: callback block. 887 * @cmpl_list: complete list. 888 * 889 * Return: 0, OK; otherwise, error. 890 */ 891 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, 892 struct list_head *cmpl_list) 893 { 894 struct mei_device *dev = cl->dev; 895 u32 msg_slots; 896 int slots; 897 int ret; 898 899 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); 900 slots = mei_hbuf_empty_slots(dev); 901 if (slots < 0) 902 return -EOVERFLOW; 903 904 if ((u32)slots < msg_slots) 905 return -EMSGSIZE; 906 907 ret = mei_cl_send_disconnect(cl, cb); 908 if (ret) 909 list_move_tail(&cb->list, cmpl_list); 910 911 return ret; 912 } 913 914 /** 915 * __mei_cl_disconnect - disconnect host client from the me one 916 * internal function runtime pm has to be already acquired 917 * 918 * @cl: host client 919 * 920 * Return: 0 on success, <0 on failure. 
921 */ 922 static int __mei_cl_disconnect(struct mei_cl *cl) 923 { 924 struct mei_device *dev; 925 struct mei_cl_cb *cb; 926 int rets; 927 928 dev = cl->dev; 929 930 cl->state = MEI_FILE_DISCONNECTING; 931 932 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL); 933 if (!cb) { 934 rets = -ENOMEM; 935 goto out; 936 } 937 938 if (mei_hbuf_acquire(dev)) { 939 rets = mei_cl_send_disconnect(cl, cb); 940 if (rets) { 941 cl_err(dev, cl, "failed to disconnect.\n"); 942 goto out; 943 } 944 } 945 946 mutex_unlock(&dev->device_lock); 947 wait_event_timeout(cl->wait, 948 cl->state == MEI_FILE_DISCONNECT_REPLY || 949 cl->state == MEI_FILE_DISCONNECTED, 950 dev->timeouts.cl_connect); 951 mutex_lock(&dev->device_lock); 952 953 rets = cl->status; 954 if (cl->state != MEI_FILE_DISCONNECT_REPLY && 955 cl->state != MEI_FILE_DISCONNECTED) { 956 cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); 957 rets = -ETIME; 958 } 959 960 out: 961 /* we disconnect also on error */ 962 mei_cl_set_disconnected(cl); 963 if (!rets) 964 cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); 965 966 mei_io_cb_free(cb); 967 return rets; 968 } 969 970 /** 971 * mei_cl_disconnect - disconnect host client from the me one 972 * 973 * @cl: host client 974 * 975 * Locking: called under "dev->device_lock" lock 976 * 977 * Return: 0 on success, <0 on failure. 
978 */ 979 int mei_cl_disconnect(struct mei_cl *cl) 980 { 981 struct mei_device *dev; 982 int rets; 983 984 if (WARN_ON(!cl || !cl->dev)) 985 return -ENODEV; 986 987 dev = cl->dev; 988 989 cl_dbg(dev, cl, "disconnecting"); 990 991 if (!mei_cl_is_connected(cl)) 992 return 0; 993 994 if (mei_cl_is_fixed_address(cl)) { 995 mei_cl_set_disconnected(cl); 996 return 0; 997 } 998 999 if (dev->dev_state == MEI_DEV_POWERING_DOWN || 1000 dev->dev_state == MEI_DEV_POWER_DOWN) { 1001 cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); 1002 mei_cl_set_disconnected(cl); 1003 return 0; 1004 } 1005 1006 rets = pm_runtime_get(dev->dev); 1007 if (rets < 0 && rets != -EINPROGRESS) { 1008 pm_runtime_put_noidle(dev->dev); 1009 cl_err(dev, cl, "rpm: get failed %d\n", rets); 1010 return rets; 1011 } 1012 1013 rets = __mei_cl_disconnect(cl); 1014 1015 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1016 pm_runtime_mark_last_busy(dev->dev); 1017 pm_runtime_put_autosuspend(dev->dev); 1018 1019 return rets; 1020 } 1021 1022 1023 /** 1024 * mei_cl_is_other_connecting - checks if other 1025 * client with the same me client id is connecting 1026 * 1027 * @cl: private data of the file object 1028 * 1029 * Return: true if other client is connected, false - otherwise. 1030 */ 1031 static bool mei_cl_is_other_connecting(struct mei_cl *cl) 1032 { 1033 struct mei_device *dev; 1034 struct mei_cl_cb *cb; 1035 1036 dev = cl->dev; 1037 1038 list_for_each_entry(cb, &dev->ctrl_rd_list, list) { 1039 if (cb->fop_type == MEI_FOP_CONNECT && 1040 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl)) 1041 return true; 1042 } 1043 1044 return false; 1045 } 1046 1047 /** 1048 * mei_cl_send_connect - send connect request 1049 * 1050 * @cl: host client 1051 * @cb: callback block 1052 * 1053 * Return: 0, OK; otherwise, error. 
1054 */ 1055 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) 1056 { 1057 struct mei_device *dev; 1058 int ret; 1059 1060 dev = cl->dev; 1061 1062 ret = mei_hbm_cl_connect_req(dev, cl); 1063 cl->status = ret; 1064 if (ret) { 1065 cl->state = MEI_FILE_DISCONNECT_REPLY; 1066 return ret; 1067 } 1068 1069 list_move_tail(&cb->list, &dev->ctrl_rd_list); 1070 cl->timer_count = dev->timeouts.connect; 1071 mei_schedule_stall_timer(dev); 1072 return 0; 1073 } 1074 1075 /** 1076 * mei_cl_irq_connect - send connect request in irq_thread context 1077 * 1078 * @cl: host client 1079 * @cb: callback block 1080 * @cmpl_list: complete list 1081 * 1082 * Return: 0, OK; otherwise, error. 1083 */ 1084 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, 1085 struct list_head *cmpl_list) 1086 { 1087 struct mei_device *dev = cl->dev; 1088 u32 msg_slots; 1089 int slots; 1090 int rets; 1091 1092 if (mei_cl_is_other_connecting(cl)) 1093 return 0; 1094 1095 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request)); 1096 slots = mei_hbuf_empty_slots(dev); 1097 if (slots < 0) 1098 return -EOVERFLOW; 1099 1100 if ((u32)slots < msg_slots) 1101 return -EMSGSIZE; 1102 1103 rets = mei_cl_send_connect(cl, cb); 1104 if (rets) 1105 list_move_tail(&cb->list, cmpl_list); 1106 1107 return rets; 1108 } 1109 1110 /** 1111 * mei_cl_connect - connect host client to the me one 1112 * 1113 * @cl: host client 1114 * @me_cl: me client 1115 * @fp: pointer to file structure 1116 * 1117 * Locking: called under "dev->device_lock" lock 1118 * 1119 * Return: 0 on success, <0 on failure. 
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed address clients are connected without HBM handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	/* drop device lock while waiting for the FW reply */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	/* fixed address clients are not flow controlled */
	if (mei_cl_is_fixed_address(cl))
		return 1;

	/* single receive buffer clients share credits on the me client */
	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_vtag_alloc - allocate and fill the vtag structure
 *
 * @fp: pointer to file structure
 * @vtag: vm tag
 *
 * Return:
 * * Pointer to allocated struct - on success
 * * ERR_PTR(-ENOMEM) on memory allocation failure
 */
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
	cl_vtag->vtag = vtag;
	cl_vtag->fp = fp;

	return cl_vtag;
}

/**
 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
 *
 * @cl: host client
 * @vtag: virtual tag
 *
 * Return:
 * * A file pointer - on success
 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
 */
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list)
		/* The client on bus has one fixed fp */
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
		    vtag_l->vtag == vtag)
			return vtag_l->fp;

	return ERR_PTR(-ENOENT);
}

/**
 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
 *
 * @cl: host client
 * @vtag: vm tag
 */
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
	struct mei_cl_vtag *vtag_l;

	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
		if (vtag_l->vtag == vtag) {
			vtag_l->pending_read = false;
			break;
		}
	}
}

/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *				in the vtag list
 *
 * @cl: host client
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			break;
		}
	}
}

/**
 * mei_cl_vt_support_check - check if client support vtags
 *
 * @cl: host client
 *
 * Return:
 * * 0 - supported, or not connected at all
 * * -EOPNOTSUPP - vtags are not supported by client
 */
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (!dev->hbm_f_vt_supported)
		return -EOPNOTSUPP;

	if (!cl->me_cl)
		return 0;

	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}

/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *				and vtag check
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 *
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return:
* MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	/* the request is the size of a connect-class HBM message */
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	/* wait for the firmware response on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send immediately if the host buffer is free, otherwise the cb
	 * stays on the ctrl write list and is sent from the irq thread
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	/* drop device_lock while waiting for the firmware response */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	/* wake poll/read waiters only when no bus client consumed the event */
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);

}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	/* drop device_lock while sleeping on the event */
	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
*/
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* fixed-address clients do not use read flow control */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* send the flow control request now if the host buffer is free,
	 * otherwise the cb is left queued for the irq thread
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/* fill a vtag extended header at @ext; returns its length in slots */
static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
{
	struct mei_ext_hdr_vtag *vtag_hdr = ext;

	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
	vtag_hdr->vtag = vtag;
	vtag_hdr->reserved = 0;
	return vtag_hdr->hdr.length;
}

/* true if @ext is a GSC extended header (NULL-safe) */
static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext)
{
	return ext && ext->type == MEI_EXT_HDR_GSC;
}

/* copy the GSC extended header to @ext; returns its length in slots */
static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr)
{
	memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr));
	return ext->length;
}

/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_hbm, is_gsc, is_vtag;
	struct mei_ext_hdr *next_ext;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	/* client_id 0 is the HBM (bus management) client */
	is_hbm = cb->cl->me_cl->client_id == 0;
	is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
	is_ext = is_vtag || is_gsc;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(struct mei_ext_hdr_vtag);

	if (is_gsc)
		hdr_len += mei_ext_hdr_len(cb->ext_hdr);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	/* extended headers follow right after the base header */
	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	meta->size = 0;
	next_ext = (struct mei_ext_hdr *)meta->hdrs;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
		next_ext = mei_ext_next(next_ext);
	}

	if (is_gsc) {
		meta->count++;
		meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
		next_ext = mei_ext_next(next_ext);
	}

out:
	/* length covers only the extension part here; payload is added later */
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len = 0;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data = NULL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	/* flow control credit is consumed only by the first chunk */
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	if (buf->data) {
		buf_len = buf->size - cb->buf_idx;
		data = buf->data + cb->buf_idx;
	}
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		/* whole remainder fits into the host buffer */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* payload goes over the DMA ring; inline data is just its length */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		/* buffer is completely empty: send a partial chunk */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		/* not enough room now; retry when the host buffer drains */
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring && buf->data)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @timeout: send timeout in milliseconds.
 *	effective only for blocking writes: the cb->blocking is set.
 *	set timeout to the MAX_SCHEDULE_TIMEOUT for maximum allowed wait.
 *
 * Return: number of bytes sent on success, <0 on failure.
1962 */ 1963 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout) 1964 { 1965 struct mei_device *dev; 1966 struct mei_msg_data *buf; 1967 struct mei_msg_hdr *mei_hdr = NULL; 1968 size_t hdr_len; 1969 size_t hbuf_len, dr_len; 1970 size_t buf_len; 1971 size_t data_len; 1972 int hbuf_slots; 1973 u32 dr_slots; 1974 u32 dma_len; 1975 ssize_t rets; 1976 bool blocking; 1977 const void *data; 1978 1979 if (WARN_ON(!cl || !cl->dev)) 1980 return -ENODEV; 1981 1982 if (WARN_ON(!cb)) 1983 return -EINVAL; 1984 1985 dev = cl->dev; 1986 1987 buf = &cb->buf; 1988 buf_len = buf->size; 1989 1990 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len); 1991 1992 blocking = cb->blocking; 1993 data = buf->data; 1994 1995 rets = pm_runtime_get(dev->dev); 1996 if (rets < 0 && rets != -EINPROGRESS) { 1997 pm_runtime_put_noidle(dev->dev); 1998 cl_err(dev, cl, "rpm: get failed %zd\n", rets); 1999 goto free; 2000 } 2001 2002 cb->buf_idx = 0; 2003 cl->writing_state = MEI_IDLE; 2004 2005 2006 rets = mei_cl_tx_flow_ctrl_creds(cl); 2007 if (rets < 0) 2008 goto err; 2009 2010 mei_hdr = mei_msg_hdr_init(cb); 2011 if (IS_ERR(mei_hdr)) { 2012 rets = -PTR_ERR(mei_hdr); 2013 mei_hdr = NULL; 2014 goto err; 2015 } 2016 2017 hdr_len = sizeof(*mei_hdr) + mei_hdr->length; 2018 2019 if (rets == 0) { 2020 cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); 2021 rets = buf_len; 2022 goto out; 2023 } 2024 2025 if (!mei_hbuf_acquire(dev)) { 2026 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n"); 2027 rets = buf_len; 2028 goto out; 2029 } 2030 2031 hbuf_slots = mei_hbuf_empty_slots(dev); 2032 if (hbuf_slots < 0) { 2033 rets = -EOVERFLOW; 2034 goto out; 2035 } 2036 2037 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK; 2038 dr_slots = mei_dma_ring_empty_slots(dev); 2039 dr_len = mei_slots2data(dr_slots); 2040 2041 if (hdr_len + buf_len <= hbuf_len) { 2042 data_len = buf_len; 2043 mei_hdr->msg_complete = 1; 2044 } else if (dr_slots && hbuf_len >= hdr_len + 
sizeof(dma_len)) { 2045 mei_hdr->dma_ring = 1; 2046 if (buf_len > dr_len) 2047 buf_len = dr_len; 2048 else 2049 mei_hdr->msg_complete = 1; 2050 2051 data_len = sizeof(dma_len); 2052 dma_len = buf_len; 2053 data = &dma_len; 2054 } else { 2055 buf_len = hbuf_len - hdr_len; 2056 data_len = buf_len; 2057 } 2058 2059 mei_hdr->length += data_len; 2060 2061 if (mei_hdr->dma_ring && buf->data) 2062 mei_dma_ring_write(dev, buf->data, buf_len); 2063 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len); 2064 2065 if (rets) 2066 goto err; 2067 2068 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl); 2069 if (rets) 2070 goto err; 2071 2072 cl->writing_state = MEI_WRITING; 2073 cb->buf_idx = buf_len; 2074 /* restore return value */ 2075 buf_len = buf->size; 2076 2077 out: 2078 if (mei_hdr->msg_complete) 2079 mei_tx_cb_enqueue(cb, &dev->write_waiting_list); 2080 else 2081 mei_tx_cb_enqueue(cb, &dev->write_list); 2082 2083 cb = NULL; 2084 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { 2085 2086 mutex_unlock(&dev->device_lock); 2087 rets = wait_event_interruptible_timeout(cl->tx_wait, 2088 cl->writing_state == MEI_WRITE_COMPLETE || 2089 (!mei_cl_is_connected(cl)), 2090 msecs_to_jiffies(timeout)); 2091 mutex_lock(&dev->device_lock); 2092 /* clean all queue on timeout as something fatal happened */ 2093 if (rets == 0) { 2094 rets = -ETIME; 2095 mei_io_tx_list_free_cl(&dev->write_list, cl, NULL); 2096 mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL); 2097 } 2098 /* wait_event_interruptible returns -ERESTARTSYS */ 2099 if (rets > 0) 2100 rets = 0; 2101 if (rets) { 2102 if (signal_pending(current)) 2103 rets = -EINTR; 2104 goto err; 2105 } 2106 if (cl->writing_state != MEI_WRITE_COMPLETE) { 2107 rets = -EFAULT; 2108 goto err; 2109 } 2110 } 2111 2112 rets = buf_len; 2113 err: 2114 cl_dbg(dev, cl, "rpm: autosuspend\n"); 2115 pm_runtime_mark_last_busy(dev->dev); 2116 pm_runtime_put_autosuspend(dev->dev); 2117 free: 2118 mei_io_cb_free(cb); 2119 2120 
kfree(mei_hdr); 2121 2122 return rets; 2123 } 2124 2125 /** 2126 * mei_cl_complete - processes completed operation for a client 2127 * 2128 * @cl: private data of the file object. 2129 * @cb: callback block. 2130 */ 2131 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) 2132 { 2133 struct mei_device *dev = cl->dev; 2134 2135 switch (cb->fop_type) { 2136 case MEI_FOP_WRITE: 2137 mei_tx_cb_dequeue(cb); 2138 cl->writing_state = MEI_WRITE_COMPLETE; 2139 if (waitqueue_active(&cl->tx_wait)) { 2140 wake_up_interruptible(&cl->tx_wait); 2141 } else { 2142 pm_runtime_mark_last_busy(dev->dev); 2143 pm_request_autosuspend(dev->dev); 2144 } 2145 break; 2146 2147 case MEI_FOP_READ: 2148 mei_cl_add_rd_completed(cl, cb); 2149 if (!mei_cl_is_fixed_address(cl) && 2150 !WARN_ON(!cl->rx_flow_ctrl_creds)) 2151 cl->rx_flow_ctrl_creds--; 2152 if (!mei_cl_bus_rx_event(cl)) 2153 wake_up_interruptible(&cl->rx_wait); 2154 break; 2155 2156 case MEI_FOP_CONNECT: 2157 case MEI_FOP_DISCONNECT: 2158 case MEI_FOP_NOTIFY_STOP: 2159 case MEI_FOP_NOTIFY_START: 2160 case MEI_FOP_DMA_MAP: 2161 case MEI_FOP_DMA_UNMAP: 2162 if (waitqueue_active(&cl->wait)) 2163 wake_up(&cl->wait); 2164 2165 break; 2166 case MEI_FOP_DISCONNECT_RSP: 2167 mei_io_cb_free(cb); 2168 mei_cl_set_disconnected(cl); 2169 break; 2170 default: 2171 BUG_ON(0); 2172 } 2173 } 2174 2175 2176 /** 2177 * mei_cl_all_disconnect - disconnect forcefully all connected clients 2178 * 2179 * @dev: mei device 2180 */ 2181 void mei_cl_all_disconnect(struct mei_device *dev) 2182 { 2183 struct mei_cl *cl; 2184 2185 list_for_each_entry(cl, &dev->file_list, link) 2186 mei_cl_set_disconnected(cl); 2187 } 2188 EXPORT_SYMBOL_GPL(mei_cl_all_disconnect); 2189 2190 static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id) 2191 { 2192 struct mei_cl *cl; 2193 2194 list_for_each_entry(cl, &dev->file_list, link) 2195 if (cl->dma.buffer_id == buffer_id) 2196 return cl; 2197 return NULL; 2198 } 2199 2200 /** 2201 * 
mei_cl_irq_dma_map - send client dma map request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 needed_slots;
	int empty_slots;
	int rc;

	/* bail out unless the whole request fits into the host buffer */
	needed_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
	empty_slots = mei_hbuf_empty_slots(dev);
	if (empty_slots < 0)
		return -EOVERFLOW;

	if ((u32)empty_slots < needed_slots)
		return -EMSGSIZE;

	rc = mei_hbm_cl_dma_map_req(dev, cl);
	if (rc) {
		cl->status = rc;
		list_move_tail(&cb->list, cmpl_list);
		return rc;
	}

	/* await the firmware response on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
2244 */ 2245 int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, 2246 struct list_head *cmpl_list) 2247 { 2248 struct mei_device *dev = cl->dev; 2249 u32 msg_slots; 2250 int slots; 2251 int ret; 2252 2253 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request)); 2254 slots = mei_hbuf_empty_slots(dev); 2255 if (slots < 0) 2256 return -EOVERFLOW; 2257 2258 if ((u32)slots < msg_slots) 2259 return -EMSGSIZE; 2260 2261 ret = mei_hbm_cl_dma_unmap_req(dev, cl); 2262 if (ret) { 2263 cl->status = ret; 2264 list_move_tail(&cb->list, cmpl_list); 2265 return ret; 2266 } 2267 2268 list_move_tail(&cb->list, &dev->ctrl_rd_list); 2269 return 0; 2270 } 2271 2272 static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) 2273 { 2274 cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size, 2275 &cl->dma.daddr, GFP_KERNEL); 2276 if (!cl->dma.vaddr) 2277 return -ENOMEM; 2278 2279 cl->dma.buffer_id = buf_id; 2280 cl->dma.size = size; 2281 2282 return 0; 2283 } 2284 2285 static void mei_cl_dma_free(struct mei_cl *cl) 2286 { 2287 cl->dma.buffer_id = 0; 2288 dmam_free_coherent(cl->dev->dev, 2289 cl->dma.size, cl->dma.vaddr, cl->dma.daddr); 2290 cl->dma.size = 0; 2291 cl->dma.vaddr = NULL; 2292 cl->dma.daddr = 0; 2293 } 2294 2295 /** 2296 * mei_cl_dma_alloc_and_map - send client dma map request 2297 * 2298 * @cl: host client 2299 * @fp: pointer to file structure 2300 * @buffer_id: id of the mapped buffer 2301 * @size: size of the buffer 2302 * 2303 * Locking: called under "dev->device_lock" lock 2304 * 2305 * Return: 2306 * * -ENODEV 2307 * * -EINVAL 2308 * * -EOPNOTSUPP 2309 * * -EPROTO 2310 * * -ENOMEM; 2311 */ 2312 int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, 2313 u8 buffer_id, size_t size) 2314 { 2315 struct mei_device *dev; 2316 struct mei_cl_cb *cb; 2317 int rets; 2318 2319 if (WARN_ON(!cl || !cl->dev)) 2320 return -ENODEV; 2321 2322 dev = cl->dev; 2323 2324 if (!dev->hbm_f_cd_supported) { 2325 cl_dbg(dev, cl, "client 
dma is not supported\n"); 2326 return -EOPNOTSUPP; 2327 } 2328 2329 if (buffer_id == 0) 2330 return -EINVAL; 2331 2332 if (mei_cl_is_connected(cl)) 2333 return -EPROTO; 2334 2335 if (cl->dma_mapped) 2336 return -EPROTO; 2337 2338 if (mei_cl_dma_map_find(dev, buffer_id)) { 2339 cl_dbg(dev, cl, "client dma with id %d is already allocated\n", 2340 cl->dma.buffer_id); 2341 return -EPROTO; 2342 } 2343 2344 rets = pm_runtime_get(dev->dev); 2345 if (rets < 0 && rets != -EINPROGRESS) { 2346 pm_runtime_put_noidle(dev->dev); 2347 cl_err(dev, cl, "rpm: get failed %d\n", rets); 2348 return rets; 2349 } 2350 2351 rets = mei_cl_dma_alloc(cl, buffer_id, size); 2352 if (rets) { 2353 pm_runtime_put_noidle(dev->dev); 2354 return rets; 2355 } 2356 2357 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp); 2358 if (!cb) { 2359 rets = -ENOMEM; 2360 goto out; 2361 } 2362 2363 if (mei_hbuf_acquire(dev)) { 2364 if (mei_hbm_cl_dma_map_req(dev, cl)) { 2365 rets = -ENODEV; 2366 goto out; 2367 } 2368 list_move_tail(&cb->list, &dev->ctrl_rd_list); 2369 } 2370 2371 cl->status = 0; 2372 2373 mutex_unlock(&dev->device_lock); 2374 wait_event_timeout(cl->wait, 2375 cl->dma_mapped || cl->status, 2376 dev->timeouts.cl_connect); 2377 mutex_lock(&dev->device_lock); 2378 2379 if (!cl->dma_mapped && !cl->status) 2380 cl->status = -EFAULT; 2381 2382 rets = cl->status; 2383 2384 out: 2385 if (rets) 2386 mei_cl_dma_free(cl); 2387 2388 cl_dbg(dev, cl, "rpm: autosuspend\n"); 2389 pm_runtime_mark_last_busy(dev->dev); 2390 pm_runtime_put_autosuspend(dev->dev); 2391 2392 mei_io_cb_free(cb); 2393 return rets; 2394 } 2395 2396 /** 2397 * mei_cl_dma_unmap - send client dma unmap request 2398 * 2399 * @cl: host client 2400 * @fp: pointer to file structure 2401 * 2402 * Locking: called under "dev->device_lock" lock 2403 * 2404 * Return: 0 on such and error otherwise. 
*/
int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_cd_supported) {
		cl_dbg(dev, cl, "client dma is not supported\n");
		return -EOPNOTSUPP;
	}

	/* do not allow unmap for connected client */
	if (mei_cl_is_connected(cl))
		return -EPROTO;

	if (!cl->dma_mapped)
		return -EPROTO;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* send now if the host buffer is free, else the irq thread sends it */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	cl->status = 0;

	/* drop device_lock while waiting for the firmware response */
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   !cl->dma_mapped || cl->status,
			   dev->timeouts.cl_connect);
	mutex_lock(&dev->device_lock);

	if (cl->dma_mapped && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

	/* free the buffer only after the firmware confirmed the unmap */
	if (!rets)
		mei_cl_dma_free(cl);
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}