/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0)
			return i;

	return -ENOENT;
}


/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			return i;

	return -ENOENT;
}


/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * returns true  - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->me_client_id == cl2->me_client_id);
}

/**
 * __mei_io_list_flush - removes and optionally frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
			list_del(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}
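
/*
 * Illustrative sketch (not compiled): __mei_io_list_flush() backs the two
 * wrappers defined below, which differ only in whether the removed cbs
 * are also freed.  Passing cl == NULL flushes the whole list, e.g. on a
 * device-wide cleanup:
 *
 *	__mei_io_list_flush(&dev->write_list, NULL, true);
 */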

/**
 * mei_io_list_flush - removes list entries belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, false);
}


/**
 * mei_io_list_free - removes cbs belonging to cl and frees them
 *
 * @list: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
{
	__mei_io_list_flush(list, cl, true);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}

/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}

/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}


/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * returns 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
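
/*
 * Usage sketch (illustrative, not compiled): a typical io callback
 * lifecycle pairs mei_io_cb_init() with a buffer allocation and, on any
 * failure, a single mei_io_cb_free(), which releases both buffers and
 * the cb itself:
 *
 *	struct mei_cl_cb *cb = mei_io_cb_init(cl, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 */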

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns the allocated host client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns cb on success, NULL if not found
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if the id or open handle count is exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one */
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * returns always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
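
/*
 * Usage sketch (illustrative, not compiled): a host client is allocated
 * and linked into the host map under dev->device_lock:
 *
 *	cl = mei_cl_allocate(dev);
 *	if (!cl || mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY) < 0)
 *		goto err;
 *
 * and the matching teardown:
 *
 *	mei_cl_unlink(cl);
 *	kfree(cl);
 */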

/**
 * mei_host_client_init - initialize the special (amthif, wd, nfc) host
 *	clients after the hbm enumeration has completed
 *
 * @work: the init_work item embedded in the mei device
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);
	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);

	pm_runtime_mark_last_busy(&dev->pdev->dev);
	dev_dbg(&dev->pdev->dev, "rpm: autosuspend\n");
	pm_runtime_autosuspend(&dev->pdev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * returns true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    dev->pg_event == MEI_PG_EVENT_WAIT) {
		dev_dbg(&dev->pdev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto free;
	}

	cb->fop_type = MEI_FOP_CLOSE;
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);

	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}
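
/*
 * Illustrative sketch (not compiled): mei_hbuf_acquire() above implements
 * the send-now-or-queue pattern used by the connect, disconnect and read
 * paths in this file.  When the host buffer is acquired the request goes
 * out immediately and the cb waits on the control read list for the
 * firmware answer; otherwise the cb is parked on the control write list
 * for the interrupt thread to send later:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		if (mei_hbm_cl_disconnect_req(dev, cl))
 *			goto err;
 *		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
 *	} else {
 *		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 *	}
 */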

/**
 * mei_cl_is_other_connecting - checks if another client
 *	with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *ocl; /* the other client */

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry(ocl, &dev->file_list, link) {
		if (ocl->state == MEI_FILE_CONNECTING &&
		    ocl != cl &&
		    cl->me_client_id == ocl->me_client_id)
			return true;
	}

	return false;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		cl->state = MEI_FILE_CONNECTING;
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTED;
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds > 0, 0 otherwise
 *	-ENOENT if me client is not found
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return 0;

	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
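
/*
 * Usage sketch (illustrative, not compiled): connecting from a file
 * operation, with dev->device_lock held and the target me client id
 * already resolved by the caller:
 *
 *	cl->me_client_id = dev->me_clients[i].client_id;
 *	rets = mei_cl_connect(cl, file);
 *	if (rets)
 *		goto out;
 */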

/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	me_cl = &dev->me_clients[id];
	if (me_cl->props.single_recv_buf != 0) {
		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		me_cl->mei_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->mei_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return -ENOTTY;
	}

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto out;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
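
/*
 * Usage sketch (illustrative, not compiled): a blocking reader queues a
 * read request and then sleeps on rx_wait until the interrupt path marks
 * the read complete:
 *
 *	rets = mei_cl_read_start(cl, length);
 *	if (rets && rets != -EBUSY)
 *		goto out;
 *	mutex_unlock(&dev->device_lock);
 *	wait_event_interruptible(cl->rx_wait,
 *			MEI_READ_COMPLETE == cl->reading_state);
 *	mutex_lock(&dev->device_lock);
 */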

/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: block until write completes
 *
 * returns number of bytes sent on success, <0 on failure.
 */
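/*
 * Worked example (values illustrative; hbuf_depth is hardware dependent):
 * mei_cl_irq_write() above splits a message only when the whole host
 * buffer is empty.  With a 32-slot buffer, each fragment carries
 *
 *	32 * sizeof(u32) - sizeof(struct mei_msg_hdr) = 128 - 4 = 124
 *
 * payload bytes, and msg_complete stays 0 until the remainder fits in
 * the slots currently available.
 */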
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);

	rets = pm_runtime_get(&dev->pdev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(&dev->pdev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}

	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
	}

	rets = buf->size;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(&dev->pdev->dev);
	pm_runtime_put_autosuspend(&dev->pdev->dev);

	return rets;
}


/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
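/*
 * Usage sketch (illustrative, not compiled): a caller submits a cb whose
 * request_buffer has already been filled.  A non-negative return is the
 * number of bytes accepted; the char-device write path frees the cb on
 * error in this fashion:
 *
 *	rets = mei_cl_write(cl, cb, blocking);
 *	if (rets < 0)
 *		mei_io_cb_free(cb);
 */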
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);
	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev: mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	mei_io_list_free(&dev->write_list, NULL);
	mei_io_list_free(&dev->write_waiting_list, NULL);
}
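
/*
 * Usage sketch (illustrative, not compiled): on a device reset the three
 * helpers above are typically applied together, under dev->device_lock,
 * to unwind all client state:
 *
 *	mei_cl_all_disconnect(dev);
 *	mei_cl_all_wakeup(dev);
 *	mei_cl_all_write_clear(dev);
 */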