1 /* 2 * 3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Copyright (c) 2003-2012, Intel Corporation. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 */ 16 17 #include <linux/pci.h> 18 #include <linux/sched.h> 19 #include <linux/wait.h> 20 #include <linux/delay.h> 21 22 #include <linux/mei.h> 23 24 #include "mei_dev.h" 25 #include "hbm.h" 26 #include "client.h" 27 28 /** 29 * mei_me_cl_by_uuid - locate index of me client 30 * 31 * @dev: mei device 32 * returns me client index or -ENOENT if not found 33 */ 34 int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid) 35 { 36 int i, res = -ENOENT; 37 38 for (i = 0; i < dev->me_clients_num; ++i) 39 if (uuid_le_cmp(*uuid, 40 dev->me_clients[i].props.protocol_name) == 0) { 41 res = i; 42 break; 43 } 44 45 return res; 46 } 47 48 49 /** 50 * mei_me_cl_by_id return index to me_clients for client_id 51 * 52 * @dev: the device structure 53 * @client_id: me client id 54 * 55 * Locking: called under "dev->device_lock" lock 56 * 57 * returns index on success, -ENOENT on failure. 58 */ 59 60 int mei_me_cl_by_id(struct mei_device *dev, u8 client_id) 61 { 62 int i; 63 for (i = 0; i < dev->me_clients_num; i++) 64 if (dev->me_clients[i].client_id == client_id) 65 break; 66 if (WARN_ON(dev->me_clients[i].client_id != client_id)) 67 return -ENOENT; 68 69 if (i == dev->me_clients_num) 70 return -ENOENT; 71 72 return i; 73 } 74 75 76 /** 77 * mei_io_list_flush - removes list entry belonging to cl. 
78 * 79 * @list: An instance of our list structure 80 * @cl: host client 81 */ 82 void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) 83 { 84 struct mei_cl_cb *cb; 85 struct mei_cl_cb *next; 86 87 list_for_each_entry_safe(cb, next, &list->list, list) { 88 if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) 89 list_del(&cb->list); 90 } 91 } 92 93 /** 94 * mei_io_cb_free - free mei_cb_private related memory 95 * 96 * @cb: mei callback struct 97 */ 98 void mei_io_cb_free(struct mei_cl_cb *cb) 99 { 100 if (cb == NULL) 101 return; 102 103 kfree(cb->request_buffer.data); 104 kfree(cb->response_buffer.data); 105 kfree(cb); 106 } 107 108 /** 109 * mei_io_cb_init - allocate and initialize io callback 110 * 111 * @cl - mei client 112 * @fp: pointer to file structure 113 * 114 * returns mei_cl_cb pointer or NULL; 115 */ 116 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp) 117 { 118 struct mei_cl_cb *cb; 119 120 cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL); 121 if (!cb) 122 return NULL; 123 124 mei_io_list_init(cb); 125 126 cb->file_object = fp; 127 cb->cl = cl; 128 cb->buf_idx = 0; 129 return cb; 130 } 131 132 /** 133 * mei_io_cb_alloc_req_buf - allocate request buffer 134 * 135 * @cb: io callback structure 136 * @length: size of the buffer 137 * 138 * returns 0 on success 139 * -EINVAL if cb is NULL 140 * -ENOMEM if allocation failed 141 */ 142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length) 143 { 144 if (!cb) 145 return -EINVAL; 146 147 if (length == 0) 148 return 0; 149 150 cb->request_buffer.data = kmalloc(length, GFP_KERNEL); 151 if (!cb->request_buffer.data) 152 return -ENOMEM; 153 cb->request_buffer.size = length; 154 return 0; 155 } 156 /** 157 * mei_io_cb_alloc_resp_buf - allocate respose buffer 158 * 159 * @cb: io callback structure 160 * @length: size of the buffer 161 * 162 * returns 0 on success 163 * -EINVAL if cb is NULL 164 * -ENOMEM if allocation failed 165 */ 166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, 
size_t length) 167 { 168 if (!cb) 169 return -EINVAL; 170 171 if (length == 0) 172 return 0; 173 174 cb->response_buffer.data = kmalloc(length, GFP_KERNEL); 175 if (!cb->response_buffer.data) 176 return -ENOMEM; 177 cb->response_buffer.size = length; 178 return 0; 179 } 180 181 182 183 /** 184 * mei_cl_flush_queues - flushes queue lists belonging to cl. 185 * 186 * @cl: host client 187 */ 188 int mei_cl_flush_queues(struct mei_cl *cl) 189 { 190 struct mei_device *dev; 191 192 if (WARN_ON(!cl || !cl->dev)) 193 return -EINVAL; 194 195 dev = cl->dev; 196 197 cl_dbg(dev, cl, "remove list entry belonging to cl\n"); 198 mei_io_list_flush(&cl->dev->read_list, cl); 199 mei_io_list_flush(&cl->dev->write_list, cl); 200 mei_io_list_flush(&cl->dev->write_waiting_list, cl); 201 mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); 202 mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); 203 mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); 204 mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl); 205 return 0; 206 } 207 208 209 /** 210 * mei_cl_init - initializes intialize cl. 211 * 212 * @cl: host client to be initialized 213 * @dev: mei device 214 */ 215 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) 216 { 217 memset(cl, 0, sizeof(struct mei_cl)); 218 init_waitqueue_head(&cl->wait); 219 init_waitqueue_head(&cl->rx_wait); 220 init_waitqueue_head(&cl->tx_wait); 221 INIT_LIST_HEAD(&cl->link); 222 INIT_LIST_HEAD(&cl->device_link); 223 cl->reading_state = MEI_IDLE; 224 cl->writing_state = MEI_IDLE; 225 cl->dev = dev; 226 } 227 228 /** 229 * mei_cl_allocate - allocates cl structure and sets it up. 
230 * 231 * @dev: mei device 232 * returns The allocated file or NULL on failure 233 */ 234 struct mei_cl *mei_cl_allocate(struct mei_device *dev) 235 { 236 struct mei_cl *cl; 237 238 cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL); 239 if (!cl) 240 return NULL; 241 242 mei_cl_init(cl, dev); 243 244 return cl; 245 } 246 247 /** 248 * mei_cl_find_read_cb - find this cl's callback in the read list 249 * 250 * @cl: host client 251 * 252 * returns cb on success, NULL on error 253 */ 254 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl) 255 { 256 struct mei_device *dev = cl->dev; 257 struct mei_cl_cb *cb = NULL; 258 struct mei_cl_cb *next = NULL; 259 260 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) 261 if (mei_cl_cmp_id(cl, cb->cl)) 262 return cb; 263 return NULL; 264 } 265 266 /** mei_cl_link: allocte host id in the host map 267 * 268 * @cl - host client 269 * @id - fixed host id or -1 for genereting one 270 * 271 * returns 0 on success 272 * -EINVAL on incorrect values 273 * -ENONET if client not found 274 */ 275 int mei_cl_link(struct mei_cl *cl, int id) 276 { 277 struct mei_device *dev; 278 279 if (WARN_ON(!cl || !cl->dev)) 280 return -EINVAL; 281 282 dev = cl->dev; 283 284 /* If Id is not asigned get one*/ 285 if (id == MEI_HOST_CLIENT_ID_ANY) 286 id = find_first_zero_bit(dev->host_clients_map, 287 MEI_CLIENTS_MAX); 288 289 if (id >= MEI_CLIENTS_MAX) { 290 dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; 291 return -ENOENT; 292 } 293 294 if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { 295 dev_err(&dev->pdev->dev, "open_handle_count exceded %d", 296 MEI_MAX_OPEN_HANDLE_COUNT); 297 return -ENOENT; 298 } 299 300 dev->open_handle_count++; 301 302 cl->host_client_id = id; 303 list_add_tail(&cl->link, &dev->file_list); 304 305 set_bit(id, dev->host_clients_map); 306 307 cl->state = MEI_FILE_INITIALIZING; 308 309 cl_dbg(dev, cl, "link cl\n"); 310 return 0; 311 } 312 313 /** 314 * mei_cl_unlink - remove me_cl from the list 
315 * 316 * @cl: host client 317 */ 318 int mei_cl_unlink(struct mei_cl *cl) 319 { 320 struct mei_device *dev; 321 322 /* don't shout on error exit path */ 323 if (!cl) 324 return 0; 325 326 /* wd and amthif might not be initialized */ 327 if (!cl->dev) 328 return 0; 329 330 dev = cl->dev; 331 332 cl_dbg(dev, cl, "unlink client"); 333 334 list_del_init(&cl->link); 335 336 return 0; 337 } 338 339 340 void mei_host_client_init(struct work_struct *work) 341 { 342 struct mei_device *dev = container_of(work, 343 struct mei_device, init_work); 344 struct mei_client_properties *client_props; 345 int i; 346 347 mutex_lock(&dev->device_lock); 348 349 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); 350 dev->open_handle_count = 0; 351 352 /* 353 * Reserving the first three client IDs 354 * 0: Reserved for MEI Bus Message communications 355 * 1: Reserved for Watchdog 356 * 2: Reserved for AMTHI 357 */ 358 bitmap_set(dev->host_clients_map, 0, 3); 359 360 for (i = 0; i < dev->me_clients_num; i++) { 361 client_props = &dev->me_clients[i].props; 362 363 if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid)) 364 mei_amthif_host_init(dev); 365 else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid)) 366 mei_wd_host_init(dev); 367 else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid)) 368 mei_nfc_host_init(dev); 369 370 } 371 372 dev->dev_state = MEI_DEV_ENABLED; 373 374 mutex_unlock(&dev->device_lock); 375 } 376 377 378 /** 379 * mei_cl_disconnect - disconnect host clinet form the me one 380 * 381 * @cl: host client 382 * 383 * Locking: called under "dev->device_lock" lock 384 * 385 * returns 0 on success, <0 on failure. 
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	/* only a client already marked for disconnect is processed */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	cb->fop_type = MEI_FOP_CLOSE;
	if (dev->hbuf_is_ready) {
		/* host buffer is free: issue the disconnect request now */
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			cl_err(dev, cl, "failed to disconnect.\n");
			goto free;
		}
		/* NOTE(review): busy-wait while holding device_lock;
		 * presumably gives the hardware time to act — confirm */
		mdelay(10); /* Wait for hardware disconnection ready */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* host buffer busy: queue the request for a later write */
		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	}
	/* drop the lock while waiting for the FW disconnect response */
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		/* NOTE(review): this condition is always true here — the
		 * branch above already ruled out MEI_FILE_DISCONNECTED */
		if (MEI_FILE_DISCONNECTED != cl->state)
			cl_err(dev, cl, "wrong status client disconnect.\n");

		/* err != 0 means the wait ended before the timeout */
		if (err)
			cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
					err);

		cl_err(dev, cl, "failed to disconnect from FW client.\n");
	}

	/* drop any control entries still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	mei_io_cb_free(cb);
	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false - otherwise.
459 */ 460 bool mei_cl_is_other_connecting(struct mei_cl *cl) 461 { 462 struct mei_device *dev; 463 struct mei_cl *pos; 464 struct mei_cl *next; 465 466 if (WARN_ON(!cl || !cl->dev)) 467 return false; 468 469 dev = cl->dev; 470 471 list_for_each_entry_safe(pos, next, &dev->file_list, link) { 472 if ((pos->state == MEI_FILE_CONNECTING) && 473 (pos != cl) && cl->me_client_id == pos->me_client_id) 474 return true; 475 476 } 477 478 return false; 479 } 480 481 /** 482 * mei_cl_connect - connect host clinet to the me one 483 * 484 * @cl: host client 485 * 486 * Locking: called under "dev->device_lock" lock 487 * 488 * returns 0 on success, <0 on failure. 489 */ 490 int mei_cl_connect(struct mei_cl *cl, struct file *file) 491 { 492 struct mei_device *dev; 493 struct mei_cl_cb *cb; 494 int rets; 495 496 if (WARN_ON(!cl || !cl->dev)) 497 return -ENODEV; 498 499 dev = cl->dev; 500 501 cb = mei_io_cb_init(cl, file); 502 if (!cb) { 503 rets = -ENOMEM; 504 goto out; 505 } 506 507 cb->fop_type = MEI_FOP_IOCTL; 508 509 if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) { 510 dev->hbuf_is_ready = false; 511 512 if (mei_hbm_cl_connect_req(dev, cl)) { 513 rets = -ENODEV; 514 goto out; 515 } 516 cl->timer_count = MEI_CONNECT_TIMEOUT; 517 list_add_tail(&cb->list, &dev->ctrl_rd_list.list); 518 } else { 519 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 520 } 521 522 mutex_unlock(&dev->device_lock); 523 rets = wait_event_timeout(dev->wait_recvd_msg, 524 (cl->state == MEI_FILE_CONNECTED || 525 cl->state == MEI_FILE_DISCONNECTED), 526 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 527 mutex_lock(&dev->device_lock); 528 529 if (cl->state != MEI_FILE_CONNECTED) { 530 rets = -EFAULT; 531 532 mei_io_list_flush(&dev->ctrl_rd_list, cl); 533 mei_io_list_flush(&dev->ctrl_wr_list, cl); 534 goto out; 535 } 536 537 rets = cl->status; 538 539 out: 540 mei_io_cb_free(cb); 541 return rets; 542 } 543 544 /** 545 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl. 
546 * 547 * @cl: private data of the file object 548 * 549 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise. 550 * -ENOENT if mei_cl is not present 551 * -EINVAL if single_recv_buf == 0 552 */ 553 int mei_cl_flow_ctrl_creds(struct mei_cl *cl) 554 { 555 struct mei_device *dev; 556 int i; 557 558 if (WARN_ON(!cl || !cl->dev)) 559 return -EINVAL; 560 561 dev = cl->dev; 562 563 if (!dev->me_clients_num) 564 return 0; 565 566 if (cl->mei_flow_ctrl_creds > 0) 567 return 1; 568 569 for (i = 0; i < dev->me_clients_num; i++) { 570 struct mei_me_client *me_cl = &dev->me_clients[i]; 571 if (me_cl->client_id == cl->me_client_id) { 572 if (me_cl->mei_flow_ctrl_creds) { 573 if (WARN_ON(me_cl->props.single_recv_buf == 0)) 574 return -EINVAL; 575 return 1; 576 } else { 577 return 0; 578 } 579 } 580 } 581 return -ENOENT; 582 } 583 584 /** 585 * mei_cl_flow_ctrl_reduce - reduces flow_control. 586 * 587 * @cl: private data of the file object 588 * 589 * @returns 590 * 0 on success 591 * -ENOENT when me client is not found 592 * -EINVAL when ctrl credits are <= 0 593 */ 594 int mei_cl_flow_ctrl_reduce(struct mei_cl *cl) 595 { 596 struct mei_device *dev; 597 int i; 598 599 if (WARN_ON(!cl || !cl->dev)) 600 return -EINVAL; 601 602 dev = cl->dev; 603 604 if (!dev->me_clients_num) 605 return -ENOENT; 606 607 for (i = 0; i < dev->me_clients_num; i++) { 608 struct mei_me_client *me_cl = &dev->me_clients[i]; 609 if (me_cl->client_id == cl->me_client_id) { 610 if (me_cl->props.single_recv_buf != 0) { 611 if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0)) 612 return -EINVAL; 613 dev->me_clients[i].mei_flow_ctrl_creds--; 614 } else { 615 if (WARN_ON(cl->mei_flow_ctrl_creds <= 0)) 616 return -EINVAL; 617 cl->mei_flow_ctrl_creds--; 618 } 619 return 0; 620 } 621 } 622 return -ENOENT; 623 } 624 625 /** 626 * mei_cl_read_start - the start read client message function. 627 * 628 * @cl: host client 629 * 630 * returns 0 on success, <0 on failure. 
631 */ 632 int mei_cl_read_start(struct mei_cl *cl, size_t length) 633 { 634 struct mei_device *dev; 635 struct mei_cl_cb *cb; 636 int rets; 637 int i; 638 639 if (WARN_ON(!cl || !cl->dev)) 640 return -ENODEV; 641 642 dev = cl->dev; 643 644 if (!mei_cl_is_connected(cl)) 645 return -ENODEV; 646 647 if (cl->read_cb) { 648 cl_dbg(dev, cl, "read is pending.\n"); 649 return -EBUSY; 650 } 651 i = mei_me_cl_by_id(dev, cl->me_client_id); 652 if (i < 0) { 653 cl_err(dev, cl, "no such me client %d\n", cl->me_client_id); 654 return -ENODEV; 655 } 656 657 cb = mei_io_cb_init(cl, NULL); 658 if (!cb) 659 return -ENOMEM; 660 661 /* always allocate at least client max message */ 662 length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length); 663 rets = mei_io_cb_alloc_resp_buf(cb, length); 664 if (rets) 665 goto err; 666 667 cb->fop_type = MEI_FOP_READ; 668 cl->read_cb = cb; 669 if (dev->hbuf_is_ready) { 670 dev->hbuf_is_ready = false; 671 if (mei_hbm_cl_flow_control_req(dev, cl)) { 672 cl_err(dev, cl, "flow control send failed\n"); 673 rets = -ENODEV; 674 goto err; 675 } 676 list_add_tail(&cb->list, &dev->read_list.list); 677 } else { 678 list_add_tail(&cb->list, &dev->ctrl_wr_list.list); 679 } 680 return rets; 681 err: 682 mei_io_cb_free(cb); 683 return rets; 684 } 685 686 /** 687 * mei_cl_irq_write_complete - write a message to device 688 * from the interrupt thread context 689 * 690 * @cl: client 691 * @cb: callback block. 692 * @slots: free slots. 693 * @cmpl_list: complete list. 694 * 695 * returns 0, OK; otherwise error. 
 */
int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
			s32 *slots, struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	/* no credits: leave the cb queued, try again later */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload measured from the current write offset */
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	/* NOTE(review): *slots is s32, msg_slots is u32 — a negative
	 * *slots would compare as a huge unsigned value; confirm callers
	 * never pass negative slot counts */
	if (*slots >= msg_slots) {
		/* everything left fits in the host buffer */
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	*slots -= msg_slots;
	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		/* record the failure and hand the cb to the complete list */
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		/* whole message sent: consume one flow control credit */
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * returns number of bytes
 * sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);


	cb->fop_type = MEI_FOP_WRITE;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* Host buffer is not ready, we queue the request */
	if (rets == 0 || !dev->hbuf_is_ready) {
		cb->buf_idx = 0;
		/* unsetting msg_complete will enqueue the cb on the
		 * write list below */
		mei_hdr.msg_complete = 0;
		rets = buf->size;
		goto out;
	}

	dev->hbuf_is_ready = false;

	/* Check for a maximum length; longer payloads are split and the
	 * remainder is sent later from the write list */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;


	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

	/* success is reported as the number of bytes accepted */
	rets = buf->size;
out:
	if (mei_hdr.msg_complete) {
		/* whole message sent: consume one flow control credit */
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		/* drop the lock while waiting for write completion */
		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
			/* wait was interrupted */
			if (signal_pending(current))
				rets = -EINTR;
			else
				rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}


/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		/* the write cb is no longer needed once complete */
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			/* no reader is waiting: route to the bus layer */
			mei_cl_bus_rx_event(cl);

	}
}


/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev - mei device
 */

void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl, *next;

	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->read_cb = NULL;
		cl->timer_count = 0;
	}
}


/**
 * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
 *
 * @dev - mei device
 */
void mei_cl_all_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl, *next;
	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			cl_dbg(dev, cl, "Waking up reading client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
		if (waitqueue_active(&cl->tx_wait)) {
			cl_dbg(dev, cl, "Waking up writing client!\n");
			wake_up_interruptible(&cl->tx_wait);
		}
	}
}

/**
 * mei_cl_all_write_clear - clear all pending writes
 *
 * @dev - mei device
 */
void mei_cl_all_write_clear(struct mei_device *dev)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		list_del(&cb->list);
		mei_io_cb_free(cb);
	}
}