1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 */ 6 7 #include <linux/module.h> 8 #include <linux/device.h> 9 #include <linux/kernel.h> 10 #include <linux/sched/signal.h> 11 #include <linux/init.h> 12 #include <linux/errno.h> 13 #include <linux/slab.h> 14 #include <linux/mutex.h> 15 #include <linux/interrupt.h> 16 #include <linux/scatterlist.h> 17 #include <linux/mei_cl_bus.h> 18 19 #include "mei_dev.h" 20 #include "client.h" 21 22 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) 23 24 /** 25 * __mei_cl_send - internal client send (write) 26 * 27 * @cl: host client 28 * @buf: buffer to send 29 * @length: buffer length 30 * @vtag: virtual tag 31 * @mode: sending mode 32 * 33 * Return: written size bytes or < 0 on error 34 */ 35 ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, 36 unsigned int mode) 37 { 38 return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT); 39 } 40 41 /** 42 * __mei_cl_send_timeout - internal client send (write) 43 * 44 * @cl: host client 45 * @buf: buffer to send 46 * @length: buffer length 47 * @vtag: virtual tag 48 * @mode: sending mode 49 * @timeout: send timeout in milliseconds. 50 * effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set. 51 * set timeout to the MAX_SCHEDULE_TIMEOUT to maixum allowed wait. 52 * 53 * Return: written size bytes or < 0 on error 54 */ 55 ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, 56 unsigned int mode, unsigned long timeout) 57 { 58 struct mei_device *bus; 59 struct mei_cl_cb *cb; 60 ssize_t rets; 61 62 if (WARN_ON(!cl || !cl->dev)) 63 return -ENODEV; 64 65 bus = cl->dev; 66 67 mutex_lock(&bus->device_lock); 68 if (bus->dev_state != MEI_DEV_ENABLED && 69 bus->dev_state != MEI_DEV_POWERING_DOWN) { 70 rets = -ENODEV; 71 goto out; 72 } 73 74 if (!mei_cl_is_connected(cl)) { 75 rets = -ENODEV; 76 goto out; 77 } 78 79 /* Check if we have an ME client device */ 80 if (!mei_me_cl_is_active(cl->me_cl)) { 81 rets = -ENOTTY; 82 goto out; 83 } 84 85 if (vtag) { 86 /* Check if vtag is supported by client */ 87 rets = mei_cl_vt_support_check(cl); 88 if (rets) 89 goto out; 90 } 91 92 if (length > mei_cl_mtu(cl)) { 93 rets = -EFBIG; 94 goto out; 95 } 96 97 while (cl->tx_cb_queued >= bus->tx_queue_limit) { 98 mutex_unlock(&bus->device_lock); 99 rets = wait_event_interruptible(cl->tx_wait, 100 cl->writing_state == MEI_WRITE_COMPLETE || 101 (!mei_cl_is_connected(cl))); 102 mutex_lock(&bus->device_lock); 103 if (rets) { 104 if (signal_pending(current)) 105 rets = -EINTR; 106 goto out; 107 } 108 if (!mei_cl_is_connected(cl)) { 109 rets = -ENODEV; 110 goto out; 111 } 112 } 113 114 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL); 115 if (!cb) { 116 rets = -ENOMEM; 117 goto out; 118 } 119 cb->vtag = vtag; 120 121 cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL); 122 cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); 123 memcpy(cb->buf.data, buf, length); 124 /* hack we point data to header */ 125 if (mode & MEI_CL_IO_SGL) { 126 cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data; 127 cb->buf.data = NULL; 128 cb->buf.size = 0; 129 } 130 131 rets = mei_cl_write(cl, cb, timeout); 132 133 if (mode & MEI_CL_IO_SGL && rets == 0) 134 rets = length; 135 136 out: 137 mutex_unlock(&bus->device_lock); 138 139 return rets; 140 } 141 142 /** 143 * __mei_cl_recv - internal client receive (read) 144 * 145 * @cl: host client 146 * @buf: buffer to receive 147 * @length: buffer length 148 * @mode: io mode 149 * @vtag: virtual tag 150 * @timeout: recv timeout, 0 for infinite timeout 151 * 152 * Return: read size in bytes of < 0 on error 153 */ 154 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, 155 unsigned int mode, unsigned long timeout) 156 { 157 struct mei_device *bus; 158 struct mei_cl_cb *cb; 159 size_t r_length; 160 ssize_t rets; 161 bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK); 162 163 if (WARN_ON(!cl || !cl->dev)) 164 return -ENODEV; 165 166 bus = cl->dev; 167 168 mutex_lock(&bus->device_lock); 169 if (bus->dev_state != MEI_DEV_ENABLED && 170 bus->dev_state != MEI_DEV_POWERING_DOWN) { 171 rets = -ENODEV; 172 goto out; 173 } 174 175 cb = mei_cl_read_cb(cl, NULL); 176 if (cb) 177 goto copy; 178 179 rets = mei_cl_read_start(cl, length, NULL); 180 if (rets && rets != -EBUSY) 181 goto out; 182 183 if (nonblock) { 184 rets = -EAGAIN; 185 goto out; 186 } 187 188 /* wait on event only if there is no other waiter */ 189 /* synchronized under device mutex */ 190 if (!waitqueue_active(&cl->rx_wait)) { 191 192 mutex_unlock(&bus->device_lock); 193 194 if (timeout) { 195 rets = wait_event_interruptible_timeout 196 (cl->rx_wait, 197 mei_cl_read_cb(cl, NULL) || 198 (!mei_cl_is_connected(cl)), 199 msecs_to_jiffies(timeout)); 200 if (rets == 0) 201 return -ETIME; 202 if (rets < 0) { 203 if (signal_pending(current)) 204 return -EINTR; 205 return -ERESTARTSYS; 206 } 207 } else { 208 if (wait_event_interruptible 209 (cl->rx_wait, 210 mei_cl_read_cb(cl, NULL) || 211 (!mei_cl_is_connected(cl)))) { 212 if (signal_pending(current)) 213 return -EINTR; 214 return -ERESTARTSYS; 215 } 216 } 217 218 mutex_lock(&bus->device_lock); 219 220 if (!mei_cl_is_connected(cl)) { 221 rets = -ENODEV; 222 goto out; 223 } 224 } 225 226 cb = mei_cl_read_cb(cl, NULL); 227 if (!cb) { 228 rets = 0; 229 goto out; 230 } 231 232 copy: 233 if (cb->status) { 234 rets = cb->status; 235 goto free; 236 } 237 238 /* for the GSC type - copy the extended header to the buffer */ 239 if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) { 240 r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32)); 241 memcpy(buf, cb->ext_hdr, r_length); 242 } else { 243 r_length = min_t(size_t, length, cb->buf_idx); 244 memcpy(buf, cb->buf.data, r_length); 245 } 246 rets = r_length; 247 248 if (vtag) 249 *vtag = cb->vtag; 250 251 free: 252 mei_cl_del_rd_completed(cl, cb); 253 out: 254 mutex_unlock(&bus->device_lock); 255 256 return rets; 257 } 258 259 /** 260 * mei_cldev_send_vtag - me device send with vtag (write) 261 * 262 * @cldev: me client device 263 * @buf: buffer to send 264 * @length: buffer length 265 * @vtag: virtual tag 266 * 267 * Return: 268 * * written size in bytes 269 * * < 0 on error 270 */ 271 272 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, 273 size_t length, u8 vtag) 274 { 275 struct mei_cl *cl = cldev->cl; 276 277 return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING); 278 } 279 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag); 280 281 /** 282 * mei_cldev_recv_vtag - client receive with vtag (read) 283 * 284 * @cldev: me client device 285 * @buf: buffer to receive 286 * @length: buffer length 287 * @vtag: virtual tag 288 * 289 * Return: 290 * * read size in bytes 291 * * < 0 on error 292 */ 293 294 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, 295 u8 *vtag) 296 { 297 struct mei_cl *cl = cldev->cl; 298 299 return __mei_cl_recv(cl, buf, length, vtag, 0, 0); 300 } 301 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag); 302 303 /** 304 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read) 305 * 306 * @cldev: me client device 307 * @buf: buffer to receive 308 * @length: buffer length 309 * @vtag: virtual tag 310 * 311 * Return: 312 * * read size in bytes 313 * * -EAGAIN if function will block. 314 * * < 0 on other error 315 */ 316 ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, 317 size_t length, u8 *vtag) 318 { 319 struct mei_cl *cl = cldev->cl; 320 321 return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0); 322 } 323 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag); 324 325 /** 326 * mei_cldev_send - me device send (write) 327 * 328 * @cldev: me client device 329 * @buf: buffer to send 330 * @length: buffer length 331 * 332 * Return: 333 * * written size in bytes 334 * * < 0 on error 335 */ 336 ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length) 337 { 338 return mei_cldev_send_vtag(cldev, buf, length, 0); 339 } 340 EXPORT_SYMBOL_GPL(mei_cldev_send); 341 342 /** 343 * mei_cldev_recv - client receive (read) 344 * 345 * @cldev: me client device 346 * @buf: buffer to receive 347 * @length: buffer length 348 * 349 * Return: read size in bytes of < 0 on error 350 */ 351 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) 352 { 353 return mei_cldev_recv_vtag(cldev, buf, length, NULL); 354 } 355 EXPORT_SYMBOL_GPL(mei_cldev_recv); 356 357 /** 358 * mei_cldev_recv_nonblock - non block client receive (read) 359 * 360 * @cldev: me client device 361 * @buf: buffer to receive 362 * @length: buffer length 363 * 364 * Return: read size in bytes of < 0 on error 365 * -EAGAIN if function will block. 366 */ 367 ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, 368 size_t length) 369 { 370 return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL); 371 } 372 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); 373 374 /** 375 * mei_cl_bus_rx_work - dispatch rx event for a bus device 376 * 377 * @work: work 378 */ 379 static void mei_cl_bus_rx_work(struct work_struct *work) 380 { 381 struct mei_cl_device *cldev; 382 struct mei_device *bus; 383 384 cldev = container_of(work, struct mei_cl_device, rx_work); 385 386 bus = cldev->bus; 387 388 if (cldev->rx_cb) 389 cldev->rx_cb(cldev); 390 391 mutex_lock(&bus->device_lock); 392 if (mei_cl_is_connected(cldev->cl)) 393 mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 394 mutex_unlock(&bus->device_lock); 395 } 396 397 /** 398 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device 399 * 400 * @work: work 401 */ 402 static void mei_cl_bus_notif_work(struct work_struct *work) 403 { 404 struct mei_cl_device *cldev; 405 406 cldev = container_of(work, struct mei_cl_device, notif_work); 407 408 if (cldev->notif_cb) 409 cldev->notif_cb(cldev); 410 } 411 412 /** 413 * mei_cl_bus_notify_event - schedule notify cb on bus client 414 * 415 * @cl: host client 416 * 417 * Return: true if event was scheduled 418 * false if the client is not waiting for event 419 */ 420 bool mei_cl_bus_notify_event(struct mei_cl *cl) 421 { 422 struct mei_cl_device *cldev = cl->cldev; 423 424 if (!cldev || !cldev->notif_cb) 425 return false; 426 427 if (!cl->notify_ev) 428 return false; 429 430 schedule_work(&cldev->notif_work); 431 432 cl->notify_ev = false; 433 434 return true; 435 } 436 437 /** 438 * mei_cl_bus_rx_event - schedule rx event 439 * 440 * @cl: host client 441 * 442 * Return: true if event was scheduled 443 * false if the client is not waiting for event 444 */ 445 bool mei_cl_bus_rx_event(struct mei_cl *cl) 446 { 447 struct mei_cl_device *cldev = cl->cldev; 448 449 if (!cldev || !cldev->rx_cb) 450 return false; 451 452 schedule_work(&cldev->rx_work); 453 454 return true; 455 } 456 457 /** 458 * mei_cldev_register_rx_cb - register Rx event callback 459 * 460 * @cldev: me client devices 461 * @rx_cb: callback function 462 * 463 * Return: 0 on success 464 * -EALREADY if an callback is already registered 465 * <0 on other errors 466 */ 467 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) 468 { 469 struct mei_device *bus = cldev->bus; 470 int ret; 471 472 if (!rx_cb) 473 return -EINVAL; 474 if (cldev->rx_cb) 475 return -EALREADY; 476 477 cldev->rx_cb = rx_cb; 478 INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); 479 480 mutex_lock(&bus->device_lock); 481 if (mei_cl_is_connected(cldev->cl)) 482 ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 483 else 484 ret = -ENODEV; 485 mutex_unlock(&bus->device_lock); 486 if (ret && ret != -EBUSY) { 487 cancel_work_sync(&cldev->rx_work); 488 cldev->rx_cb = NULL; 489 return ret; 490 } 491 492 return 0; 493 } 494 EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb); 495 496 /** 497 * mei_cldev_register_notif_cb - register FW notification event callback 498 * 499 * @cldev: me client devices 500 * @notif_cb: callback function 501 * 502 * Return: 0 on success 503 * -EALREADY if an callback is already registered 504 * <0 on other errors 505 */ 506 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, 507 mei_cldev_cb_t notif_cb) 508 { 509 struct mei_device *bus = cldev->bus; 510 int ret; 511 512 if (!notif_cb) 513 return -EINVAL; 514 515 if (cldev->notif_cb) 516 return -EALREADY; 517 518 cldev->notif_cb = notif_cb; 519 INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work); 520 521 mutex_lock(&bus->device_lock); 522 ret = mei_cl_notify_request(cldev->cl, NULL, 1); 523 mutex_unlock(&bus->device_lock); 524 if (ret) { 525 cancel_work_sync(&cldev->notif_work); 526 cldev->notif_cb = NULL; 527 return ret; 528 } 529 530 return 0; 531 } 532 EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb); 533 534 /** 535 * mei_cldev_get_drvdata - driver data getter 536 * 537 * @cldev: mei client device 538 * 539 * Return: driver private data 540 */ 541 void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev) 542 { 543 return dev_get_drvdata(&cldev->dev); 544 } 545 EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata); 546 547 /** 548 * mei_cldev_set_drvdata - driver data setter 549 * 550 * @cldev: mei client device 551 * @data: data to store 552 */ 553 void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data) 554 { 555 dev_set_drvdata(&cldev->dev, data); 556 } 557 EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata); 558 559 /** 560 * mei_cldev_uuid - return uuid of the underlying me client 561 * 562 * @cldev: mei client device 563 * 564 * Return: me client uuid 565 */ 566 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev) 567 { 568 return mei_me_cl_uuid(cldev->me_cl); 569 } 570 EXPORT_SYMBOL_GPL(mei_cldev_uuid); 571 572 /** 573 * mei_cldev_ver - return protocol version of the underlying me client 574 * 575 * @cldev: mei client device 576 * 577 * Return: me client protocol version 578 */ 579 u8 mei_cldev_ver(const struct mei_cl_device *cldev) 580 { 581 return mei_me_cl_ver(cldev->me_cl); 582 } 583 EXPORT_SYMBOL_GPL(mei_cldev_ver); 584 585 /** 586 * mei_cldev_enabled - check whether the device is enabled 587 * 588 * @cldev: mei client device 589 * 590 * Return: true if me client is initialized and connected 591 */ 592 bool mei_cldev_enabled(const struct mei_cl_device *cldev) 593 { 594 return mei_cl_is_connected(cldev->cl); 595 } 596 EXPORT_SYMBOL_GPL(mei_cldev_enabled); 597 598 /** 599 * mei_cl_bus_module_get - acquire module of the underlying 600 * hw driver. 601 * 602 * @cldev: mei client device 603 * 604 * Return: true on success; false if the module was removed. 605 */ 606 static bool mei_cl_bus_module_get(struct mei_cl_device *cldev) 607 { 608 return try_module_get(cldev->bus->dev->driver->owner); 609 } 610 611 /** 612 * mei_cl_bus_module_put - release the underlying hw module. 613 * 614 * @cldev: mei client device 615 */ 616 static void mei_cl_bus_module_put(struct mei_cl_device *cldev) 617 { 618 module_put(cldev->bus->dev->driver->owner); 619 } 620 621 /** 622 * mei_cl_bus_vtag - get bus vtag entry wrapper 623 * The tag for bus client is always first. 624 * 625 * @cl: host client 626 * 627 * Return: bus vtag or NULL 628 */ 629 static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl) 630 { 631 return list_first_entry_or_null(&cl->vtag_map, 632 struct mei_cl_vtag, list); 633 } 634 635 /** 636 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map 637 * 638 * @cldev: me client device 639 * 640 * Return: 641 * * 0 on success 642 * * -ENOMEM if memory allocation failed 643 */ 644 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev) 645 { 646 struct mei_cl *cl = cldev->cl; 647 struct mei_cl_vtag *cl_vtag; 648 649 /* 650 * Bail out if the client does not supports vtags 651 * or has already allocated one 652 */ 653 if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl)) 654 return 0; 655 656 cl_vtag = mei_cl_vtag_alloc(NULL, 0); 657 if (IS_ERR(cl_vtag)) 658 return -ENOMEM; 659 660 list_add_tail(&cl_vtag->list, &cl->vtag_map); 661 662 return 0; 663 } 664 665 /** 666 * mei_cl_bus_vtag_free - remove the bus entry from vtag map 667 * 668 * @cldev: me client device 669 */ 670 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev) 671 { 672 struct mei_cl *cl = cldev->cl; 673 struct mei_cl_vtag *cl_vtag; 674 675 cl_vtag = mei_cl_bus_vtag(cl); 676 if (!cl_vtag) 677 return; 678 679 list_del(&cl_vtag->list); 680 kfree(cl_vtag); 681 } 682 683 void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size) 684 { 685 struct mei_device *bus; 686 struct mei_cl *cl; 687 int ret; 688 689 if (!cldev || !buffer_id || !size) 690 return ERR_PTR(-EINVAL); 691 692 if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) { 693 dev_err(&cldev->dev, "Map size should be aligned to %lu\n", 694 MEI_FW_PAGE_SIZE); 695 return ERR_PTR(-EINVAL); 696 } 697 698 cl = cldev->cl; 699 bus = cldev->bus; 700 701 mutex_lock(&bus->device_lock); 702 if (cl->state == MEI_FILE_UNINITIALIZED) { 703 ret = mei_cl_link(cl); 704 if (ret) 705 goto out; 706 /* update pointers */ 707 cl->cldev = cldev; 708 } 709 710 ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size); 711 out: 712 mutex_unlock(&bus->device_lock); 713 if (ret) 714 return ERR_PTR(ret); 715 return cl->dma.vaddr; 716 } 717 EXPORT_SYMBOL_GPL(mei_cldev_dma_map); 718 719 int mei_cldev_dma_unmap(struct mei_cl_device *cldev) 720 { 721 struct mei_device *bus; 722 struct mei_cl *cl; 723 int ret; 724 725 if (!cldev) 726 return -EINVAL; 727 728 cl = cldev->cl; 729 bus = cldev->bus; 730 731 mutex_lock(&bus->device_lock); 732 ret = mei_cl_dma_unmap(cl, NULL); 733 734 mei_cl_flush_queues(cl, NULL); 735 mei_cl_unlink(cl); 736 mutex_unlock(&bus->device_lock); 737 return ret; 738 } 739 EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap); 740 741 /** 742 * mei_cldev_enable - enable me client device 743 * create connection with me client 744 * 745 * @cldev: me client device 746 * 747 * Return: 0 on success and < 0 on error 748 */ 749 int mei_cldev_enable(struct mei_cl_device *cldev) 750 { 751 struct mei_device *bus = cldev->bus; 752 struct mei_cl *cl; 753 int ret; 754 755 cl = cldev->cl; 756 757 mutex_lock(&bus->device_lock); 758 if (cl->state == MEI_FILE_UNINITIALIZED) { 759 ret = mei_cl_link(cl); 760 if (ret) 761 goto out; 762 /* update pointers */ 763 cl->cldev = cldev; 764 } 765 766 if (mei_cl_is_connected(cl)) { 767 ret = 0; 768 goto out; 769 } 770 771 if (!mei_me_cl_is_active(cldev->me_cl)) { 772 dev_err(&cldev->dev, "me client is not active\n"); 773 ret = -ENOTTY; 774 goto out; 775 } 776 777 ret = mei_cl_bus_vtag_alloc(cldev); 778 if (ret) 779 goto out; 780 781 ret = mei_cl_connect(cl, cldev->me_cl, NULL); 782 if (ret < 0) { 783 dev_err(&cldev->dev, "cannot connect\n"); 784 mei_cl_bus_vtag_free(cldev); 785 } 786 787 out: 788 mutex_unlock(&bus->device_lock); 789 790 return ret; 791 } 792 EXPORT_SYMBOL_GPL(mei_cldev_enable); 793 794 /** 795 * mei_cldev_unregister_callbacks - internal wrapper for unregistering 796 * callbacks. 797 * 798 * @cldev: client device 799 */ 800 static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev) 801 { 802 if (cldev->rx_cb) { 803 cancel_work_sync(&cldev->rx_work); 804 cldev->rx_cb = NULL; 805 } 806 807 if (cldev->notif_cb) { 808 cancel_work_sync(&cldev->notif_work); 809 cldev->notif_cb = NULL; 810 } 811 } 812 813 /** 814 * mei_cldev_disable - disable me client device 815 * disconnect form the me client 816 * 817 * @cldev: me client device 818 * 819 * Return: 0 on success and < 0 on error 820 */ 821 int mei_cldev_disable(struct mei_cl_device *cldev) 822 { 823 struct mei_device *bus; 824 struct mei_cl *cl; 825 int err; 826 827 if (!cldev) 828 return -ENODEV; 829 830 cl = cldev->cl; 831 832 bus = cldev->bus; 833 834 mei_cldev_unregister_callbacks(cldev); 835 836 mutex_lock(&bus->device_lock); 837 838 mei_cl_bus_vtag_free(cldev); 839 840 if (!mei_cl_is_connected(cl)) { 841 dev_dbg(bus->dev, "Already disconnected\n"); 842 err = 0; 843 goto out; 844 } 845 846 err = mei_cl_disconnect(cl); 847 if (err < 0) 848 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 849 850 out: 851 /* Flush queues and remove any pending read unless we have mapped DMA */ 852 if (!cl->dma_mapped) { 853 mei_cl_flush_queues(cl, NULL); 854 mei_cl_unlink(cl); 855 } 856 857 mutex_unlock(&bus->device_lock); 858 return err; 859 } 860 EXPORT_SYMBOL_GPL(mei_cldev_disable); 861 862 /** 863 * mei_cldev_send_gsc_command - sends a gsc command, by sending 864 * a gsl mei message to gsc and receiving reply from gsc 865 * 866 * @cldev: me client device 867 * @client_id: client id to send the command to 868 * @fence_id: fence id to send the command to 869 * @sg_in: scatter gather list containing addresses for rx message buffer 870 * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes 871 * @sg_out: scatter gather list containing addresses for tx message buffer 872 * 873 * Return: 874 * * written size in bytes 875 * * < 0 on error 876 */ 877 ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev, 878 u8 client_id, u32 fence_id, 879 struct scatterlist *sg_in, 880 size_t total_in_len, 881 struct scatterlist *sg_out) 882 { 883 struct mei_cl *cl; 884 struct mei_device *bus; 885 ssize_t ret = 0; 886 887 struct mei_ext_hdr_gsc_h2f *ext_hdr; 888 size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f); 889 int sg_out_nents, sg_in_nents; 890 int i; 891 struct scatterlist *sg; 892 struct mei_ext_hdr_gsc_f2h rx_msg; 893 unsigned int sg_len; 894 895 if (!cldev || !sg_in || !sg_out) 896 return -EINVAL; 897 898 cl = cldev->cl; 899 bus = cldev->bus; 900 901 dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id); 902 903 if (!bus->hbm_f_gsc_supported) 904 return -EOPNOTSUPP; 905 906 sg_out_nents = sg_nents(sg_out); 907 sg_in_nents = sg_nents(sg_in); 908 /* at least one entry in tx and rx sgls must be present */ 909 if (sg_out_nents <= 0 || sg_in_nents <= 0) 910 return -EINVAL; 911 912 buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl); 913 ext_hdr = kzalloc(buf_sz, GFP_KERNEL); 914 if (!ext_hdr) 915 return -ENOMEM; 916 917 /* construct the GSC message */ 918 ext_hdr->hdr.type = MEI_EXT_HDR_GSC; 919 ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */ 920 921 ext_hdr->client_id = client_id; 922 ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL; 923 ext_hdr->fence_id = fence_id; 924 ext_hdr->input_address_count = sg_in_nents; 925 ext_hdr->output_address_count = sg_out_nents; 926 ext_hdr->reserved[0] = 0; 927 ext_hdr->reserved[1] = 0; 928 929 /* copy in-sgl to the message */ 930 for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) { 931 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg)); 932 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg)); 933 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE); 934 ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len; 935 total_in_len -= ext_hdr->sgl[i].length; 936 } 937 938 /* copy out-sgl to the message */ 939 for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) { 940 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg)); 941 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg)); 942 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE); 943 ext_hdr->sgl[i].length = sg_len; 944 } 945 946 /* send the message to GSC */ 947 ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL); 948 if (ret < 0) { 949 dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret); 950 goto end; 951 } 952 if (ret != buf_sz) { 953 dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n", 954 ret, buf_sz); 955 ret = -EIO; 956 goto end; 957 } 958 959 /* receive the reply from GSC, note that at this point sg_in should contain the reply */ 960 ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0); 961 962 if (ret != sizeof(rx_msg)) { 963 dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n", 964 ret, sizeof(rx_msg)); 965 if (ret >= 0) 966 ret = -EIO; 967 goto end; 968 } 969 970 /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */ 971 if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) { 972 dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n", 973 rx_msg.client_id, rx_msg.fence_id, client_id, fence_id); 974 ret = -EFAULT; 975 goto end; 976 } 977 978 dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written); 979 ret = rx_msg.written; 980 981 end: 982 kfree(ext_hdr); 983 return ret; 984 } 985 EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command); 986 987 /** 988 * mei_cl_device_find - find matching entry in the driver id table 989 * 990 * @cldev: me client device 991 * @cldrv: me client driver 992 * 993 * Return: id on success; NULL if no id is matching 994 */ 995 static const 996 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev, 997 const struct mei_cl_driver *cldrv) 998 { 999 const struct mei_cl_device_id *id; 1000 const uuid_le *uuid; 1001 u8 version; 1002 bool match; 1003 1004 uuid = mei_me_cl_uuid(cldev->me_cl); 1005 version = mei_me_cl_ver(cldev->me_cl); 1006 1007 id = cldrv->id_table; 1008 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) { 1009 if (!uuid_le_cmp(*uuid, id->uuid)) { 1010 match = true; 1011 1012 if (cldev->name[0]) 1013 if (strncmp(cldev->name, id->name, 1014 sizeof(id->name))) 1015 match = false; 1016 1017 if (id->version != MEI_CL_VERSION_ANY) 1018 if (id->version != version) 1019 match = false; 1020 if (match) 1021 return id; 1022 } 1023 1024 id++; 1025 } 1026 1027 return NULL; 1028 } 1029 1030 /** 1031 * mei_cl_device_match - device match function 1032 * 1033 * @dev: device 1034 * @drv: driver 1035 * 1036 * Return: 1 if matching device was found 0 otherwise 1037 */ 1038 static int mei_cl_device_match(struct device *dev, struct device_driver *drv) 1039 { 1040 const struct mei_cl_device *cldev = to_mei_cl_device(dev); 1041 const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv); 1042 const struct mei_cl_device_id *found_id; 1043 1044 if (!cldev) 1045 return 0; 1046 1047 if (!cldev->do_match) 1048 return 0; 1049 1050 if (!cldrv || !cldrv->id_table) 1051 return 0; 1052 1053 found_id = mei_cl_device_find(cldev, cldrv); 1054 if (found_id) 1055 return 1; 1056 1057 return 0; 1058 } 1059 1060 /** 1061 * mei_cl_device_probe - bus probe function 1062 * 1063 * @dev: device 1064 * 1065 * Return: 0 on success; < 0 otherwise 1066 */ 1067 static int mei_cl_device_probe(struct device *dev) 1068 { 1069 struct mei_cl_device *cldev; 1070 struct mei_cl_driver *cldrv; 1071 const struct mei_cl_device_id *id; 1072 int ret; 1073 1074 cldev = to_mei_cl_device(dev); 1075 cldrv = to_mei_cl_driver(dev->driver); 1076 1077 if (!cldev) 1078 return 0; 1079 1080 if (!cldrv || !cldrv->probe) 1081 return -ENODEV; 1082 1083 id = mei_cl_device_find(cldev, cldrv); 1084 if (!id) 1085 return -ENODEV; 1086 1087 if (!mei_cl_bus_module_get(cldev)) { 1088 dev_err(&cldev->dev, "get hw module failed"); 1089 return -ENODEV; 1090 } 1091 1092 ret = cldrv->probe(cldev, id); 1093 if (ret) { 1094 mei_cl_bus_module_put(cldev); 1095 return ret; 1096 } 1097 1098 __module_get(THIS_MODULE); 1099 return 0; 1100 } 1101 1102 /** 1103 * mei_cl_device_remove - remove device from the bus 1104 * 1105 * @dev: device 1106 * 1107 * Return: 0 on success; < 0 otherwise 1108 */ 1109 static void mei_cl_device_remove(struct device *dev) 1110 { 1111 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1112 struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver); 1113 1114 if (cldrv->remove) 1115 cldrv->remove(cldev); 1116 1117 mei_cldev_unregister_callbacks(cldev); 1118 1119 mei_cl_bus_module_put(cldev); 1120 module_put(THIS_MODULE); 1121 } 1122 1123 static ssize_t name_show(struct device *dev, struct device_attribute *a, 1124 char *buf) 1125 { 1126 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1127 1128 return scnprintf(buf, PAGE_SIZE, "%s", cldev->name); 1129 } 1130 static DEVICE_ATTR_RO(name); 1131 1132 static ssize_t uuid_show(struct device *dev, struct device_attribute *a, 1133 char *buf) 1134 { 1135 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1136 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 1137 1138 return sprintf(buf, "%pUl", uuid); 1139 } 1140 static DEVICE_ATTR_RO(uuid); 1141 1142 static ssize_t version_show(struct device *dev, struct device_attribute *a, 1143 char *buf) 1144 { 1145 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1146 u8 version = mei_me_cl_ver(cldev->me_cl); 1147 1148 return sprintf(buf, "%02X", version); 1149 } 1150 static DEVICE_ATTR_RO(version); 1151 1152 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 1153 char *buf) 1154 { 1155 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1156 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 1157 u8 version = mei_me_cl_ver(cldev->me_cl); 1158 1159 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", 1160 cldev->name, uuid, version); 1161 } 1162 static DEVICE_ATTR_RO(modalias); 1163 1164 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a, 1165 char *buf) 1166 { 1167 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1168 u8 maxconn = mei_me_cl_max_conn(cldev->me_cl); 1169 1170 return sprintf(buf, "%d", maxconn); 1171 } 1172 static DEVICE_ATTR_RO(max_conn); 1173 1174 static ssize_t fixed_show(struct device *dev, struct device_attribute *a, 1175 char *buf) 1176 { 1177 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1178 u8 fixed = mei_me_cl_fixed(cldev->me_cl); 1179 1180 return sprintf(buf, "%d", fixed); 1181 } 1182 static DEVICE_ATTR_RO(fixed); 1183 1184 static ssize_t vtag_show(struct device *dev, struct device_attribute *a, 1185 char *buf) 1186 { 1187 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1188 bool vt = mei_me_cl_vt(cldev->me_cl); 1189 1190 return sprintf(buf, "%d", vt); 1191 } 1192 static DEVICE_ATTR_RO(vtag); 1193 1194 static ssize_t max_len_show(struct device *dev, struct device_attribute *a, 1195 char *buf) 1196 { 1197 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1198 u32 maxlen = mei_me_cl_max_len(cldev->me_cl); 1199 1200 return sprintf(buf, "%u", maxlen); 1201 } 1202 static DEVICE_ATTR_RO(max_len); 1203 1204 static struct attribute *mei_cldev_attrs[] = { 1205 &dev_attr_name.attr, 1206 &dev_attr_uuid.attr, 1207 &dev_attr_version.attr, 1208 &dev_attr_modalias.attr, 1209 &dev_attr_max_conn.attr, 1210 &dev_attr_fixed.attr, 1211 &dev_attr_vtag.attr, 1212 &dev_attr_max_len.attr, 1213 NULL, 1214 }; 1215 ATTRIBUTE_GROUPS(mei_cldev); 1216 1217 /** 1218 * mei_cl_device_uevent - me client bus uevent handler 1219 * 1220 * @dev: device 1221 * @env: uevent kobject 1222 * 1223 * Return: 0 on success -ENOMEM on when add_uevent_var fails 1224 */ 1225 static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env) 1226 { 1227 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1228 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 1229 u8 version = mei_me_cl_ver(cldev->me_cl); 1230 1231 if (add_uevent_var(env, "MEI_CL_VERSION=%d", version)) 1232 return -ENOMEM; 1233 1234 if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid)) 1235 return -ENOMEM; 1236 1237 if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name)) 1238 return -ENOMEM; 1239 1240 if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:", 1241 cldev->name, uuid, version)) 1242 return -ENOMEM; 1243 1244 return 0; 1245 } 1246 1247 static struct bus_type mei_cl_bus_type = { 1248 .name = "mei", 1249 .dev_groups = mei_cldev_groups, 1250 .match = mei_cl_device_match, 1251 .probe = mei_cl_device_probe, 1252 .remove = mei_cl_device_remove, 1253 .uevent = mei_cl_device_uevent, 1254 }; 1255 1256 static struct mei_device *mei_dev_bus_get(struct mei_device *bus) 1257 { 1258 if (bus) 1259 get_device(bus->dev); 1260 1261 return bus; 1262 } 1263 1264 static void mei_dev_bus_put(struct mei_device *bus) 1265 { 1266 if (bus) 1267 put_device(bus->dev); 1268 } 1269 1270 static void mei_cl_bus_dev_release(struct device *dev) 1271 { 1272 struct mei_cl_device *cldev = to_mei_cl_device(dev); 1273 1274 if (!cldev) 1275 return; 1276 1277 mei_cl_flush_queues(cldev->cl, NULL); 1278 mei_me_cl_put(cldev->me_cl); 1279 mei_dev_bus_put(cldev->bus); 1280 mei_cl_unlink(cldev->cl); 1281 kfree(cldev->cl); 1282 kfree(cldev); 1283 } 1284 1285 static const struct device_type mei_cl_device_type = { 1286 .release = mei_cl_bus_dev_release, 1287 }; 1288 1289 /** 1290 * mei_cl_bus_set_name - set device name for me client device 1291 * <controller>-<client device> 1292 * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb 1293 * 1294 * @cldev: me client device 1295 */ 1296 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev) 1297 { 1298 dev_set_name(&cldev->dev, "%s-%pUl", 1299 dev_name(cldev->bus->dev), 1300 mei_me_cl_uuid(cldev->me_cl)); 1301 } 1302 1303 /** 1304 * mei_cl_bus_dev_alloc - initialize and allocate mei client device 1305 * 1306 * @bus: mei device 1307 * @me_cl: me client 1308 * 1309 * Return: allocated device structur or NULL on allocation failure 1310 */ 1311 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, 1312 struct mei_me_client *me_cl) 1313 { 1314 struct mei_cl_device *cldev; 1315 struct mei_cl *cl; 1316 1317 cldev = kzalloc(sizeof(*cldev), GFP_KERNEL); 1318 if (!cldev) 1319 return NULL; 1320 1321 cl = mei_cl_allocate(bus); 1322 if (!cl) { 1323 kfree(cldev); 1324 return NULL; 1325 } 1326 1327 device_initialize(&cldev->dev); 1328 cldev->dev.parent = bus->dev; 1329 cldev->dev.bus = &mei_cl_bus_type; 1330 cldev->dev.type = &mei_cl_device_type; 1331 cldev->bus = mei_dev_bus_get(bus); 1332 cldev->me_cl = mei_me_cl_get(me_cl); 1333 cldev->cl = cl; 1334 mei_cl_bus_set_name(cldev); 1335 cldev->is_added = 0; 1336 INIT_LIST_HEAD(&cldev->bus_list); 1337 1338 return cldev; 1339 } 1340 1341 /** 1342 * mei_cl_bus_dev_setup - setup me client device 1343 * run fix up routines and set the device name 1344 * 1345 * @bus: mei device 1346 * @cldev: me client device 1347 * 1348 * Return: true if the device is eligible for enumeration 1349 */ 1350 static bool mei_cl_bus_dev_setup(struct mei_device *bus, 1351 struct mei_cl_device *cldev) 1352 { 1353 cldev->do_match = 1; 1354 mei_cl_bus_dev_fixup(cldev); 1355 1356 /* the device name can change during fix up */ 1357 if (cldev->do_match) 1358 mei_cl_bus_set_name(cldev); 1359 1360 return cldev->do_match == 1; 1361 } 1362 1363 /** 1364 * mei_cl_bus_dev_add - add me client devices 1365 * 1366 * @cldev: me client device 1367 * 1368 * Return: 0 on success; < 0 on failre 1369 */ 1370 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev) 1371 { 1372 int ret; 1373 1374 dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n", 1375 mei_me_cl_uuid(cldev->me_cl), 1376 mei_me_cl_ver(cldev->me_cl)); 1377 ret = device_add(&cldev->dev); 1378 if (!ret) 1379 cldev->is_added = 1; 1380 1381 return ret; 1382 } 1383 1384 /** 1385 * mei_cl_bus_dev_stop - stop the driver 1386 * 1387 * @cldev: me client device 1388 */ 1389 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev) 1390 { 1391 if (cldev->is_added) 1392 device_release_driver(&cldev->dev); 1393 } 1394 1395 /** 1396 * mei_cl_bus_dev_destroy - destroy me client devices object 1397 * 1398 * @cldev: me client device 1399 * 1400 * Locking: called under "dev->cl_bus_lock" lock 1401 */ 1402 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev) 1403 { 1404 1405 WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock)); 1406 1407 if (!cldev->is_added) 1408 return; 1409 1410 device_del(&cldev->dev); 1411 1412 list_del_init(&cldev->bus_list); 1413 1414 cldev->is_added = 0; 1415 put_device(&cldev->dev); 1416 } 1417 1418 /** 1419 * mei_cl_bus_remove_device - remove a devices form the bus 1420 * 1421 * @cldev: me client device 1422 */ 1423 static void mei_cl_bus_remove_device(struct mei_cl_device *cldev) 1424 { 1425 mei_cl_bus_dev_stop(cldev); 1426 mei_cl_bus_dev_destroy(cldev); 1427 } 1428 1429 /** 1430 * mei_cl_bus_remove_devices - remove all devices form the bus 1431 * 1432 * @bus: mei device 1433 */ 1434 void mei_cl_bus_remove_devices(struct mei_device *bus) 1435 { 1436 struct mei_cl_device *cldev, *next; 1437 1438 mutex_lock(&bus->cl_bus_lock); 1439 list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list) 1440 mei_cl_bus_remove_device(cldev); 1441 mutex_unlock(&bus->cl_bus_lock); 1442 } 1443 1444 1445 /** 1446 * mei_cl_bus_dev_init - allocate and initializes an mei client devices 1447 * based on me client 1448 * 1449 * @bus: mei device 1450 * @me_cl: me client 1451 * 1452 * Locking: called under "dev->cl_bus_lock" lock 1453 */ 1454 static void mei_cl_bus_dev_init(struct mei_device *bus, 1455 struct mei_me_client *me_cl) 1456 { 1457 struct mei_cl_device *cldev; 1458 1459 WARN_ON(!mutex_is_locked(&bus->cl_bus_lock)); 1460 1461 dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl)); 1462 1463 if (me_cl->bus_added) 1464 return; 1465 1466 cldev = mei_cl_bus_dev_alloc(bus, me_cl); 1467 if (!cldev) 1468 return; 1469 1470 me_cl->bus_added = true; 1471 list_add_tail(&cldev->bus_list, &bus->device_list); 1472 1473 } 1474 1475 /** 1476 * mei_cl_bus_rescan - scan me clients list and add create 1477 * devices for eligible clients 1478 * 1479 * @bus: mei device 1480 */ 1481 static void mei_cl_bus_rescan(struct mei_device *bus) 1482 { 1483 struct mei_cl_device *cldev, *n; 1484 struct mei_me_client *me_cl; 1485 1486 mutex_lock(&bus->cl_bus_lock); 1487 1488 down_read(&bus->me_clients_rwsem); 1489 list_for_each_entry(me_cl, &bus->me_clients, list) 1490 mei_cl_bus_dev_init(bus, me_cl); 1491 up_read(&bus->me_clients_rwsem); 1492 1493 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) { 1494 1495 if (!mei_me_cl_is_active(cldev->me_cl)) { 1496 mei_cl_bus_remove_device(cldev); 1497 continue; 1498 } 1499 1500 if (cldev->is_added) 1501 continue; 1502 1503 if (mei_cl_bus_dev_setup(bus, cldev)) 1504 mei_cl_bus_dev_add(cldev); 1505 else { 1506 list_del_init(&cldev->bus_list); 1507 put_device(&cldev->dev); 1508 } 1509 } 1510 mutex_unlock(&bus->cl_bus_lock); 1511 1512 dev_dbg(bus->dev, "rescan end"); 1513 } 1514 1515 void mei_cl_bus_rescan_work(struct work_struct *work) 1516 { 1517 struct mei_device *bus = 1518 container_of(work, struct mei_device, bus_rescan_work); 1519 1520 mei_cl_bus_rescan(bus); 1521 } 1522 1523 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv, 1524 struct module *owner) 1525 { 1526 int err; 1527 1528 cldrv->driver.name = cldrv->name; 1529 cldrv->driver.owner = owner; 1530 cldrv->driver.bus = &mei_cl_bus_type; 1531 1532 err = driver_register(&cldrv->driver); 1533 if (err) 1534 return err; 1535 1536 pr_debug("mei: driver [%s] registered\n", cldrv->driver.name); 1537 1538 return 0; 1539 } 1540 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register); 1541 1542 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv) 1543 { 1544 driver_unregister(&cldrv->driver); 1545 1546 pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name); 1547 } 1548 EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister); 1549 1550 1551 int __init mei_cl_bus_init(void) 1552 { 1553 return bus_register(&mei_cl_bus_type); 1554 } 1555 1556 void __exit mei_cl_bus_exit(void) 1557 { 1558 bus_unregister(&mei_cl_bus_type); 1559 } 1560