/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	cl = NULL;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto err_unlock;
	}

	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto err_unlock;

	/* open_handle_count check is handled in the mei_cl_link */
	err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
	if (err)
		goto err_unlock;

	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		cl_dbg(dev, cl, "disconnecting\n");
		rets = mei_cl_disconnect(cl);
	}
	mei_cl_flush_queues(cl);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);


	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = mei_cl_find_read_cb(cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	mei_io_cb_free(cb);

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb) {
		cb = cl->read_cb;
		/* read what is left in the buffer */
		if (cb->buf_idx > *offset)
			goto copy_buffer;
		/* offset is beyond buf_idx, we have no more data, return 0 */
		if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
			rets = 0;
			goto free;
		}
		/* Offset needs to be cleaned for contiguous reads */
		if (cb->buf_idx == 0 && *offset > 0)
			*offset = 0;
	} else if (*offset > 0) {
		*offset = 0;
	}

	err = mei_cl_read_start(cl, length);
	if (err && err != -EBUSY) {
		dev_dbg(dev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
				MEI_READ_COMPLETE == cl->reading_state ||
				mei_cl_is_transitioning(cl))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (mei_cl_is_transitioning(cl)) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(dev->dev, "buf.size = %d buf.idx= %ld\n",
		cb->response_buffer.size, cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = mei_cl_find_read_cb(cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
out:
	dev_dbg(dev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_me_client *me_cl = NULL;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	me_cl = mei_me_cl_by_uuid_id(dev, &cl->cl_uuid, cl->me_client_id);
	if (!me_cl) {
		rets = -ENOTTY;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (length > me_cl->props.max_msg_length) {
		rets = -EFBIG;
		goto out;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		dev_err(dev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		rets = -ENODEV;
		goto out;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = mei_cl_find_read_cb(cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
		}
	} else if (cl->reading_state == MEI_IDLE)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		rets = -ENOMEM;
		goto out;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto out;

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(dev->dev,
				"amthif write failed with status = %d\n", rets);
			goto out;
		}
		mei_me_cl_put(me_cl);
		mutex_unlock(&dev->device_lock);
		return length;
	}

	rets = mei_cl_write(cl, write_cb, false);
out:
	mei_me_cl_put(me_cl);
	mutex_unlock(&dev->device_lock);
	if (rets < 0)
		mei_io_cb_free(write_cb);
	return rets;
}
424 */ 425 static int mei_ioctl_connect_client(struct file *file, 426 struct mei_connect_client_data *data) 427 { 428 struct mei_device *dev; 429 struct mei_client *client; 430 struct mei_me_client *me_cl; 431 struct mei_cl *cl; 432 int rets; 433 434 cl = file->private_data; 435 dev = cl->dev; 436 437 if (dev->dev_state != MEI_DEV_ENABLED) 438 return -ENODEV; 439 440 if (cl->state != MEI_FILE_INITIALIZING && 441 cl->state != MEI_FILE_DISCONNECTED) 442 return -EBUSY; 443 444 /* find ME client we're trying to connect to */ 445 me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid); 446 if (!me_cl || me_cl->props.fixed_address) { 447 dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n", 448 &data->in_client_uuid); 449 return -ENOTTY; 450 } 451 452 cl->me_client_id = me_cl->client_id; 453 cl->cl_uuid = me_cl->props.protocol_name; 454 455 dev_dbg(dev->dev, "Connect to FW Client ID = %d\n", 456 cl->me_client_id); 457 dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n", 458 me_cl->props.protocol_version); 459 dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n", 460 me_cl->props.max_msg_length); 461 462 /* if we're connecting to amthif client then we will use the 463 * existing connection 464 */ 465 if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) { 466 dev_dbg(dev->dev, "FW Client is amthi\n"); 467 if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) { 468 rets = -ENODEV; 469 goto end; 470 } 471 mei_cl_unlink(cl); 472 473 kfree(cl); 474 cl = NULL; 475 dev->iamthif_open_count++; 476 file->private_data = &dev->iamthif_cl; 477 478 client = &data->out_client_properties; 479 client->max_msg_length = me_cl->props.max_msg_length; 480 client->protocol_version = me_cl->props.protocol_version; 481 rets = dev->iamthif_cl.status; 482 483 goto end; 484 } 485 486 /* prepare the output buffer */ 487 client = &data->out_client_properties; 488 client->max_msg_length = me_cl->props.max_msg_length; 489 client->protocol_version = me_cl->props.protocol_version; 490 dev_dbg(dev->dev, "Can connect?\n"); 491 492 rets = mei_cl_connect(cl, file); 493 494 end: 495 mei_me_cl_put(me_cl); 496 return rets; 497 } 498 499 /** 500 * mei_ioctl - the IOCTL function 501 * 502 * @file: pointer to file structure 503 * @cmd: ioctl command 504 * @data: pointer to mei message structure 505 * 506 * Return: 0 on success , <0 on error 507 */ 508 static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) 509 { 510 struct mei_device *dev; 511 struct mei_cl *cl = file->private_data; 512 struct mei_connect_client_data connect_data; 513 int rets; 514 515 516 if (WARN_ON(!cl || !cl->dev)) 517 return -ENODEV; 518 519 dev = cl->dev; 520 521 dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd); 522 523 mutex_lock(&dev->device_lock); 524 if (dev->dev_state != MEI_DEV_ENABLED) { 525 rets = -ENODEV; 526 goto out; 527 } 528 529 switch (cmd) { 530 case IOCTL_MEI_CONNECT_CLIENT: 531 dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n"); 532 if (copy_from_user(&connect_data, (char __user *)data, 533 sizeof(struct mei_connect_client_data))) { 534 dev_dbg(dev->dev, "failed to copy data from userland\n"); 535 rets = -EFAULT; 536 goto out; 537 } 538 539 rets = mei_ioctl_connect_client(file, &connect_data); 540 if (rets) 541 goto out; 542 543 /* if all is ok, copying the data back to user. 

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data connect_data;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, &connect_data);
		if (rets)
			goto out;

		/* if all is ok, copy the data back to user space */
		if (copy_to_user((char __user *)data, &connect_data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	default:
		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return POLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mutex_unlock(&dev->device_lock);


	if (cl == &dev->iamthif_cl)
		return mei_amthif_poll(dev, file, wait);

	poll_wait(file, &cl->tx_wait, wait);

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/**
 * fw_status_show - mei device attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
		struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	mutex_lock(&dev->device_lock);
	err = mei_fw_status(dev, &fw_status);
	mutex_unlock(&dev->device_lock);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);

static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);
669 */ 670 static const struct file_operations mei_fops = { 671 .owner = THIS_MODULE, 672 .read = mei_read, 673 .unlocked_ioctl = mei_ioctl, 674 #ifdef CONFIG_COMPAT 675 .compat_ioctl = mei_compat_ioctl, 676 #endif 677 .open = mei_open, 678 .release = mei_release, 679 .write = mei_write, 680 .poll = mei_poll, 681 .llseek = no_llseek 682 }; 683 684 static struct class *mei_class; 685 static dev_t mei_devt; 686 #define MEI_MAX_DEVS MINORMASK 687 static DEFINE_MUTEX(mei_minor_lock); 688 static DEFINE_IDR(mei_idr); 689 690 /** 691 * mei_minor_get - obtain next free device minor number 692 * 693 * @dev: device pointer 694 * 695 * Return: allocated minor, or -ENOSPC if no free minor left 696 */ 697 static int mei_minor_get(struct mei_device *dev) 698 { 699 int ret; 700 701 mutex_lock(&mei_minor_lock); 702 ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL); 703 if (ret >= 0) 704 dev->minor = ret; 705 else if (ret == -ENOSPC) 706 dev_err(dev->dev, "too many mei devices\n"); 707 708 mutex_unlock(&mei_minor_lock); 709 return ret; 710 } 711 712 /** 713 * mei_minor_free - mark device minor number as free 714 * 715 * @dev: device pointer 716 */ 717 static void mei_minor_free(struct mei_device *dev) 718 { 719 mutex_lock(&mei_minor_lock); 720 idr_remove(&mei_idr, dev->minor); 721 mutex_unlock(&mei_minor_lock); 722 } 723 724 int mei_register(struct mei_device *dev, struct device *parent) 725 { 726 struct device *clsdev; /* class device */ 727 int ret, devno; 728 729 ret = mei_minor_get(dev); 730 if (ret < 0) 731 return ret; 732 733 /* Fill in the data structures */ 734 devno = MKDEV(MAJOR(mei_devt), dev->minor); 735 cdev_init(&dev->cdev, &mei_fops); 736 dev->cdev.owner = mei_fops.owner; 737 738 /* Add the device */ 739 ret = cdev_add(&dev->cdev, devno, 1); 740 if (ret) { 741 dev_err(parent, "unable to add device %d:%d\n", 742 MAJOR(mei_devt), dev->minor); 743 goto err_dev_add; 744 } 745 746 clsdev = device_create_with_groups(mei_class, parent, devno, 747 dev, mei_groups, 748 "mei%d", dev->minor); 749 750 if (IS_ERR(clsdev)) { 751 dev_err(parent, "unable to create device %d:%d\n", 752 MAJOR(mei_devt), dev->minor); 753 ret = PTR_ERR(clsdev); 754 goto err_dev_create; 755 } 756 757 ret = mei_dbgfs_register(dev, dev_name(clsdev)); 758 if (ret) { 759 dev_err(clsdev, "cannot register debugfs ret = %d\n", ret); 760 goto err_dev_dbgfs; 761 } 762 763 return 0; 764 765 err_dev_dbgfs: 766 device_destroy(mei_class, devno); 767 err_dev_create: 768 cdev_del(&dev->cdev); 769 err_dev_add: 770 mei_minor_free(dev); 771 return ret; 772 } 773 EXPORT_SYMBOL_GPL(mei_register); 774 775 void mei_deregister(struct mei_device *dev) 776 { 777 int devno; 778 779 devno = dev->cdev.dev; 780 cdev_del(&dev->cdev); 781 782 mei_dbgfs_deregister(dev); 783 784 device_destroy(mei_class, devno); 785 786 mei_minor_free(dev); 787 } 788 EXPORT_SYMBOL_GPL(mei_deregister); 789 790 static int __init mei_init(void) 791 { 792 int ret; 793 794 mei_class = class_create(THIS_MODULE, "mei"); 795 if (IS_ERR(mei_class)) { 796 pr_err("couldn't create class\n"); 797 ret = PTR_ERR(mei_class); 798 goto err; 799 } 800 801 ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei"); 802 if (ret < 0) { 803 pr_err("unable to allocate char dev region\n"); 804 goto err_class; 805 } 806 807 ret = mei_cl_bus_init(); 808 if (ret < 0) { 809 pr_err("unable to initialize bus\n"); 810 goto err_chrdev; 811 } 812 813 return 0; 814 815 err_chrdev: 816 unregister_chrdev_region(mei_devt, MEI_MAX_DEVS); 817 err_class: 818 class_destroy(mei_class); 819 err: 820 return 

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");