// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched/signal.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/* sysfs class and char device region shared by all MEI devices */
static struct class *mei_class;
static dev_t mei_devt;
/* one minor per device; MINORMASK bounds the IDR allocation below */
#define MEI_MAX_DEVS  MINORMASK
/* serializes minor number allocation/release in mei_idr */
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Allocates a host client, links it to the device and stashes it in
 * file->private_data for the other file operations.
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
        struct mei_device *dev;
        struct mei_cl *cl;

        int err;

        dev = container_of(inode->i_cdev, struct mei_device, cdev);
        /* NOTE(review): container_of() cannot yield NULL here; this check
         * is purely defensive — confirm before removing. */
        if (!dev)
                return -ENODEV;

        mutex_lock(&dev->device_lock);

        /* the device must be fully enabled before clients may connect */
        if (dev->dev_state != MEI_DEV_ENABLED) {
                dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
                        mei_dev_state_str(dev->dev_state));
                err = -ENODEV;
                goto err_unlock;
        }

        /* allocate a host client and link it into the device client list */
        cl = mei_cl_alloc_linked(dev);
        if (IS_ERR(cl)) {
                err = PTR_ERR(cl);
                goto err_unlock;
        }

        cl->fp = file;
        file->private_data = cl;

        mutex_unlock(&dev->device_lock);

        return nonseekable_open(inode, file);

err_unlock:
        mutex_unlock(&dev->device_lock);
        return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 *
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        /* best-effort disconnect; queues are flushed regardless of result */
        rets = mei_cl_disconnect(cl);

        mei_cl_flush_queues(cl, file);
        cl_dbg(dev, cl, "removing\n");

        mei_cl_unlink(cl);

        file->private_data = NULL;

        kfree(cl);

        mutex_unlock(&dev->device_lock);
        return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
                        size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        struct mei_cl_cb *cb = NULL;
        bool nonblock = !!(file->f_flags & O_NONBLOCK);
        ssize_t rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;


        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        if (length == 0) {
                rets = 0;
                goto out;
        }

        if (ubuf == NULL) {
                rets = -EMSGSIZE;
                goto out;
        }

        /* a previously completed read may still be queued for this file */
        cb = mei_cl_read_cb(cl, file);
        if (cb)
                goto copy_buffer;

        /* no pending data: a stale partial-read offset is meaningless */
        if (*offset > 0)
                *offset = 0;

        rets = mei_cl_read_start(cl, length, file);
        if (rets && rets != -EBUSY) {
                cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
                goto out;
        }

        if (nonblock) {
                rets = -EAGAIN;
                goto out;
        }

        /* drop the lock while sleeping; the interrupt path completes reads
         * and wakes rx_wait */
        mutex_unlock(&dev->device_lock);
        if (wait_event_interruptible(cl->rx_wait,
                                     !list_empty(&cl->rd_completed) ||
                                     !mei_cl_is_connected(cl))) {
                if (signal_pending(current))
                        return -EINTR;
                return -ERESTARTSYS;
        }
        mutex_lock(&dev->device_lock);

        /* the client may have been disconnected while we slept */
        if (!mei_cl_is_connected(cl)) {
                rets = -ENODEV;
                goto out;
        }

        cb = mei_cl_read_cb(cl, file);
        if (!cb) {
                rets = 0;
                goto out;
        }

copy_buffer:
        /* now copy the data to user space */
        if (cb->status) {
                rets = cb->status;
                cl_dbg(dev, cl, "read operation failed %zd\n", rets);
                goto free;
        }

        cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
               cb->buf.size, cb->buf_idx, *offset);
        if (*offset >= cb->buf_idx) {
                rets = 0;
                goto free;
        }

        /* length is being truncated to PAGE_SIZE,
         * however buf_idx may point beyond that */
        length = min_t(size_t, length, cb->buf_idx - *offset);

        if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
                dev_dbg(dev->dev, "failed to copy data to userland\n");
                rets = -EFAULT;
                goto free;
        }

        rets = length;
        *offset += length;
        /* not all data was read, keep the cb */
        if (*offset < cb->buf_idx)
                goto out;

free:
        mei_io_cb_free(cb);
        *offset = 0;

out:
        cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
        mutex_unlock(&dev->device_lock);
        return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
                         size_t length, loff_t *offset)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl_cb *cb;
        struct mei_device *dev;
        ssize_t rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        if (!mei_cl_is_connected(cl)) {
                cl_err(dev, cl, "is not connected");
                rets = -ENODEV;
                goto out;
        }

        /* the FW-side client must still exist */
        if (!mei_me_cl_is_active(cl->me_cl)) {
                rets = -ENOTTY;
                goto out;
        }

        /* a single write may not exceed the client's MTU */
        if (length > mei_cl_mtu(cl)) {
                rets = -EFBIG;
                goto out;
        }

        if (length == 0) {
                rets = 0;
                goto out;
        }

        /* throttle: block (or -EAGAIN) until the tx queue has a free slot */
        while (cl->tx_cb_queued >= dev->tx_queue_limit) {
                if (file->f_flags & O_NONBLOCK) {
                        rets = -EAGAIN;
                        goto out;
                }
                /* sleep unlocked; the interrupt path drains the tx queue */
                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE ||
                                (!mei_cl_is_connected(cl)));
                mutex_lock(&dev->device_lock);
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto out;
                }
                if (!mei_cl_is_connected(cl)) {
                        rets = -ENODEV;
                        goto out;
                }
        }

        cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
        if (!cb) {
                rets = -ENOMEM;
                goto out;
        }

        rets = copy_from_user(cb->buf.data, ubuf, length);
        if (rets) {
                dev_dbg(dev->dev, "failed to copy data from userland\n");
                rets = -EFAULT;
                mei_io_cb_free(cb);
                goto out;
        }

        /* cb ownership passes to mei_cl_write() on success and failure */
        rets = mei_cl_write(cl, cb);
out:
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw
 * client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
                                    struct mei_connect_client_data *data)
{
        struct mei_device *dev;
        struct mei_client *client;
        struct mei_me_client *me_cl;
        struct mei_cl *cl;
        int rets;

        cl = file->private_data;
        dev = cl->dev;

        if (dev->dev_state != MEI_DEV_ENABLED)
                return -ENODEV;

        /* only a freshly opened or disconnected client may connect */
        if (cl->state != MEI_FILE_INITIALIZING &&
            cl->state != MEI_FILE_DISCONNECTED)
                return -EBUSY;

        /* find ME client we're trying to connect to */
        me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
        if (!me_cl) {
                dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
                        &data->in_client_uuid);
                rets = -ENOTTY;
                goto end;
        }

        if (me_cl->props.fixed_address) {
                /* fixed-address clients are reachable only when the module
                 * override explicitly allows them, or, absent an override,
                 * when the FW advertises fixed-address support */
                bool forbidden = dev->override_fixed_address ?
                         !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
                if (forbidden) {
                        dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
                                &data->in_client_uuid);
                        rets = -ENOTTY;
                        goto end;
                }
        }

        dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
                        me_cl->client_id);
        dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
                        me_cl->props.protocol_version);
        dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
                        me_cl->props.max_msg_length);

        /* prepare the output buffer */
        client = &data->out_client_properties;
        client->max_msg_length = me_cl->props.max_msg_length;
        client->protocol_version = me_cl->props.protocol_version;
        dev_dbg(dev->dev, "Can connect?\n");

        rets = mei_cl_connect(cl, me_cl, file);

end:
        /* drop the reference taken by mei_me_cl_by_uuid() */
        mei_me_cl_put(me_cl);
        return rets;
}

/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: 0 - disable, 1 - enable
 *
 * Return: 0 on success , <0 on error
 */
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
        struct mei_cl *cl = file->private_data;

        if (request != MEI_HBM_NOTIFICATION_START &&
            request != MEI_HBM_NOTIFICATION_STOP)
                return -EINVAL;

        return mei_cl_notify_request(cl, file, (u8)request);
}

/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: 0 - disable, 1 - enable
 *
 * Return: 0 on success , <0 on error
 */
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
        struct mei_cl *cl = file->private_data;
        bool notify_ev;
        /* block unless the file was opened O_NONBLOCK */
        bool block = (file->f_flags & O_NONBLOCK) == 0;
        int rets;

        rets = mei_cl_notify_get(cl, block, &notify_ev);
        if (rets)
                return rets;

        *notify_get = notify_ev ? 1 : 0;
        return 0;
}

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success , <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
        struct mei_device *dev;
        struct mei_cl *cl = file->private_data;
        struct mei_connect_client_data connect_data;
        u32 notify_get, notify_req;
        int rets;


        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

        mutex_lock(&dev->device_lock);
        if (dev->dev_state != MEI_DEV_ENABLED) {
                rets = -ENODEV;
                goto out;
        }

        switch (cmd) {
        case IOCTL_MEI_CONNECT_CLIENT:
                dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
                if (copy_from_user(&connect_data, (char __user *)data,
                                sizeof(struct mei_connect_client_data))) {
                        dev_dbg(dev->dev, "failed to copy data from userland\n");
                        rets = -EFAULT;
                        goto out;
                }

                rets = mei_ioctl_connect_client(file, &connect_data);
                if (rets)
                        goto out;

                /* if all is ok, copying the data back to user.
                 */
                if (copy_to_user((char __user *)data, &connect_data,
                                sizeof(struct mei_connect_client_data))) {
                        dev_dbg(dev->dev, "failed to copy data to userland\n");
                        rets = -EFAULT;
                        goto out;
                }

                break;

        case IOCTL_MEI_NOTIFY_SET:
                dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
                if (copy_from_user(&notify_req,
                                   (char __user *)data, sizeof(notify_req))) {
                        dev_dbg(dev->dev, "failed to copy data from userland\n");
                        rets = -EFAULT;
                        goto out;
                }
                rets = mei_ioctl_client_notify_request(file, notify_req);
                break;

        case IOCTL_MEI_NOTIFY_GET:
                dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
                rets = mei_ioctl_client_notify_get(file, &notify_get);
                if (rets)
                        goto out;

                dev_dbg(dev->dev, "copy connect data to user\n");
                if (copy_to_user((char __user *)data,
                                &notify_get, sizeof(notify_get))) {
                        dev_dbg(dev->dev, "failed to copy data to userland\n");
                        rets = -EFAULT;
                        goto out;

                }
                break;

        default:
                rets = -ENOIOCTLCMD;
        }

out:
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success , <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
                             unsigned int cmd, unsigned long data)
{
        /* only the 32-bit user pointer needs conversion; commands match */
        return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
        __poll_t req_events = poll_requested_events(wait);
        struct mei_cl *cl = file->private_data;
        struct mei_device *dev;
        __poll_t mask = 0;
        bool notify_en;

        if (WARN_ON(!cl || !cl->dev))
                return EPOLLERR;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        /* report EPOLLPRI only when the caller asked for it and the
         * client has notifications enabled */
        notify_en = cl->notify_en && (req_events & EPOLLPRI);

        if (dev->dev_state != MEI_DEV_ENABLED ||
            !mei_cl_is_connected(cl)) {
                mask = EPOLLERR;
                goto out;
        }

        if (notify_en) {
                poll_wait(file, &cl->ev_wait, wait);
                if (cl->notify_ev)
                        mask |= EPOLLPRI;
        }

        if (req_events & (EPOLLIN | EPOLLRDNORM)) {
                poll_wait(file, &cl->rx_wait, wait);

                if (!list_empty(&cl->rd_completed))
                        mask |= EPOLLIN | EPOLLRDNORM;
                else
                        /* kick off a read so a later wakeup has data */
                        mei_cl_read_start(cl, mei_cl_mtu(cl), file);
        }

        if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
                poll_wait(file, &cl->tx_wait, wait);
                if (cl->tx_cb_queued < dev->tx_queue_limit)
                        mask |= EPOLLOUT | EPOLLWRNORM;
        }

out:
        mutex_unlock(&dev->device_lock);
        return mask;
}

/**
 * mei_cl_is_write_queued - check if the client has pending writes.
 *
 * @cl: writing host client
 *
 * Return: true if client is writing, false otherwise.
 */
static bool mei_cl_is_write_queued(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;
        struct mei_cl_cb *cb;

        /* scan both the not-yet-sent and the sent-awaiting-ack lists */
        list_for_each_entry(cb, &dev->write_list, list)
                if (cb->cl == cl)
                        return true;
        list_for_each_entry(cb, &dev->write_waiting_list, list)
                if (cb->cl == cl)
                        return true;
        return false;
}

/**
 * mei_fsync - the fsync handler
 *
 * @fp: pointer to file structure
 * @start: unused
 * @end: unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
        struct mei_cl *cl = fp->private_data;
        struct mei_device *dev;
        int rets;

        if (WARN_ON(!cl || !cl->dev))
                return -ENODEV;

        dev = cl->dev;

        mutex_lock(&dev->device_lock);

        if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
                rets = -ENODEV;
                goto out;
        }

        /* drain: sleep until no write cbs of this client remain queued */
        while (mei_cl_is_write_queued(cl)) {
                mutex_unlock(&dev->device_lock);
                rets = wait_event_interruptible(cl->tx_wait,
                                cl->writing_state == MEI_WRITE_COMPLETE ||
                                !mei_cl_is_connected(cl));
                mutex_lock(&dev->device_lock);
                if (rets) {
                        if (signal_pending(current))
                                rets = -EINTR;
                        goto out;
                }
                if (!mei_cl_is_connected(cl)) {
                        rets = -ENODEV;
                        goto out;
                }
        }
        rets = 0;
out:
        mutex_unlock(&dev->device_lock);
        return rets;
}

/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{

        struct mei_cl *cl = file->private_data;

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        return fasync_helper(fd, file, band, &cl->ev_async);
}

/**
 * trc_show - mei device trc attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t trc_show(struct device *device,
                        struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u32 trc;
        int ret;

        ret = mei_trc_status(dev, &trc);
        if (ret)
                return ret;
        return sprintf(buf, "%08X\n", trc);
}
static DEVICE_ATTR_RO(trc);

/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
                struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct mei_fw_status fw_status;
        int err, i;
        ssize_t cnt = 0;

        /* snapshot the status registers under the device lock */
        mutex_lock(&dev->device_lock);
        err = mei_fw_status(dev, &fw_status);
        mutex_unlock(&dev->device_lock);
        if (err) {
                dev_err(device, "read fw_status error = %d\n", err);
                return err;
        }

        for (i = 0; i < fw_status.count; i++)
                cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
                                fw_status.status[i]);
        return cnt;
}
static DEVICE_ATTR_RO(fw_status);

/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
                            struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct hbm_version ver;

        mutex_lock(&dev->device_lock);
        ver = dev->version;
        mutex_unlock(&dev->device_lock);

        return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);

/**
 * hbm_ver_drv_show - display HBM protocol version advertised by driver
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);

/* tx_queue_limit_show - display the per-client tx queue depth limit */
static ssize_t tx_queue_limit_show(struct device *device,
                                   struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u8 size = 0;

        mutex_lock(&dev->device_lock);
        size = dev->tx_queue_limit;
        mutex_unlock(&dev->device_lock);

        return snprintf(buf, PAGE_SIZE, "%u\n", size);
}

/* tx_queue_limit_store - set the per-client tx queue depth limit,
 * clamped to [MEI_TX_QUEUE_LIMIT_MIN, MEI_TX_QUEUE_LIMIT_MAX] */
static ssize_t tx_queue_limit_store(struct device *device,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
{
        struct mei_device *dev = dev_get_drvdata(device);
        u8 limit;
        unsigned int inp;
        int err;

        err = kstrtouint(buf, 10, &inp);
        if (err)
                return err;
        if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
                return -EINVAL;
        limit = inp;

        mutex_lock(&dev->device_lock);
        dev->tx_queue_limit = limit;
        mutex_unlock(&dev->device_lock);

        return count;
}
static DEVICE_ATTR_RW(tx_queue_limit);

/**
 * fw_ver_show - display ME FW version
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_ver_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        struct mei_fw_version *ver;
        ssize_t cnt = 0;
        int i;

        ver = dev->fw_ver;

        for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
                cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
                                 ver[i].platform, ver[i].major, ver[i].minor,
                                 ver[i].hotfix, ver[i].buildno);
        return cnt;
}
static DEVICE_ATTR_RO(fw_ver);

/**
 * dev_state_show - display device state
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t dev_state_show(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct mei_device *dev = dev_get_drvdata(device);
        enum mei_dev_state dev_state;

        mutex_lock(&dev->device_lock);
        dev_state = dev->dev_state;
        mutex_unlock(&dev->device_lock);

        return sprintf(buf, "%s", mei_dev_state_str(dev_state));
}
static DEVICE_ATTR_RO(dev_state);

/**
 * mei_set_devstate - set to new device state and notify sysfs file.
 *
 * @dev: mei_device
 * @state: new device state
 */
void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
{
        struct device *clsdev;

        if (dev->dev_state == state)
                return;

        dev->dev_state = state;

        /* wake sysfs pollers of the dev_state attribute */
        clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
        if (clsdev) {
                sysfs_notify(&clsdev->kobj, NULL, "dev_state");
                put_device(clsdev);
        }
}

static struct attribute *mei_attrs[] = {
        &dev_attr_fw_status.attr,
        &dev_attr_hbm_ver.attr,
        &dev_attr_hbm_ver_drv.attr,
        &dev_attr_tx_queue_limit.attr,
        &dev_attr_fw_ver.attr,
        &dev_attr_dev_state.attr,
        &dev_attr_trc.attr,
        NULL
};
ATTRIBUTE_GROUPS(mei);

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
        .owner = THIS_MODULE,
        .read = mei_read,
        .unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = mei_compat_ioctl,
#endif
        .open = mei_open,
        .release = mei_release,
        .write = mei_write,
        .poll = mei_poll,
        .fsync = mei_fsync,
        .fasync = mei_fasync,
        .llseek = no_llseek
};

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
        int ret;

        mutex_lock(&mei_minor_lock);
        ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
        if (ret >= 0)
                dev->minor = ret;
        else if (ret == -ENOSPC)
                dev_err(dev->dev, "too many mei devices\n");

        mutex_unlock(&mei_minor_lock);
        return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
        mutex_lock(&mei_minor_lock);
        idr_remove(&mei_idr, dev->minor);
        mutex_unlock(&mei_minor_lock);
}

/**
 * mei_register - register a mei device: allocate a minor, add the
 * char device and create the class device with its sysfs groups.
 *
 * @dev: mei device pointer
 * @parent: parent device pointer
 *
 * Return: 0 on success, <0 on error
 */
int mei_register(struct mei_device *dev, struct device *parent)
{
        struct device *clsdev; /* class device */
        int ret, devno;

        ret = mei_minor_get(dev);
        if (ret < 0)
                return ret;

        /* Fill in the data structures */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
        dev->cdev.owner = parent->driver->owner;

        /* Add the device */
        ret = cdev_add(&dev->cdev, devno, 1);
        if (ret) {
                dev_err(parent, "unable to add device %d:%d\n",
                        MAJOR(mei_devt), dev->minor);
                goto err_dev_add;
        }

        clsdev = device_create_with_groups(mei_class, parent, devno,
                                           dev, mei_groups,
                                           "mei%d", dev->minor);

        if (IS_ERR(clsdev)) {
                dev_err(parent, "unable to create device %d:%d\n",
                        MAJOR(mei_devt), dev->minor);
                ret = PTR_ERR(clsdev);
                goto err_dev_create;
        }

        mei_dbgfs_register(dev, dev_name(clsdev));

        return 0;

/* unwind in reverse order of acquisition */
err_dev_create:
        cdev_del(&dev->cdev);
err_dev_add:
        mei_minor_free(dev);
        return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

/**
 * mei_deregister - undo mei_register: remove the char and class
 * devices, tear down debugfs and release the minor.
 *
 * @dev: mei device pointer
 */
void mei_deregister(struct mei_device *dev)
{
        int devno;

        devno = dev->cdev.dev;
        cdev_del(&dev->cdev);

        mei_dbgfs_deregister(dev);

        device_destroy(mei_class, devno);

        mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

/* module init: create the class, reserve the char-dev region and
 * initialize the mei client bus */
static int __init mei_init(void)
{
        int ret;

        mei_class = class_create(THIS_MODULE, "mei");
        if (IS_ERR(mei_class)) {
                pr_err("couldn't create class\n");
                ret = PTR_ERR(mei_class);
                goto err;
        }

        ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
        if (ret < 0) {
                pr_err("unable to allocate char dev region\n");
                goto err_class;
        }

        ret = mei_cl_bus_init();
        if (ret < 0) {
                pr_err("unable to initialize bus\n");
                goto err_chrdev;
        }

        return 0;

err_chrdev:
        unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
        class_destroy(mei_class);
err:
        return ret;
}

/* module exit: release resources acquired in mei_init() */
static void __exit mei_exit(void)
{
        unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
        class_destroy(mei_class);
        mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");