// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>

struct ipmi_file_private
{
	struct ipmi_user *user;
	spinlock_t recv_msg_lock;
	struct list_head recv_msgs;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t wait;
	struct mutex recv_mutex;
	int default_retries;
	unsigned int default_retry_time_ms;
};

static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int was_empty;
	unsigned long flags;

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	was_empty = list_empty(&priv->recv_msgs);
	list_add_tail(&msg->link, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}
}

static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);

	if (!list_empty(&priv->recv_msgs))
		mask |= (EPOLLIN | EPOLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;

	return fasync_helper(fd, file, on, &priv->fasync_queue);
}

static const struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
	int if_num = iminor(inode);
	int rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &priv->user);
	if (rv) {
		kfree(priv);
		goto out;
	}

	file->private_data = priv;

	spin_lock_init(&priv->recv_msg_lock);
	INIT_LIST_HEAD(&priv->recv_msgs);
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	mutex_init(&priv->recv_mutex);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

out:
	return rv;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	struct ipmi_recv_msg *msg, *next;

	ipmi_destroy_user(priv->user);

	list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
		ipmi_free_recv_msg(msg);

	kfree(priv);

	return 0;
}

static int handle_send_req(struct ipmi_user *user,
			   struct ipmi_req *req,
			   int retries,
			   unsigned int retry_time_ms)
{
	int rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msgdata. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len)) {
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
out:
	kfree(msg.data);
	return rv;
}

static int handle_recv(struct ipmi_file_private *priv,
		       bool trunc, struct ipmi_recv *rsp,
		       int (*copyout)(struct ipmi_recv *, void __user *),
		       void __user *to)
{
	int addr_len;
	struct list_head *entry;
	struct ipmi_recv_msg *msg;
	unsigned long flags;
	int rv = 0, rv2 = 0;

	/* We claim a mutex because we don't want two
	   users getting something from the queue at a time.
	   Since we have to release the spinlock before we can
	   copy the data to the user, it's possible another
	   user will grab something from the queue, too. Then
	   the messages might get out of order if something
	   fails and the message gets put back onto the
	   queue. This mutex prevents that problem. */
	mutex_lock(&priv->recv_mutex);

	/* Grab the message off the list. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (list_empty(&(priv->recv_msgs))) {
		spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
		rv = -EAGAIN;
		goto recv_err;
	}
	entry = priv->recv_msgs.next;
	msg = list_entry(entry, struct ipmi_recv_msg, link);
	list_del(entry);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	addr_len = ipmi_addr_length(msg->addr.addr_type);
	if (rsp->addr_len < addr_len) {
		rv = -EINVAL;
		goto recv_putback_on_err;
	}

	if (copy_to_user(rsp->addr, &msg->addr, addr_len)) {
		rv = -EFAULT;
		goto recv_putback_on_err;
	}
	rsp->addr_len = addr_len;

	rsp->recv_type = msg->recv_type;
	rsp->msgid = msg->msgid;
	rsp->msg.netfn = msg->msg.netfn;
	rsp->msg.cmd = msg->msg.cmd;

	if (msg->msg.data_len > 0) {
		if (rsp->msg.data_len < msg->msg.data_len) {
			if (trunc) {
				rv2 = -EMSGSIZE;
				msg->msg.data_len = rsp->msg.data_len;
			} else {
				rv = -EMSGSIZE;
				goto recv_putback_on_err;
			}
		}

		if (copy_to_user(rsp->msg.data,
				 msg->msg.data,
				 msg->msg.data_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp->msg.data_len = msg->msg.data_len;
	} else {
		rsp->msg.data_len = 0;
	}

	rv = copyout(rsp, to);
	if (rv)
		goto recv_putback_on_err;

	mutex_unlock(&priv->recv_mutex);
	ipmi_free_recv_msg(msg);
	return rv2;

recv_putback_on_err:
	/* If we got an error, put the message back onto
	   the head of the queue. */
	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	list_add(entry, &priv->recv_msgs);
	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
recv_err:
	mutex_unlock(&priv->recv_mutex);
	return rv;
}

static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
{
	return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
}

static long ipmi_ioctl(struct file *file,
		       unsigned int cmd,
		       unsigned long data)
{
	int rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		rv = handle_send_req(priv->user, &req, retries, retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv rsp;

		if (copy_from_user(&rsp, arg, sizeof(rsp)))
			rv = -EFAULT;
		else
			rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
					 &rsp, copyout_recv, arg);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     IPMI_CHAN_ALL);
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd,
					   val.chans);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD_CHANS:
	{
		struct ipmi_cmdspec_chans val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd,
					     val.chans);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	/* The next four are legacy, not per-channel. */
	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_address(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int val;
		unsigned char rval;

		rv = ipmi_get_my_address(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, 0, val);
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int val;
		unsigned char rval;

		rv = ipmi_get_my_LUN(priv->user, 0, &rval);
		if (rv)
			break;

		val = rval;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		return ipmi_set_my_address(priv->user, val.channel, val.value);
	}

	case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
		break;
	}

	case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
	{
		struct ipmi_channel_lun_address_set val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
		if (rv)
			break;

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		mutex_lock(&priv->recv_mutex);
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		mutex_unlock(&priv->recv_mutex);
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		mutex_lock(&priv->recv_mutex);
		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}

	case IPMICTL_GET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		mode = ipmi_get_maintenance_mode(priv->user);
		if (copy_to_user(arg, &mode, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MAINTENANCE_MODE_CMD:
	{
		int mode;

		if (copy_from_user(&mode, arg, sizeof(mode))) {
			rv = -EFAULT;
			break;
		}
		rv = ipmi_set_maintenance_mode(priv->user, mode);
		break;
	}

	default:
		rv = -ENOTTY;
		break;
	}

	return rv;
}

#ifdef CONFIG_COMPAT
/*
 * The following code supports 32-bit compatible ioctls on 64-bit
 * kernels, allowing 32-bit applications to run on a 64-bit kernel.
 */
#define COMPAT_IPMICTL_SEND_COMMAND	\
	_IOR(IPMI_IOC_MAGIC, 13, struct compat_ipmi_req)
#define COMPAT_IPMICTL_SEND_COMMAND_SETTIME	\
	_IOR(IPMI_IOC_MAGIC, 21, struct compat_ipmi_req_settime)
#define COMPAT_IPMICTL_RECEIVE_MSG	\
	_IOWR(IPMI_IOC_MAGIC, 12, struct compat_ipmi_recv)
#define COMPAT_IPMICTL_RECEIVE_MSG_TRUNC	\
	_IOWR(IPMI_IOC_MAGIC, 11, struct compat_ipmi_recv)

struct compat_ipmi_msg {
	u8 netfn;
	u8 cmd;
	u16 data_len;
	compat_uptr_t data;
};

struct compat_ipmi_req {
	compat_uptr_t addr;
	compat_uint_t addr_len;
	compat_long_t msgid;
	struct compat_ipmi_msg msg;
};

struct compat_ipmi_recv {
	compat_int_t recv_type;
	compat_uptr_t addr;
	compat_uint_t addr_len;
	compat_long_t msgid;
	struct compat_ipmi_msg msg;
};

struct compat_ipmi_req_settime {
	struct compat_ipmi_req req;
	compat_int_t retries;
	compat_uint_t retry_time_ms;
};

/*
 * Define some helper functions for copying IPMI data
 */
static void get_compat_ipmi_msg(struct ipmi_msg *p64,
				struct compat_ipmi_msg *p32)
{
	p64->netfn = p32->netfn;
	p64->cmd = p32->cmd;
	p64->data_len = p32->data_len;
	p64->data = compat_ptr(p32->data);
}

static void get_compat_ipmi_req(struct ipmi_req *p64,
				struct compat_ipmi_req *p32)
{
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
					struct compat_ipmi_req_settime *p32)
{
	get_compat_ipmi_req(&p64->req, &p32->req);
	p64->retries = p32->retries;
	p64->retry_time_ms = p32->retry_time_ms;
}

static void get_compat_ipmi_recv(struct ipmi_recv *p64,
				 struct compat_ipmi_recv *p32)
{
	memset(p64, 0, sizeof(struct ipmi_recv));
	p64->recv_type = p32->recv_type;
	p64->addr = compat_ptr(p32->addr);
	p64->addr_len = p32->addr_len;
	p64->msgid = p32->msgid;
	get_compat_ipmi_msg(&p64->msg, &p32->msg);
}

static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
	struct compat_ipmi_recv v32;

	memset(&v32, 0, sizeof(struct compat_ipmi_recv));
	v32.recv_type = p64->recv_type;
	v32.addr = ptr_to_compat(p64->addr);
	v32.addr_len = p64->addr_len;
	v32.msgid = p64->msgid;
	v32.msg.netfn = p64->msg.netfn;
	v32.msg.cmd = p64->msg.cmd;
	v32.msg.data_len = p64->msg.data_len;
	v32.msg.data = ptr_to_compat(p64->msg.data);
	return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}

/*
 * Handle compatibility ioctls
 */
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	struct ipmi_file_private *priv = filep->private_data;

	switch(cmd) {
	case COMPAT_IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req rp;
		struct compat_ipmi_req r32;
		int retries;
		unsigned int retry_time_ms;

		if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
			return -EFAULT;

		get_compat_ipmi_req(&rp, &r32);

		mutex_lock(&priv->recv_mutex);
		retries = priv->default_retries;
		retry_time_ms = priv->default_retry_time_ms;
		mutex_unlock(&priv->recv_mutex);

		return handle_send_req(priv->user, &rp,
				       retries, retry_time_ms);
	}
	case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime sp;
		struct compat_ipmi_req_settime sp32;

		if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
			return -EFAULT;

		get_compat_ipmi_req_settime(&sp, &sp32);

		return handle_send_req(priv->user, &sp.req,
				       sp.retries, sp.retry_time_ms);
	}
	case COMPAT_IPMICTL_RECEIVE_MSG:
	case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv recv64;
		struct compat_ipmi_recv recv32;

		if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
			return -EFAULT;

		get_compat_ipmi_recv(&recv64, &recv32);

		return handle_recv(priv,
				   cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
				   &recv64, copyout_recv32, compat_ptr(arg));
	}
	default:
		return ipmi_ioctl(filep, cmd, arg);
	}
}
#endif

static const struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= ipmi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_ipmi_ioctl,
#endif
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
	.llseek		= noop_llseek,
};

#define DEVICE_NAME "ipmidev"

static int ipmi_major;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device. Setting it to -1 will disable the"
		 " interface. Other values will set the major device number"
		 " to that value.");

/* Keep track of the devices that are registered. */
struct ipmi_reg_list {
	dev_t dev;
	struct list_head link;
};
static LIST_HEAD(reg_list);
static DEFINE_MUTEX(reg_list_mutex);

static const struct class ipmi_class = {
	.name = "ipmi",
};

static void ipmi_new_smi(int if_num, struct device *device)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("ipmi_devintf: Unable to create the ipmi class device link\n");
		return;
	}
	entry->dev = dev;

	mutex_lock(&reg_list_mutex);
	device_create(&ipmi_class, device, dev, NULL, "ipmi%d", if_num);
	list_add(&entry->link, &reg_list);
	mutex_unlock(&reg_list_mutex);
}

static void ipmi_smi_gone(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);
	struct ipmi_reg_list *entry;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry(entry, &reg_list, link) {
		if (entry->dev == dev) {
			list_del(&entry->link);
			kfree(entry);
			break;
		}
	}
	device_destroy(&ipmi_class, dev);
	mutex_unlock(&reg_list_mutex);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

static int __init init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	pr_info("ipmi device interface\n");

	rv = class_register(&ipmi_class);
	if (rv)
		return rv;

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_unregister(&ipmi_class);
		pr_err("ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_unregister(&ipmi_class);
		pr_warn("ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static void __exit cleanup_ipmi(void)
{
	struct ipmi_reg_list *entry, *entry2;

	mutex_lock(&reg_list_mutex);
	list_for_each_entry_safe(entry, entry2, &reg_list, link) {
		list_del(&entry->link);
		device_destroy(&ipmi_class, entry->dev);
		kfree(entry);
	}
	mutex_unlock(&reg_list_mutex);
	class_unregister(&ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
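
For reference, here is a minimal user-space sketch of how this character device is typically exercised: open the /dev/ipmiN node created by ipmi_new_smi(), queue a Get Device ID request to the BMC with IPMICTL_SEND_COMMAND, wait for the response with poll(), and fetch it with IPMICTL_RECEIVE_MSG_TRUNC. The ioctls, structures, and constants come from the <linux/ipmi.h> uapi headers; the device path, msgid value, and error handling are illustrative only and are not part of the driver above.

/* Hypothetical user-space example; build against the installed uapi headers. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int main(void)
{
	struct ipmi_system_interface_addr si = {
		.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
		.channel = IPMI_BMC_CHANNEL,
		.lun = 0,
	};
	unsigned char data[IPMI_MAX_MSG_LENGTH];
	struct ipmi_addr rsp_addr;
	struct ipmi_req req;
	struct ipmi_recv recv;
	struct pollfd pfd;
	int fd, i;

	fd = open("/dev/ipmi0", O_RDWR);	/* node created by ipmi_new_smi() */
	if (fd < 0) {
		perror("open /dev/ipmi0");
		return 1;
	}

	/* Queue a Get Device ID request (NetFn App, cmd 0x01) to the BMC. */
	memset(&req, 0, sizeof(req));
	req.addr = (unsigned char *)&si;
	req.addr_len = sizeof(si);
	req.msgid = 1;				/* echoed back in the response */
	req.msg.netfn = IPMI_NETFN_APP_REQUEST;
	req.msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	req.msg.data = NULL;
	req.msg.data_len = 0;
	if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0) {
		perror("IPMICTL_SEND_COMMAND");
		return 1;
	}

	/* Wait (up to 5 s) for the response to land on the receive queue. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) <= 0) {
		fprintf(stderr, "no response from the BMC\n");
		return 1;
	}

	/* Pull the response; the first data byte is the completion code. */
	memset(&recv, 0, sizeof(recv));
	recv.addr = (unsigned char *)&rsp_addr;
	recv.addr_len = sizeof(rsp_addr);
	recv.msg.data = data;
	recv.msg.data_len = sizeof(data);
	if (ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &recv) < 0) {
		perror("IPMICTL_RECEIVE_MSG_TRUNC");
		return 1;
	}

	printf("completion code 0x%02x, %d data bytes:",
	       data[0], recv.msg.data_len - 1);
	for (i = 1; i < recv.msg.data_len; i++)
		printf(" %02x", data[i]);
	printf("\n");

	close(fd);
	return 0;
}

The _TRUNC variant asks handle_recv() to copy a truncated payload (and report -EMSGSIZE via the rv2 path) when the user buffer is too small, instead of failing the ioctl and putting the message back on the queue.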