/*
 * ipmi_devintf.c
 *
 * Linux device interface for the IPMI message handler.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ipmi.h>
#include <asm/semaphore.h>
#include <linux/init.h>
#include <linux/device.h>

#define IPMI_DEVINTF_VERSION "v33"

struct ipmi_file_private
{
	ipmi_user_t          user;
	spinlock_t           recv_msg_lock;
	struct list_head     recv_msgs;
	struct file          *file;
	struct fasync_struct *fasync_queue;
	wait_queue_head_t    wait;
	struct semaphore     recv_sem;
	int                  default_retries;
	unsigned int         default_retry_time_ms;
};

static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void                 *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}

static unsigned int ipmi_poll(struct file *file, poll_table *wait)
{
	struct ipmi_file_private *priv = file->private_data;
	unsigned int             mask = 0;
	unsigned long            flags;

	poll_wait(file, &priv->wait, wait);

	spin_lock_irqsave(&priv->recv_msg_lock, flags);
	if (!list_empty(&(priv->recv_msgs)))
		mask |= (POLLIN | POLLRDNORM);

	spin_unlock_irqrestore(&priv->recv_msg_lock, flags);

	return mask;
}

static int ipmi_fasync(int fd, struct file *file, int on)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      result;

	result = fasync_helper(fd, file, on, &priv->fasync_queue);

	return (result);
}

static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};

static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;

	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		return rv;
	}

	file->private_data = priv;

	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	sema_init(&(priv->recv_sem), 1);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

	return 0;
}

static int ipmi_release(struct inode *inode, struct file *file)
{
	struct ipmi_file_private *priv = file->private_data;
	int                      rv;

	rv = ipmi_destroy_user(priv->user);
	if (rv)
		return rv;

	ipmi_fasync(-1, file, 0);

	/* Free any received messages still queued for this user. */
	while (!list_empty(&(priv->recv_msgs))) {
		struct ipmi_recv_msg *msg;

		msg = list_entry(priv->recv_msgs.next,
				 struct ipmi_recv_msg, link);
		list_del(&(msg->link));
		ipmi_free_recv_msg(msg);
	}

	kfree(priv);

	return 0;
}

/* Copy a send request in from user space and hand it to the message
   handler. */
static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int                    rv;
	struct ipmi_addr       addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return directly; we must jump to "out"
	   on error exits so that msg.data gets freed. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}

static int ipmi_ioctl(struct inode  *inode,
		      struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user              *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv     rsp;
		int                  addr_len;
		struct list_head     *entry;
		struct ipmi_recv_msg *msg;
		unsigned long        flags;

		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/* We claim a semaphore because we don't want two
		   users getting something from the queue at a time.
		   Since we have to release the spinlock before we can
		   copy the data to the user, it's possible another
		   user will grab something from the queue, too.  Then
		   the messages might get out of order if something
		   fails and the message gets put back onto the
		   queue.  This semaphore prevents that problem. */
		down(&(priv->recv_sem));

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len)
		{
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len))
			{
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		up(&(priv->recv_sem));
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		up(&(priv->recv_sem));
		break;

	recv_err:
		up(&(priv->recv_sem));
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		ipmi_set_my_address(priv->user, val);
		rv = 0;
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		val = ipmi_get_my_address(priv->user);

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		ipmi_set_my_LUN(priv->user, val);
		rv = 0;
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int val;

		val = ipmi_get_my_LUN(priv->user);

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}
	}

	return rv;
}


static struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= ipmi_ioctl,
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
};

#define DEVICE_NAME     "ipmidev"

static int ipmi_major = 0;
module_param(ipmi_major, int, 0);
MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device.  By"
		 " default, or if you set it to zero, it will choose the next"
		 " available device.  Setting it to -1 will disable the"
		 " interface.  Other values will set the major device number"
		 " to that value.");

static struct class_simple *ipmi_class;

static void ipmi_new_smi(int if_num)
{
	dev_t dev = MKDEV(ipmi_major, if_num);

	devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
		      "ipmidev/%d", if_num);

	class_simple_device_add(ipmi_class, dev, NULL, "ipmi%d", if_num);
}

static void ipmi_smi_gone(int if_num)
{
	class_simple_device_remove(MKDEV(ipmi_major, if_num));
	devfs_remove("ipmidev/%d", if_num);
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};

static __init int init_ipmi_devintf(void)
{
	int rv;

	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface version "
	       IPMI_DEVINTF_VERSION "\n");

	ipmi_class = class_simple_create(THIS_MODULE, "ipmi");
	if (IS_ERR(ipmi_class)) {
		printk(KERN_ERR "ipmi: can't register device class\n");
		return PTR_ERR(ipmi_class);
	}

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		class_simple_destroy(ipmi_class);
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	devfs_mk_dir(DEVICE_NAME);

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		class_simple_destroy(ipmi_class);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
module_init(init_ipmi_devintf);

static __exit void cleanup_ipmi(void)
{
	class_simple_destroy(ipmi_class);
	ipmi_smi_watcher_unregister(&smi_watcher);
	devfs_remove(DEVICE_NAME);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
module_exit(cleanup_ipmi);

MODULE_LICENSE("GPL");
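
/*
 * Illustrative user-space usage (a sketch only; this is not part of the
 * module and is not compiled here): one way a program might drive the
 * ioctl interface above, assuming the message handler has registered an
 * interface that appears as /dev/ipmi0 (or /dev/ipmidev/0 under devfs).
 * It sends a Get Device ID request (NetFn 0x06, command 0x01) to the
 * local BMC with IPMICTL_SEND_COMMAND, waits for the response with
 * poll(), and fetches it with IPMICTL_RECEIVE_MSG_TRUNC.  The structures
 * and constants are the user-visible ones from <linux/ipmi.h>; error
 * handling and response decoding are abbreviated.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <poll.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ipmi.h>
 *
 *	int main(void)
 *	{
 *		struct ipmi_system_interface_addr bmc_addr;
 *		struct ipmi_addr                  rsp_addr;
 *		struct ipmi_req                   req;
 *		struct ipmi_recv                  rsp;
 *		unsigned char                     data[IPMI_MAX_MSG_LENGTH];
 *		struct pollfd                     pfd;
 *		int                               fd, i;
 *
 *		fd = open("/dev/ipmi0", O_RDWR);
 *		if (fd < 0)
 *			return 1;
 *
 *		bmc_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *		bmc_addr.channel = IPMI_BMC_CHANNEL;
 *		bmc_addr.lun = 0;
 *
 *		req.addr = (unsigned char *) &bmc_addr;
 *		req.addr_len = sizeof(bmc_addr);
 *		req.msgid = 1;
 *		req.msg.netfn = 0x06;
 *		req.msg.cmd = 0x01;
 *		req.msg.data = NULL;
 *		req.msg.data_len = 0;
 *		if (ioctl(fd, IPMICTL_SEND_COMMAND, &req) < 0)
 *			return 1;
 *
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, 5000) <= 0)
 *			return 1;
 *
 *		rsp.addr = (unsigned char *) &rsp_addr;
 *		rsp.addr_len = sizeof(rsp_addr);
 *		rsp.msg.data = data;
 *		rsp.msg.data_len = sizeof(data);
 *		if (ioctl(fd, IPMICTL_RECEIVE_MSG_TRUNC, &rsp) < 0)
 *			return 1;
 *
 *		printf("completion code 0x%2.2x,", data[0]);
 *		for (i = 1; i < rsp.msg.data_len; i++)
 *			printf(" %2.2x", data[i]);
 *		printf("\n");
 *
 *		close(fd);
 *		return 0;
 *	}
 */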