/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>

#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.0"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

static int initialized = 0;

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_ipmi_root = NULL;
EXPORT_SYMBOL(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */

#define MAX_EVENTS_IN_QUEUE	25

/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT		60000


/*
 * The main "user" data structure.
 */
struct ipmi_user
{
        struct list_head link;

        /* Set to "0" when the user is destroyed. */
        int valid;

        struct kref refcount;

        /* The upper layer that handles receive messages. */
        struct ipmi_user_hndl *handler;
        void *handler_data;

        /* The interface this user is bound to. */
        ipmi_smi_t intf;

        /* Does this user receive IPMI events? */
        int gets_events;
};

struct cmd_rcvr
{
        struct list_head link;

        ipmi_user_t user;
        unsigned char netfn;
        unsigned char cmd;

        /*
         * This is used to form a linked list during mass deletion.
         * Since this is in an RCU list, we cannot use the link above
         * or change any data until the RCU period completes.  So we
         * use this next variable during mass deletion so we can have
         * a list and don't have to wait and restart the search on
         * every individual deletion of a command.
         */
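        /*
         * For example, ipmi_destroy_user() below unhooks each matching
         * receiver with list_del_rcu(), chains the victims through this
         * next pointer, calls synchronize_rcu() once, and only then
         * kfree()s the whole chain.
         */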
        struct cmd_rcvr *next;
};

struct seq_table
{
        unsigned int inuse : 1;
        unsigned int broadcast : 1;

        unsigned long timeout;
        unsigned long orig_timeout;
        unsigned int retries_left;

        /* To verify on an incoming send message response that this is
           the message that the response is for, we keep a sequence id
           and increment it every time we send a message. */
        long seqid;

        /* This is held so we can properly respond to the message on a
           timeout, and it is used to hold the temporary data for
           retransmission, too. */
        struct ipmi_recv_msg *recv_msg;
};

/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid. */
#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
        do {                                                            \
                seq = ((msgid >> 26) & 0x3f);                           \
                seqid = (msgid & 0x3fffff);                             \
        } while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)

struct ipmi_channel
{
        unsigned char medium;
        unsigned char protocol;

        /* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
           but may be changed by the user. */
        unsigned char address;

        /* My LUN.  This should generally stay the SMS LUN, but just in
           case... */
        unsigned char lun;
};

#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry
{
        char *name;
        struct ipmi_proc_entry *next;
};
#endif

struct bmc_device
{
        struct platform_device *dev;
        struct ipmi_device_id id;
        unsigned char guid[16];
        int guid_set;

        struct kref refcount;

        /* bmc device attributes */
        struct device_attribute device_id_attr;
        struct device_attribute provides_dev_sdrs_attr;
        struct device_attribute revision_attr;
        struct device_attribute firmware_rev_attr;
        struct device_attribute version_attr;
        struct device_attribute add_dev_support_attr;
        struct device_attribute manufacturer_id_attr;
        struct device_attribute product_id_attr;
        struct device_attribute guid_attr;
        struct device_attribute aux_firmware_rev_attr;
};

#define IPMI_IPMB_NUM_SEQ	64
#define IPMI_MAX_CHANNELS	16
struct ipmi_smi
{
        /* What interface number are we? */
        int intf_num;

        struct kref refcount;

        /* The list of upper layers that are using me.  seq_lock
         * protects this. */
        struct list_head users;

        /* Used for wake ups at startup. */
        wait_queue_head_t waitq;

        struct bmc_device *bmc;
        char *my_dev_name;

        /* This is the lower-layer's sender routine. */
        struct ipmi_smi_handlers *handlers;
        void *send_info;

#ifdef CONFIG_PROC_FS
        /* A list of proc entries for this interface, protected by
           proc_entry_lock. */
        spinlock_t proc_entry_lock;
        struct ipmi_proc_entry *proc_entries;
#endif

        /* Driver-model device for the system interface. */
        struct device *si_dev;

        /* A table of sequence numbers for this interface.  We use the
           sequence numbers for IPMB messages that go out of the
           interface to match them up with their responses.  A routine
           is called periodically to time the items in this list. */
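        /* A worked illustration of the msgid packing above:
           STORE_SEQ_IN_MSGID(5, 0x1234) yields (5 << 26) | 0x1234
           = 0x14001234, and GET_SEQ_FROM_MSGID() recovers seq 5 and
           seqid 0x1234 from that msgid. */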
        spinlock_t seq_lock;
        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
        int curr_seq;

        /* Messages that were delayed for some reason (out of memory,
           for instance), will go in here to be processed later in a
           periodic timer interrupt. */
        spinlock_t waiting_msgs_lock;
        struct list_head waiting_msgs;

        /* The list of command receivers that are registered for commands
           on this interface. */
        struct semaphore cmd_rcvrs_lock;
        struct list_head cmd_rcvrs;

        /* Events that were queued because no one was there to receive
           them. */
        spinlock_t events_lock; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int waiting_events_count; /* How many events in queue? */

        /* The event receiver for my BMC, only really used at panic
           shutdown as a place to store this. */
        unsigned char event_receiver;
        unsigned char event_receiver_lun;
        unsigned char local_sel_device;
        unsigned char local_event_generator;

        /* A cheap hack, if this is non-null and a message to an
           interface comes in with a NULL user, call this routine with
           it.  Note that the message will still be freed by the
           caller.  This only works on the system interface. */
        void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

        /* When we are scanning the channels for an SMI, this will
           tell which channel we are scanning. */
        int curr_channel;

        /* Channel information */
        struct ipmi_channel channels[IPMI_MAX_CHANNELS];

        /* Proc FS stuff. */
        struct proc_dir_entry *proc_dir;
        char proc_dir_name[10];

        spinlock_t counter_lock; /* For making counters atomic. */

        /* Commands we were asked to send that were invalid. */
        unsigned int sent_invalid_commands;

        /* Commands we sent to the MC. */
        unsigned int sent_local_commands;
        /* Responses from the MC that were delivered to a user. */
        unsigned int handled_local_responses;
        /* Responses from the MC that were not delivered to a user. */
        unsigned int unhandled_local_responses;

        /* Commands we sent out to the IPMB bus. */
        unsigned int sent_ipmb_commands;
        /* Commands sent on the IPMB that had errors on the SEND CMD */
        unsigned int sent_ipmb_command_errs;
        /* Each retransmit increments this count. */
        unsigned int retransmitted_ipmb_commands;
        /* When a message times out (runs out of retransmits) this is
           incremented. */
        unsigned int timed_out_ipmb_commands;

        /* This is like above, but for broadcasts.  Broadcasts are
           *not* included in the above count (they are expected to
           time out). */
        unsigned int timed_out_ipmb_broadcasts;

        /* Responses I have sent to the IPMB bus. */
        unsigned int sent_ipmb_responses;

        /* The response was delivered to the user. */
        unsigned int handled_ipmb_responses;
        /* The response had invalid data in it. */
        unsigned int invalid_ipmb_responses;
        /* The response didn't have anyone waiting for it. */
        unsigned int unhandled_ipmb_responses;

        /* Commands we sent out over the LAN interface. */
        unsigned int sent_lan_commands;
        /* Commands sent on the LAN that had errors on the SEND CMD */
        unsigned int sent_lan_command_errs;
        /* Each retransmit increments this count. */
        unsigned int retransmitted_lan_commands;
        /* When a message times out (runs out of retransmits) this is
           incremented. */
        unsigned int timed_out_lan_commands;

        /* Responses I have sent over the LAN interface. */
        unsigned int sent_lan_responses;

        /* The response was delivered to the user. */
        unsigned int handled_lan_responses;
        /* The response had invalid data in it. */
        unsigned int invalid_lan_responses;
        /* The response didn't have anyone waiting for it. */
        unsigned int unhandled_lan_responses;

        /* The command was delivered to the user. */
        unsigned int handled_commands;
        /* The command had invalid data in it. */
        unsigned int invalid_commands;
        /* The command didn't have anyone waiting for it. */
        unsigned int unhandled_commands;

        /* Invalid data in an event. */
        unsigned int invalid_events;
        /* Events that were received with the proper format. */
        unsigned int events;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

/* Used to mark an interface entry that cannot be used but is not a
 * free entry, either, primarily used at creation and deletion time so
 * a slot doesn't get reused too quickly. */
#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
                                   || (i == IPMI_INVALID_INTERFACE_ENTRY))

/**
 * The driver model view of the IPMI messaging driver.
 */
static struct device_driver ipmidriver = {
        .name = "ipmi",
        .bus = &platform_bus_type
};
static DEFINE_MUTEX(ipmidriver_mutex);

#define MAX_IPMI_INTERFACES 4
static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];

/* Directly protects the ipmi_interfaces data structure. */
static DEFINE_SPINLOCK(interfaces_lock);

/* List of watchers that want to know when smi's are added and
   deleted. */
static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
static DECLARE_RWSEM(smi_watchers_sem);


static void free_recv_msg_list(struct list_head *q)
{
        struct ipmi_recv_msg *msg, *msg2;

        list_for_each_entry_safe(msg, msg2, q, link) {
                list_del(&msg->link);
                ipmi_free_recv_msg(msg);
        }
}

static void clean_up_interface_data(ipmi_smi_t intf)
{
        int i;
        struct cmd_rcvr *rcvr, *rcvr2;
        struct list_head list;

        free_recv_msg_list(&intf->waiting_msgs);
        free_recv_msg_list(&intf->waiting_events);

        /* Wholesale remove all the entries from the list in the
         * interface and wait for RCU to know that none are in use. */
        down(&intf->cmd_rcvrs_lock);
        list_add_rcu(&list, &intf->cmd_rcvrs);
        list_del_rcu(&intf->cmd_rcvrs);
        up(&intf->cmd_rcvrs_lock);
        synchronize_rcu();

        list_for_each_entry_safe(rcvr, rcvr2, &list, link)
                kfree(rcvr);

        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if ((intf->seq_table[i].inuse)
                    && (intf->seq_table[i].recv_msg))
                {
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
        }
}

static void intf_free(struct kref *ref)
{
        ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);

        clean_up_interface_data(intf);
        kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
        int i;
        unsigned long flags;

        down_write(&smi_watchers_sem);
        list_add(&(watcher->link), &smi_watchers);
        up_write(&smi_watchers_sem);
        spin_lock_irqsave(&interfaces_lock, flags);
        for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
                ipmi_smi_t intf = ipmi_interfaces[i];
                if (IPMI_INVALID_INTERFACE(intf))
                        continue;
                spin_unlock_irqrestore(&interfaces_lock, flags);
                watcher->new_smi(i, intf->si_dev);
                spin_lock_irqsave(&interfaces_lock, flags);
        }
        spin_unlock_irqrestore(&interfaces_lock, flags);
        return 0;
}

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
        down_write(&smi_watchers_sem);
        list_del(&(watcher->link));
        up_write(&smi_watchers_sem);
        return 0;
}

static void
call_smi_watchers(int i, struct device *dev)
{
        struct ipmi_smi_watcher *w;

        down_read(&smi_watchers_sem);
        list_for_each_entry(w, &smi_watchers, link) {
                if (try_module_get(w->owner)) {
                        w->new_smi(i, dev);
                        module_put(w->owner);
                }
        }
        up_read(&smi_watchers_sem);
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
        if (addr1->addr_type != addr2->addr_type)
                return 0;

        if (addr1->channel != addr2->channel)
                return 0;

        if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                struct ipmi_system_interface_addr *smi_addr1
                        = (struct ipmi_system_interface_addr *) addr1;
                struct ipmi_system_interface_addr *smi_addr2
                        = (struct ipmi_system_interface_addr *) addr2;
                return (smi_addr1->lun == smi_addr2->lun);
        }

        if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
            || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
        {
                struct ipmi_ipmb_addr *ipmb_addr1
                        = (struct ipmi_ipmb_addr *) addr1;
                struct ipmi_ipmb_addr *ipmb_addr2
                        = (struct ipmi_ipmb_addr *) addr2;

                return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
                        && (ipmb_addr1->lun == ipmb_addr2->lun));
        }

        if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
                struct ipmi_lan_addr *lan_addr1
                        = (struct ipmi_lan_addr *) addr1;
                struct ipmi_lan_addr *lan_addr2
                        = (struct ipmi_lan_addr *) addr2;

                return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
                        && (lan_addr1->local_SWID == lan_addr2->local_SWID)
                        && (lan_addr1->session_handle
                            == lan_addr2->session_handle)
                        && (lan_addr1->lun == lan_addr2->lun));
        }

        return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
        if (len < sizeof(struct ipmi_system_interface_addr)) {
                return -EINVAL;
        }

        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                if (addr->channel != IPMI_BMC_CHANNEL)
                        return -EINVAL;
                return 0;
        }

        if ((addr->channel == IPMI_BMC_CHANNEL)
            || (addr->channel >= IPMI_MAX_CHANNELS)
            || (addr->channel < 0))
                return -EINVAL;

        if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
            || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
        {
                if (len < sizeof(struct ipmi_ipmb_addr)) {
                        return -EINVAL;
                }
                return 0;
        }

        if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
                if (len < sizeof(struct ipmi_lan_addr)) {
                        return -EINVAL;
                }
                return 0;
        }

        return -EINVAL;
}

unsigned int ipmi_addr_length(int addr_type)
{
        if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
                return sizeof(struct ipmi_system_interface_addr);

        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
            || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
        {
                return sizeof(struct ipmi_ipmb_addr);
        }

        if (addr_type == IPMI_LAN_ADDR_TYPE)
                return sizeof(struct ipmi_lan_addr);

        return 0;
}

static void deliver_response(struct ipmi_recv_msg *msg)
{
        if (! msg->user) {
                ipmi_smi_t intf = msg->user_msg_data;
                unsigned long flags;

                /* Special handling for NULL users. */
                if (intf->null_user_handler) {
                        intf->null_user_handler(intf, msg);
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->handled_local_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                } else {
                        /* No handler, so give up. */
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->unhandled_local_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                }
                ipmi_free_recv_msg(msg);
        } else {
                ipmi_user_t user = msg->user;
                user->handler->ipmi_recv_hndl(msg, user->handler_data);
        }
}

/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held. */
static int intf_next_seq(ipmi_smi_t intf,
                         struct ipmi_recv_msg *recv_msg,
                         unsigned long timeout,
                         int retries,
                         int broadcast,
                         unsigned char *seq,
                         long *seqid)
{
        int rv = 0;
        unsigned int i;

        for (i = intf->curr_seq;
             (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
             i = (i+1)%IPMI_IPMB_NUM_SEQ)
        {
                if (! intf->seq_table[i].inuse)
                        break;
        }

        if (! intf->seq_table[i].inuse) {
                intf->seq_table[i].recv_msg = recv_msg;

                /* Start with the maximum timeout, when the send response
                   comes in we will start the real timer. */
                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
                intf->seq_table[i].orig_timeout = timeout;
                intf->seq_table[i].retries_left = retries;
                intf->seq_table[i].broadcast = broadcast;
                intf->seq_table[i].inuse = 1;
                intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
                *seq = i;
                *seqid = intf->seq_table[i].seqid;
                intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
        } else {
                rv = -EAGAIN;
        }

        return rv;
}

/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against messages coming in after their timeout and the
   sequence number being reused). */
static int intf_find_seq(ipmi_smi_t intf,
                         unsigned char seq,
                         short channel,
                         unsigned char cmd,
                         unsigned char netfn,
                         struct ipmi_addr *addr,
                         struct ipmi_recv_msg **recv_msg)
{
        int rv = -ENODEV;
        unsigned long flags;

        if (seq >= IPMI_IPMB_NUM_SEQ)
                return -EINVAL;

        spin_lock_irqsave(&(intf->seq_lock), flags);
        if (intf->seq_table[seq].inuse) {
                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

                if ((msg->addr.channel == channel)
                    && (msg->msg.cmd == cmd)
                    && (msg->msg.netfn == netfn)
                    && (ipmi_addr_equal(addr, &(msg->addr))))
                {
                        *recv_msg = msg;
                        intf->seq_table[seq].inuse = 0;
                        rv = 0;
                }
        }
        spin_unlock_irqrestore(&(intf->seq_lock), flags);

        return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(ipmi_smi_t intf,
                                long msgid)
{
        int rv = -ENODEV;
        unsigned long flags;
        unsigned char seq;
        unsigned long seqid;


        GET_SEQ_FROM_MSGID(msgid, seq, seqid);

        spin_lock_irqsave(&(intf->seq_lock), flags);
        /* We do this verification because the user can be deleted
           while a message is outstanding. */
        if ((intf->seq_table[seq].inuse)
            && (intf->seq_table[seq].seqid == seqid))
        {
                struct seq_table *ent = &(intf->seq_table[seq]);
                ent->timeout = ent->orig_timeout;
                rv = 0;
        }
        spin_unlock_irqrestore(&(intf->seq_lock), flags);

        return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(ipmi_smi_t intf,
                        long msgid,
                        unsigned int err)
{
        int rv = -ENODEV;
        unsigned long flags;
        unsigned char seq;
        unsigned long seqid;
        struct ipmi_recv_msg *msg = NULL;


        GET_SEQ_FROM_MSGID(msgid, seq, seqid);

        spin_lock_irqsave(&(intf->seq_lock), flags);
        /* We do this verification because the user can be deleted
           while a message is outstanding. */
        if ((intf->seq_table[seq].inuse)
            && (intf->seq_table[seq].seqid == seqid))
        {
                struct seq_table *ent = &(intf->seq_table[seq]);

                ent->inuse = 0;
                msg = ent->recv_msg;
                rv = 0;
        }
        spin_unlock_irqrestore(&(intf->seq_lock), flags);

        if (msg) {
                msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
                msg->msg_data[0] = err;
                msg->msg.netfn |= 1; /* Convert to a response. */
                msg->msg.data_len = 1;
                msg->msg.data = msg->msg_data;
                deliver_response(msg);
        }

        return rv;
}


int ipmi_create_user(unsigned int if_num,
                     struct ipmi_user_hndl *handler,
                     void *handler_data,
                     ipmi_user_t *user)
{
        unsigned long flags;
        ipmi_user_t new_user;
        int rv = 0;
        ipmi_smi_t intf;

        /* There is no module usecount here, because it's not
           required.  Since this can only be used by and called from
           other modules, they will implicitly use this module, and
           thus this can't be removed unless the other modules are
           removed. */

        if (handler == NULL)
                return -EINVAL;

        /* Make sure the driver is actually initialized, this handles
           problems with initialization order. */
        if (!initialized) {
                rv = ipmi_init_msghandler();
                if (rv)
                        return rv;

                /* The init code doesn't return an error if it was turned
                   off, but it won't initialize.  Check that. */
                if (!initialized)
                        return -ENODEV;
        }

        new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
        if (! new_user)
                return -ENOMEM;

        spin_lock_irqsave(&interfaces_lock, flags);
        /* Validate the interface number before indexing the table. */
        if ((if_num >= MAX_IPMI_INTERFACES)
            || IPMI_INVALID_INTERFACE(ipmi_interfaces[if_num])) {
                spin_unlock_irqrestore(&interfaces_lock, flags);
                rv = -EINVAL;
                goto out_kfree;
        }
        intf = ipmi_interfaces[if_num];

        /* Note that each existing user holds a refcount to the interface. */
        kref_get(&intf->refcount);
        spin_unlock_irqrestore(&interfaces_lock, flags);

        kref_init(&new_user->refcount);
        new_user->handler = handler;
        new_user->handler_data = handler_data;
        new_user->intf = intf;
        new_user->gets_events = 0;

        if (!try_module_get(intf->handlers->owner)) {
                rv = -ENODEV;
                goto out_kref;
        }

        if (intf->handlers->inc_usecount) {
                rv = intf->handlers->inc_usecount(intf->send_info);
                if (rv) {
                        module_put(intf->handlers->owner);
                        goto out_kref;
                }
        }

        new_user->valid = 1;
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_add_rcu(&new_user->link, &intf->users);
        spin_unlock_irqrestore(&intf->seq_lock, flags);
        *user = new_user;
        return 0;

 out_kref:
        kref_put(&intf->refcount, intf_free);
 out_kfree:
        kfree(new_user);
        return rv;
}

static void free_user(struct kref *ref)
{
        ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
        kfree(user);
}

int ipmi_destroy_user(ipmi_user_t user)
{
        int rv = -ENODEV;
        ipmi_smi_t intf = user->intf;
        int i;
        unsigned long flags;
        struct cmd_rcvr *rcvr;
        struct cmd_rcvr *rcvrs = NULL;

        /* Mark the user invalid so nothing new is delivered to it. */
        user->valid = 0;

        /* Remove the user from the interface's sequence table. */
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_del_rcu(&user->link);

        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if (intf->seq_table[i].inuse
                    && (intf->seq_table[i].recv_msg->user == user))
                {
                        intf->seq_table[i].inuse = 0;
                }
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);

        /*
         * Remove the user from the command receiver's table.  First
         * we build a list of everything (not using the standard link,
         * since other things may be using it till we do
         * synchronize_rcu()) then free everything in that list.
         */
        down(&intf->cmd_rcvrs_lock);
        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
                if (rcvr->user == user) {
                        list_del_rcu(&rcvr->link);
                        rcvr->next = rcvrs;
                        rcvrs = rcvr;
                }
        }
        up(&intf->cmd_rcvrs_lock);
        synchronize_rcu();
        while (rcvrs) {
                rcvr = rcvrs;
                rcvrs = rcvr->next;
                kfree(rcvr);
        }

        module_put(intf->handlers->owner);
        if (intf->handlers->dec_usecount)
                intf->handlers->dec_usecount(intf->send_info);

        kref_put(&intf->refcount, intf_free);

        kref_put(&user->refcount, free_user);

        return rv;
}

void ipmi_get_version(ipmi_user_t user,
                      unsigned char *major,
                      unsigned char *minor)
{
        *major = ipmi_version_major(&user->intf->bmc->id);
        *minor = ipmi_version_minor(&user->intf->bmc->id);
}

int ipmi_set_my_address(ipmi_user_t user,
                        unsigned int channel,
                        unsigned char address)
{
        if (channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
        user->intf->channels[channel].address = address;
        return 0;
}

int ipmi_get_my_address(ipmi_user_t user,
                        unsigned int channel,
                        unsigned char *address)
{
        if (channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
        *address = user->intf->channels[channel].address;
        return 0;
}

int ipmi_set_my_LUN(ipmi_user_t user,
                    unsigned int channel,
                    unsigned char LUN)
{
        if (channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
        user->intf->channels[channel].lun = LUN & 0x3;
        return 0;
}

int ipmi_get_my_LUN(ipmi_user_t user,
                    unsigned int channel,
                    unsigned char *address)
{
        if (channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
        *address = user->intf->channels[channel].lun;
        return 0;
}

int ipmi_set_gets_events(ipmi_user_t user, int val)
{
        unsigned long flags;
        ipmi_smi_t intf = user->intf;
        struct ipmi_recv_msg *msg, *msg2;
        struct list_head msgs;

        INIT_LIST_HEAD(&msgs);

        spin_lock_irqsave(&intf->events_lock, flags);
        user->gets_events = val;

        if (val) {
                /* Deliver any queued events. */
                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
                        list_del(&msg->link);
                        list_add_tail(&msg->link, &msgs);
                }
        }

        /* Hold the events lock while doing this to preserve order. */
        list_for_each_entry_safe(msg, msg2, &msgs, link) {
                msg->user = user;
                kref_get(&user->refcount);
                deliver_response(msg);
        }

        spin_unlock_irqrestore(&intf->events_lock, flags);

        return 0;
}

static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
                                      unsigned char netfn,
                                      unsigned char cmd)
{
        struct cmd_rcvr *rcvr;

        list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
                if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
                        return rcvr;
        }
        return NULL;
}

int ipmi_register_for_cmd(ipmi_user_t user,
                          unsigned char netfn,
                          unsigned char cmd)
{
        ipmi_smi_t intf = user->intf;
        struct cmd_rcvr *rcvr;
        struct cmd_rcvr *entry;
        int rv = 0;


        rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
        if (! rcvr)
                return -ENOMEM;
        rcvr->cmd = cmd;
        rcvr->netfn = netfn;
        rcvr->user = user;

        down(&intf->cmd_rcvrs_lock);
        /* Make sure the command/netfn is not already registered. */
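        /* Readers in the receive path walk cmd_rcvrs with
           list_for_each_entry_rcu(); writers serialize on cmd_rcvrs_lock,
           so holding the lock here is enough for a safe lookup.  As an
           illustrative example (not taken from this file), a client that
           wanted to see incoming Get Device ID requests would call
           ipmi_register_for_cmd(user, IPMI_NETFN_APP_REQUEST,
           IPMI_GET_DEVICE_ID_CMD). */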
        entry = find_cmd_rcvr(intf, netfn, cmd);
        if (entry) {
                rv = -EBUSY;
                goto out_unlock;
        }

        list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

 out_unlock:
        up(&intf->cmd_rcvrs_lock);
        if (rv)
                kfree(rcvr);

        return rv;
}

int ipmi_unregister_for_cmd(ipmi_user_t user,
                            unsigned char netfn,
                            unsigned char cmd)
{
        ipmi_smi_t intf = user->intf;
        struct cmd_rcvr *rcvr;

        down(&intf->cmd_rcvrs_lock);
        /* Make sure the command/netfn is registered, and to this user. */
        rcvr = find_cmd_rcvr(intf, netfn, cmd);
        if ((rcvr) && (rcvr->user == user)) {
                list_del_rcu(&rcvr->link);
                up(&intf->cmd_rcvrs_lock);
                synchronize_rcu();
                kfree(rcvr);
                return 0;
        } else {
                up(&intf->cmd_rcvrs_lock);
                return -ENOENT;
        }
}

void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
{
        ipmi_smi_t intf = user->intf;
        intf->handlers->set_run_to_completion(intf->send_info, val);
}

static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
        unsigned char csum = 0;

        for (; size > 0; size--, data++)
                csum += *data;

        return -csum;
}

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
                                   struct kernel_ipmi_msg *msg,
                                   struct ipmi_ipmb_addr *ipmb_addr,
                                   long msgid,
                                   unsigned char ipmb_seq,
                                   int broadcast,
                                   unsigned char source_address,
                                   unsigned char source_lun)
{
        int i = broadcast;

        /* Format the IPMB header data. */
        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
        smi_msg->data[2] = ipmb_addr->channel;
        if (broadcast)
                smi_msg->data[3] = 0;
        smi_msg->data[i+3] = ipmb_addr->slave_addr;
        smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
        smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
        smi_msg->data[i+6] = source_address;
        smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
        smi_msg->data[i+8] = msg->cmd;

        /* Now tack on the data to the message. */
        if (msg->data_len > 0)
                memcpy(&(smi_msg->data[i+9]), msg->data,
                       msg->data_len);
        smi_msg->data_size = msg->data_len + 9;

        /* Now calculate the checksum and tack it on. */
        smi_msg->data[i+smi_msg->data_size]
                = ipmb_checksum(&(smi_msg->data[i+6]),
                                smi_msg->data_size-6);

        /* Add on the checksum size and the offset from the
           broadcast. */
        smi_msg->data_size += 1 + i;

        smi_msg->msgid = msgid;
}

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
                                  struct kernel_ipmi_msg *msg,
                                  struct ipmi_lan_addr *lan_addr,
                                  long msgid,
                                  unsigned char ipmb_seq,
                                  unsigned char source_lun)
{
        /* Format the LAN header data. */
        smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_msg->data[1] = IPMI_SEND_MSG_CMD;
        smi_msg->data[2] = lan_addr->channel;
        smi_msg->data[3] = lan_addr->session_handle;
        smi_msg->data[4] = lan_addr->remote_SWID;
        smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
        smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
        smi_msg->data[7] = lan_addr->local_SWID;
        smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
        smi_msg->data[9] = msg->cmd;

        /* Now tack on the data to the message. */
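        /* Layout recap of the frame assembled above: bytes 0-2 are the
           Send Message header (netfn/LUN, cmd, channel), 3 the session
           handle, 4 the remote SWID, 5 netfn/LUN, 6 a checksum over
           bytes 4-5, 7 the local SWID, 8 rqSeq/LUN, 9 the cmd, then the
           payload copied below and a final checksum computed over
           everything from byte 7 on. */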
        if (msg->data_len > 0)
                memcpy(&(smi_msg->data[10]), msg->data,
                       msg->data_len);
        smi_msg->data_size = msg->data_len + 10;

        /* Now calculate the checksum and tack it on. */
        smi_msg->data[smi_msg->data_size]
                = ipmb_checksum(&(smi_msg->data[7]),
                                smi_msg->data_size-7);

        /* Add on the checksum size. */
        smi_msg->data_size += 1;

        smi_msg->msgid = msgid;
}

/* Separate from ipmi_request so that the user does not have to be
   supplied in certain circumstances (mainly at panic time).  If
   messages are supplied, they will be freed, even if an error
   occurs. */
static int i_ipmi_request(ipmi_user_t user,
                          ipmi_smi_t intf,
                          struct ipmi_addr *addr,
                          long msgid,
                          struct kernel_ipmi_msg *msg,
                          void *user_msg_data,
                          void *supplied_smi,
                          struct ipmi_recv_msg *supplied_recv,
                          int priority,
                          unsigned char source_address,
                          unsigned char source_lun,
                          int retries,
                          unsigned int retry_time_ms)
{
        int rv = 0;
        struct ipmi_smi_msg *smi_msg;
        struct ipmi_recv_msg *recv_msg;
        unsigned long flags;


        if (supplied_recv) {
                recv_msg = supplied_recv;
        } else {
                recv_msg = ipmi_alloc_recv_msg();
                if (recv_msg == NULL) {
                        return -ENOMEM;
                }
        }
        recv_msg->user_msg_data = user_msg_data;

        if (supplied_smi) {
                smi_msg = (struct ipmi_smi_msg *) supplied_smi;
        } else {
                smi_msg = ipmi_alloc_smi_msg();
                if (smi_msg == NULL) {
                        ipmi_free_recv_msg(recv_msg);
                        return -ENOMEM;
                }
        }

        recv_msg->user = user;
        if (user)
                kref_get(&user->refcount);
        recv_msg->msgid = msgid;
        /* Store the message to send in the receive message so timeout
           responses can get the proper response data. */
        recv_msg->msg = *msg;

        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                struct ipmi_system_interface_addr *smi_addr;

                if (msg->netfn & 1) {
                        /* Responses are not allowed to the SMI. */
                        rv = -EINVAL;
                        goto out_err;
                }

                smi_addr = (struct ipmi_system_interface_addr *) addr;
                if (smi_addr->lun > 3) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

                if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
                    && ((msg->cmd == IPMI_SEND_MSG_CMD)
                        || (msg->cmd == IPMI_GET_MSG_CMD)
                        || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
                {
                        /* We don't let the user do these, since we manage
                           the sequence numbers. */
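                        /* Send Message, Get Message and Read Event Message
                           Buffer are used internally by the IPMI driver to
                           move IPMB/LAN traffic and events, and the message
                           handler relies on managing their sequence numbers
                           itself; a raw copy injected by a user would
                           bypass that bookkeeping. */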
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EMSGSIZE;
                        goto out_err;
                }

                smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
                smi_msg->data[1] = msg->cmd;
                smi_msg->msgid = msgid;
                smi_msg->user_data = recv_msg;
                if (msg->data_len > 0)
                        memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
                smi_msg->data_size = msg->data_len + 2;
                spin_lock_irqsave(&intf->counter_lock, flags);
                intf->sent_local_commands++;
                spin_unlock_irqrestore(&intf->counter_lock, flags);
        } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
                   || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
        {
                struct ipmi_ipmb_addr *ipmb_addr;
                unsigned char ipmb_seq;
                long seqid;
                int broadcast = 0;

                if (addr->channel >= IPMI_MAX_CHANNELS) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                if (intf->channels[addr->channel].medium
                    != IPMI_CHANNEL_MEDIUM_IPMB)
                {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                if (retries < 0) {
                        if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
                                retries = 0; /* Don't retry broadcasts. */
                        else
                                retries = 4;
                }
                if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
                        /* Broadcasts add a zero at the beginning of the
                           message, but otherwise they are the same as an
                           IPMB address. */
                        addr->addr_type = IPMI_IPMB_ADDR_TYPE;
                        broadcast = 1;
                }


                /* Default to 1 second retries. */
                if (retry_time_ms == 0)
                        retry_time_ms = 1000;

                /* 9 for the header and 1 for the checksum, plus
                   possibly one for the broadcast. */
                if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EMSGSIZE;
                        goto out_err;
                }

                ipmb_addr = (struct ipmi_ipmb_addr *) addr;
                if (ipmb_addr->lun > 3) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

                if (recv_msg->msg.netfn & 0x1) {
                        /* It's a response, so use the user's sequence
                           from msgid. */
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_ipmb_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
                                        msgid, broadcast,
                                        source_address, source_lun);

                        /* Save the receive message so we can use it
                           to deliver the response. */
                        smi_msg->user_data = recv_msg;
                } else {
                        /* It's a command, so get a sequence for it. */
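                        /* The (seq, seqid) pair handed back by
                           intf_next_seq() below is folded into the msgid of
                           the Send Message request via STORE_SEQ_IN_MSGID(),
                           so the send-response path can restart the real
                           timer with intf_start_seq_timer() and send errors
                           can be matched back with intf_err_seq(). */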

                        spin_lock_irqsave(&(intf->seq_lock), flags);

                        spin_lock(&intf->counter_lock);
                        intf->sent_ipmb_commands++;
                        spin_unlock(&intf->counter_lock);

                        /* Create a sequence number with a 1 second
                           timeout and 4 retries. */
                        rv = intf_next_seq(intf,
                                           recv_msg,
                                           retry_time_ms,
                                           retries,
                                           broadcast,
                                           &ipmb_seq,
                                           &seqid);
                        if (rv) {
                                /* We have used up all the sequence numbers,
                                   probably, so abort. */
                                spin_unlock_irqrestore(&(intf->seq_lock),
                                                       flags);
                                goto out_err;
                        }

                        /* Store the sequence number in the message,
                           so that when the send message response
                           comes back we can start the timer. */
                        format_ipmb_msg(smi_msg, msg, ipmb_addr,
                                        STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
                                        ipmb_seq, broadcast,
                                        source_address, source_lun);

                        /* Copy the message into the recv message data, so we
                           can retransmit it later if necessary. */
                        memcpy(recv_msg->msg_data, smi_msg->data,
                               smi_msg->data_size);
                        recv_msg->msg.data = recv_msg->msg_data;
                        recv_msg->msg.data_len = smi_msg->data_size;

                        /* We don't unlock until here, because we need
                           to copy the completed message into the
                           recv_msg before we release the lock.
                           Otherwise, race conditions may bite us.  I
                           know that's pretty paranoid, but I prefer
                           to be correct. */
                        spin_unlock_irqrestore(&(intf->seq_lock), flags);
                }
        } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
                struct ipmi_lan_addr *lan_addr;
                unsigned char ipmb_seq;
                long seqid;

                if (addr->channel >= IPMI_MAX_CHANNELS) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                if ((intf->channels[addr->channel].medium
                     != IPMI_CHANNEL_MEDIUM_8023LAN)
                    && (intf->channels[addr->channel].medium
                        != IPMI_CHANNEL_MEDIUM_ASYNC))
                {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                retries = 4;

                /* Default to 1 second retries. */
                if (retry_time_ms == 0)
                        retry_time_ms = 1000;

                /* 11 for the header and 1 for the checksum. */
                if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EMSGSIZE;
                        goto out_err;
                }

                lan_addr = (struct ipmi_lan_addr *) addr;
                if (lan_addr->lun > 3) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        rv = -EINVAL;
                        goto out_err;
                }

                memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

                if (recv_msg->msg.netfn & 0x1) {
                        /* It's a response, so use the user's sequence
                           from msgid. */
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_lan_responses++;
                        spin_unlock_irqrestore(&intf->counter_lock, flags);
                        format_lan_msg(smi_msg, msg, lan_addr, msgid,
                                       msgid, source_lun);

                        /* Save the receive message so we can use it
                           to deliver the response. */
                        smi_msg->user_data = recv_msg;
                } else {
                        /* It's a command, so get a sequence for it. */

                        spin_lock_irqsave(&(intf->seq_lock), flags);

                        spin_lock(&intf->counter_lock);
                        intf->sent_lan_commands++;
                        spin_unlock(&intf->counter_lock);

                        /* Create a sequence number with a 1 second
                           timeout and 4 retries. */
                        rv = intf_next_seq(intf,
                                           recv_msg,
                                           retry_time_ms,
                                           retries,
                                           0,
                                           &ipmb_seq,
                                           &seqid);
                        if (rv) {
                                /* We have used up all the sequence numbers,
                                   probably, so abort. */
                                spin_unlock_irqrestore(&(intf->seq_lock),
                                                       flags);
                                goto out_err;
                        }

                        /* Store the sequence number in the message,
                           so that when the send message response
                           comes back we can start the timer. */
                        format_lan_msg(smi_msg, msg, lan_addr,
                                       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
                                       ipmb_seq, source_lun);

                        /* Copy the message into the recv message data, so we
                           can retransmit it later if necessary. */
                        memcpy(recv_msg->msg_data, smi_msg->data,
                               smi_msg->data_size);
                        recv_msg->msg.data = recv_msg->msg_data;
                        recv_msg->msg.data_len = smi_msg->data_size;

                        /* We don't unlock until here, because we need
                           to copy the completed message into the
                           recv_msg before we release the lock.
                           Otherwise, race conditions may bite us.  I
                           know that's pretty paranoid, but I prefer
                           to be correct. */
                        spin_unlock_irqrestore(&(intf->seq_lock), flags);
                }
        } else {
                /* Unknown address type. */
                spin_lock_irqsave(&intf->counter_lock, flags);
                intf->sent_invalid_commands++;
                spin_unlock_irqrestore(&intf->counter_lock, flags);
                rv = -EINVAL;
                goto out_err;
        }

#ifdef DEBUG_MSGING
        {
                int m;
                for (m = 0; m < smi_msg->data_size; m++)
                        printk(" %2.2x", smi_msg->data[m]);
                printk("\n");
        }
#endif
        intf->handlers->sender(intf->send_info, smi_msg, priority);

        return 0;

 out_err:
        ipmi_free_smi_msg(smi_msg);
        ipmi_free_recv_msg(recv_msg);
        return rv;
}

static int check_addr(ipmi_smi_t intf,
                      struct ipmi_addr *addr,
                      unsigned char *saddr,
                      unsigned char *lun)
{
        if (addr->channel >= IPMI_MAX_CHANNELS)
                return -EINVAL;
        *lun = intf->channels[addr->channel].lun;
        *saddr = intf->channels[addr->channel].address;
        return 0;
}

int ipmi_request_settime(ipmi_user_t user,
                         struct ipmi_addr *addr,
                         long msgid,
                         struct kernel_ipmi_msg *msg,
                         void *user_msg_data,
                         int priority,
                         int retries,
                         unsigned int retry_time_ms)
{
        unsigned char saddr, lun;
        int rv;

        if (! user)
                return -EINVAL;
        rv = check_addr(user->intf, addr, &saddr, &lun);
        if (rv)
                return rv;
        return i_ipmi_request(user,
                              user->intf,
                              addr,
                              msgid,
                              msg,
                              user_msg_data,
                              NULL, NULL,
                              priority,
                              saddr,
                              lun,
                              retries,
                              retry_time_ms);
}

int ipmi_request_supply_msgs(ipmi_user_t user,
                             struct ipmi_addr *addr,
                             long msgid,
                             struct kernel_ipmi_msg *msg,
                             void *user_msg_data,
                             void *supplied_smi,
                             struct ipmi_recv_msg *supplied_recv,
                             int priority)
{
        unsigned char saddr, lun;
        int rv;
        if (! user)
                return -EINVAL;
        rv = check_addr(user->intf, addr, &saddr, &lun);
        if (rv)
                return rv;
        return i_ipmi_request(user,
                              user->intf,
                              addr,
                              msgid,
                              msg,
                              user_msg_data,
                              supplied_smi,
                              supplied_recv,
                              priority,
                              saddr,
                              lun,
                              -1, 0);
}

static int ipmb_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        char *out = (char *) page;
        ipmi_smi_t intf = data;
        int i;
        int rv = 0;

        for (i = 0; i < IPMI_MAX_CHANNELS; i++)
                rv += sprintf(out+rv, "%x ", intf->channels[i].address);
        out[rv-1] = '\n'; /* Replace the final space with a newline */
        out[rv] = '\0';
        rv++;
        return rv;
}

static int version_file_read_proc(char *page, char **start, off_t off,
                                  int count, int *eof, void *data)
{
        char *out = (char *) page;
        ipmi_smi_t intf = data;

        return sprintf(out, "%d.%d\n",
                       ipmi_version_major(&intf->bmc->id),
                       ipmi_version_minor(&intf->bmc->id));
}

static int stat_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
{
        char *out = (char *) page;
        ipmi_smi_t intf = data;

        out += sprintf(out, "sent_invalid_commands: %d\n",
                       intf->sent_invalid_commands);
        out += sprintf(out, "sent_local_commands: %d\n",
                       intf->sent_local_commands);
        out += sprintf(out, "handled_local_responses: %d\n",
                       intf->handled_local_responses);
        out += sprintf(out, "unhandled_local_responses: %d\n",
                       intf->unhandled_local_responses);
        out += sprintf(out, "sent_ipmb_commands: %d\n",
                       intf->sent_ipmb_commands);
        out += sprintf(out, "sent_ipmb_command_errs: %d\n",
                       intf->sent_ipmb_command_errs);
        out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
                       intf->retransmitted_ipmb_commands);
        out += sprintf(out, "timed_out_ipmb_commands: %d\n",
                       intf->timed_out_ipmb_commands);
        out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
                       intf->timed_out_ipmb_broadcasts);
        out += sprintf(out, "sent_ipmb_responses: %d\n",
                       intf->sent_ipmb_responses);
        out += sprintf(out, "handled_ipmb_responses: %d\n",
                       intf->handled_ipmb_responses);
        out += sprintf(out, "invalid_ipmb_responses: %d\n",
                       intf->invalid_ipmb_responses);
        out += sprintf(out, "unhandled_ipmb_responses: %d\n",
                       intf->unhandled_ipmb_responses);
        out += sprintf(out, "sent_lan_commands: %d\n",
                       intf->sent_lan_commands);
        out += sprintf(out, "sent_lan_command_errs: %d\n",
                       intf->sent_lan_command_errs);
        out += sprintf(out, "retransmitted_lan_commands: %d\n",
                       intf->retransmitted_lan_commands);
        out += sprintf(out, "timed_out_lan_commands: %d\n",
                       intf->timed_out_lan_commands);
        out += sprintf(out, "sent_lan_responses: %d\n",
                       intf->sent_lan_responses);
        out += sprintf(out, "handled_lan_responses: %d\n",
                       intf->handled_lan_responses);
        out += sprintf(out, "invalid_lan_responses: %d\n",
                       intf->invalid_lan_responses);
        out += sprintf(out, "unhandled_lan_responses: %d\n",
                       intf->unhandled_lan_responses);
        out += sprintf(out, "handled_commands: %d\n",
                       intf->handled_commands);
        out += sprintf(out, "invalid_commands: %d\n",
                       intf->invalid_commands);
        out += sprintf(out, "unhandled_commands: %d\n",
                       intf->unhandled_commands);
        out += sprintf(out, "invalid_events: %d\n",
                       intf->invalid_events);
        out += sprintf(out, "events: %d\n",
                       intf->events);

        return (out - ((char *) page));
}

int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
                            read_proc_t *read_proc, write_proc_t *write_proc,
                            void *data, struct module *owner)
{
        int rv = 0;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *file;
        struct ipmi_proc_entry *entry;

        /* Create a list element. */
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
        entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
        if (!entry->name) {
                kfree(entry);
                return -ENOMEM;
        }
        strcpy(entry->name, name);

        file = create_proc_entry(name, 0, smi->proc_dir);
        if (!file) {
                kfree(entry->name);
                kfree(entry);
                rv = -ENOMEM;
        } else {
                file->nlink = 1;
                file->data = data;
                file->read_proc = read_proc;
                file->write_proc = write_proc;
                file->owner = owner;

                spin_lock(&smi->proc_entry_lock);
                /* Stick it on the list. */
                entry->next = smi->proc_entries;
                smi->proc_entries = entry;
                spin_unlock(&smi->proc_entry_lock);
        }
#endif /* CONFIG_PROC_FS */

        return rv;
}

static int add_proc_entries(ipmi_smi_t smi, int num)
{
        int rv = 0;

#ifdef CONFIG_PROC_FS
        sprintf(smi->proc_dir_name, "%d", num);
        smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
        if (!smi->proc_dir)
                rv = -ENOMEM;
        else {
                smi->proc_dir->owner = THIS_MODULE;
        }

        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "stats",
                                             stat_file_read_proc, NULL,
                                             smi, THIS_MODULE);

        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "ipmb",
                                             ipmb_file_read_proc, NULL,
                                             smi, THIS_MODULE);

        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "version",
                                             version_file_read_proc, NULL,
                                             smi, THIS_MODULE);
#endif /* CONFIG_PROC_FS */

        return rv;
}

static void remove_proc_entries(ipmi_smi_t smi)
{
#ifdef CONFIG_PROC_FS
        struct ipmi_proc_entry *entry;

        spin_lock(&smi->proc_entry_lock);
        while (smi->proc_entries) {
                entry = smi->proc_entries;
                smi->proc_entries = entry->next;

                remove_proc_entry(entry->name, smi->proc_dir);
                kfree(entry->name);
                kfree(entry);
        }
        spin_unlock(&smi->proc_entry_lock);
        remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
}

static int __find_bmc_guid(struct device *dev, void *data)
{
        unsigned char *id = data;
        struct bmc_device *bmc = dev_get_drvdata(dev);
        return memcmp(bmc->guid, id, 16) == 0;
}

static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
                                             unsigned char *guid)
{
        struct device *dev;

        dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
        if (dev)
                return dev_get_drvdata(dev);
        else
                return NULL;
}

struct prod_dev_id {
        unsigned int product_id;
        unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
        struct prod_dev_id *id = data;
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return (bmc->id.product_id == id->product_id
                && bmc->id.device_id == id->device_id);
}

static struct bmc_device *ipmi_find_bmc_prod_dev_id(
        struct device_driver *drv,
        unsigned char product_id, unsigned char device_id)
{
        struct prod_dev_id id = {
                .product_id = product_id,
                .device_id = device_id,
        };
        struct device *dev;

        dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
        if (dev)
                return dev_get_drvdata(dev);
        else
                return NULL;
}

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}

static ssize_t provides_dev_sdrs_show(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 10, "%u\n",
                        (bmc->id.device_revision & 0x80) >> 7);
}

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 20, "%u\n",
                        bmc->id.device_revision & 0x0F);
}

static ssize_t firmware_rev_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
                        bmc->id.firmware_revision_2);
}

static ssize_t ipmi_version_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 20, "%u.%u\n",
                        ipmi_version_major(&bmc->id),
                        ipmi_version_minor(&bmc->id));
}

static ssize_t add_dev_support_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 10, "0x%02x\n",
                        bmc->id.additional_device_support);
}

static ssize_t manufacturer_id_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}

static ssize_t product_id_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}

static ssize_t aux_firmware_rev_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
                        bmc->id.aux_firmware_revision[3],
                        bmc->id.aux_firmware_revision[2],
                        bmc->id.aux_firmware_revision[1],
                        bmc->id.aux_firmware_revision[0]);
}

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct bmc_device *bmc = dev_get_drvdata(dev);

        return snprintf(buf, 100, "%Lx%Lx\n",
                        (long long) bmc->guid[0],
                        (long long) bmc->guid[8]);
}

static void
cleanup_bmc_device(struct kref *ref)
{
        struct bmc_device *bmc;

        bmc = container_of(ref, struct bmc_device, refcount);

        device_remove_file(&bmc->dev->dev,
                           &bmc->device_id_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->provides_dev_sdrs_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->revision_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->firmware_rev_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->version_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->add_dev_support_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->manufacturer_id_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->product_id_attr);
        if (bmc->id.aux_firmware_revision_set)
                device_remove_file(&bmc->dev->dev,
                                   &bmc->aux_firmware_rev_attr);
        if (bmc->guid_set)
                device_remove_file(&bmc->dev->dev,
                                   &bmc->guid_attr);
        platform_device_unregister(bmc->dev);
        kfree(bmc);
}

static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
        struct bmc_device *bmc = intf->bmc;

        sysfs_remove_link(&intf->si_dev->kobj, "bmc");
        if (intf->my_dev_name) {
                sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
                kfree(intf->my_dev_name);
                intf->my_dev_name = NULL;
        }

        mutex_lock(&ipmidriver_mutex);
        kref_put(&bmc->refcount, cleanup_bmc_device);
        mutex_unlock(&ipmidriver_mutex);
}

static int ipmi_bmc_register(ipmi_smi_t intf)
{
        int rv;
        struct bmc_device *bmc = intf->bmc;
        struct bmc_device *old_bmc;
        int size;
        char dummy[1];

        mutex_lock(&ipmidriver_mutex);

        /*
         * Try to find if there is a bmc_device struct
         * representing the interfaced BMC already
         */
        if (bmc->guid_set)
                old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
        else
                old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
                                                    bmc->id.product_id,
                                                    bmc->id.device_id);

        /*
         * If there is already a bmc_device, free the new one,
         * otherwise register the new BMC device
         */
        if (old_bmc) {
                kfree(bmc);
                intf->bmc = old_bmc;
                bmc = old_bmc;

                kref_get(&bmc->refcount);
                mutex_unlock(&ipmidriver_mutex);

                printk(KERN_INFO
                       "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
                       " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
                       bmc->id.manufacturer_id,
                       bmc->id.product_id,
                       bmc->id.device_id);
        } else {
                bmc->dev = platform_device_alloc("ipmi_bmc",
                                                 bmc->id.device_id);
                if (! bmc->dev) {
                        /* Don't leave the driver mutex held on failure. */
                        mutex_unlock(&ipmidriver_mutex);
                        printk(KERN_ERR
                               "ipmi_msghandler:"
                               " Unable to allocate platform device\n");
                        return -ENOMEM;
                }
                bmc->dev->dev.driver = &ipmidriver;
                dev_set_drvdata(&bmc->dev->dev, bmc);
                kref_init(&bmc->refcount);

                rv = platform_device_register(bmc->dev);
                mutex_unlock(&ipmidriver_mutex);
                if (rv) {
                        printk(KERN_ERR
                               "ipmi_msghandler:"
                               " Unable to register bmc device: %d\n",
                               rv);
                        /* Don't go to out_err, you can only do that if
                           the device is registered already. */
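                        /* out_err would call ipmi_bmc_unregister(), which
                           removes sysfs links that have not been created
                           yet and drops the BMC refcount through a path
                           that assumes the platform device is registered. */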
*/ 2011 return rv; 2012 } 2013 2014 bmc->device_id_attr.attr.name = "device_id"; 2015 bmc->device_id_attr.attr.owner = THIS_MODULE; 2016 bmc->device_id_attr.attr.mode = S_IRUGO; 2017 bmc->device_id_attr.show = device_id_show; 2018 2019 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; 2020 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; 2021 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; 2022 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; 2023 2024 2025 bmc->revision_attr.attr.name = "revision"; 2026 bmc->revision_attr.attr.owner = THIS_MODULE; 2027 bmc->revision_attr.attr.mode = S_IRUGO; 2028 bmc->revision_attr.show = revision_show; 2029 2030 bmc->firmware_rev_attr.attr.name = "firmware_revision"; 2031 bmc->firmware_rev_attr.attr.owner = THIS_MODULE; 2032 bmc->firmware_rev_attr.attr.mode = S_IRUGO; 2033 bmc->firmware_rev_attr.show = firmware_rev_show; 2034 2035 bmc->version_attr.attr.name = "ipmi_version"; 2036 bmc->version_attr.attr.owner = THIS_MODULE; 2037 bmc->version_attr.attr.mode = S_IRUGO; 2038 bmc->version_attr.show = ipmi_version_show; 2039 2040 bmc->add_dev_support_attr.attr.name 2041 = "additional_device_support"; 2042 bmc->add_dev_support_attr.attr.owner = THIS_MODULE; 2043 bmc->add_dev_support_attr.attr.mode = S_IRUGO; 2044 bmc->add_dev_support_attr.show = add_dev_support_show; 2045 2046 bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; 2047 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; 2048 bmc->manufacturer_id_attr.attr.mode = S_IRUGO; 2049 bmc->manufacturer_id_attr.show = manufacturer_id_show; 2050 2051 bmc->product_id_attr.attr.name = "product_id"; 2052 bmc->product_id_attr.attr.owner = THIS_MODULE; 2053 bmc->product_id_attr.attr.mode = S_IRUGO; 2054 bmc->product_id_attr.show = product_id_show; 2055 2056 bmc->guid_attr.attr.name = "guid"; 2057 bmc->guid_attr.attr.owner = THIS_MODULE; 2058 bmc->guid_attr.attr.mode = S_IRUGO; 2059 bmc->guid_attr.show = guid_show; 2060 2061 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; 2062 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; 2063 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; 2064 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; 2065 2066 device_create_file(&bmc->dev->dev, 2067 &bmc->device_id_attr); 2068 device_create_file(&bmc->dev->dev, 2069 &bmc->provides_dev_sdrs_attr); 2070 device_create_file(&bmc->dev->dev, 2071 &bmc->revision_attr); 2072 device_create_file(&bmc->dev->dev, 2073 &bmc->firmware_rev_attr); 2074 device_create_file(&bmc->dev->dev, 2075 &bmc->version_attr); 2076 device_create_file(&bmc->dev->dev, 2077 &bmc->add_dev_support_attr); 2078 device_create_file(&bmc->dev->dev, 2079 &bmc->manufacturer_id_attr); 2080 device_create_file(&bmc->dev->dev, 2081 &bmc->product_id_attr); 2082 if (bmc->id.aux_firmware_revision_set) 2083 device_create_file(&bmc->dev->dev, 2084 &bmc->aux_firmware_rev_attr); 2085 if (bmc->guid_set) 2086 device_create_file(&bmc->dev->dev, 2087 &bmc->guid_attr); 2088 2089 printk(KERN_INFO 2090 "ipmi: Found new BMC (man_id: 0x%6.6x, " 2091 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2092 bmc->id.manufacturer_id, 2093 bmc->id.product_id, 2094 bmc->id.device_id); 2095 } 2096 2097 /* 2098 * create symlink from system interface device to bmc device 2099 * and back. 
2100 */ 2101 rv = sysfs_create_link(&intf->si_dev->kobj, 2102 &bmc->dev->dev.kobj, "bmc"); 2103 if (rv) { 2104 printk(KERN_ERR 2105 "ipmi_msghandler: Unable to create bmc symlink: %d\n", 2106 rv); 2107 goto out_err; 2108 } 2109 2110 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); 2111 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); 2112 if (!intf->my_dev_name) { 2113 rv = -ENOMEM; 2114 printk(KERN_ERR 2115 "ipmi_msghandler: allocate link from BMC: %d\n", 2116 rv); 2117 goto out_err; 2118 } 2119 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); 2120 2121 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, 2122 intf->my_dev_name); 2123 if (rv) { 2124 kfree(intf->my_dev_name); 2125 intf->my_dev_name = NULL; 2126 printk(KERN_ERR 2127 "ipmi_msghandler:" 2128 " Unable to create symlink to bmc: %d\n", 2129 rv); 2130 goto out_err; 2131 } 2132 2133 return 0; 2134 2135 out_err: 2136 ipmi_bmc_unregister(intf); 2137 return rv; 2138 } 2139 2140 static int 2141 send_guid_cmd(ipmi_smi_t intf, int chan) 2142 { 2143 struct kernel_ipmi_msg msg; 2144 struct ipmi_system_interface_addr si; 2145 2146 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2147 si.channel = IPMI_BMC_CHANNEL; 2148 si.lun = 0; 2149 2150 msg.netfn = IPMI_NETFN_APP_REQUEST; 2151 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 2152 msg.data = NULL; 2153 msg.data_len = 0; 2154 return i_ipmi_request(NULL, 2155 intf, 2156 (struct ipmi_addr *) &si, 2157 0, 2158 &msg, 2159 intf, 2160 NULL, 2161 NULL, 2162 0, 2163 intf->channels[0].address, 2164 intf->channels[0].lun, 2165 -1, 0); 2166 } 2167 2168 static void 2169 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 2170 { 2171 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2172 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2173 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 2174 /* Not for me */ 2175 return; 2176 2177 if (msg->msg.data[0] != 0) { 2178 /* Error from getting the GUID, the BMC doesn't have one. */ 2179 intf->bmc->guid_set = 0; 2180 goto out; 2181 } 2182 2183 if (msg->msg.data_len < 17) { 2184 intf->bmc->guid_set = 0; 2185 printk(KERN_WARNING PFX 2186 "guid_handler: The GUID response from the BMC was too" 2187 " short, it was %d but should have been 17. Assuming" 2188 " GUID is not available.\n", 2189 msg->msg.data_len); 2190 goto out; 2191 } 2192 2193 memcpy(intf->bmc->guid, msg->msg.data, 16); 2194 intf->bmc->guid_set = 1; 2195 out: 2196 wake_up(&intf->waitq); 2197 } 2198 2199 static void 2200 get_guid(ipmi_smi_t intf) 2201 { 2202 int rv; 2203 2204 intf->bmc->guid_set = 0x2; 2205 intf->null_user_handler = guid_handler; 2206 rv = send_guid_cmd(intf, 0); 2207 if (rv) 2208 /* Send failed, no GUID available. 
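		   The guid_set field doubles as a tiny state machine for
		   this exchange: get_guid() sets it to 2 (request
		   outstanding), guid_handler() moves it to 1 (16-byte GUID
		   copied) or 0 (no GUID available), and the wait_event()
		   below sleeps until it leaves state 2.  On a send failure
		   we force it to 0 here so the wait returns immediately.
		   Roughly:

		       send_guid_cmd(intf, 0)              guid_set == 2
			   reply with data[0] != 0     ->  guid_set = 0
			   reply shorter than 17 bytes ->  guid_set = 0
			   otherwise copy 16 bytes     ->  guid_set = 1
		       wait_event(intf->waitq, intf->bmc->guid_set != 2)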
*/ 2209 intf->bmc->guid_set = 0; 2210 wait_event(intf->waitq, intf->bmc->guid_set != 2); 2211 intf->null_user_handler = NULL; 2212 } 2213 2214 static int 2215 send_channel_info_cmd(ipmi_smi_t intf, int chan) 2216 { 2217 struct kernel_ipmi_msg msg; 2218 unsigned char data[1]; 2219 struct ipmi_system_interface_addr si; 2220 2221 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2222 si.channel = IPMI_BMC_CHANNEL; 2223 si.lun = 0; 2224 2225 msg.netfn = IPMI_NETFN_APP_REQUEST; 2226 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 2227 msg.data = data; 2228 msg.data_len = 1; 2229 data[0] = chan; 2230 return i_ipmi_request(NULL, 2231 intf, 2232 (struct ipmi_addr *) &si, 2233 0, 2234 &msg, 2235 intf, 2236 NULL, 2237 NULL, 2238 0, 2239 intf->channels[0].address, 2240 intf->channels[0].lun, 2241 -1, 0); 2242 } 2243 2244 static void 2245 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 2246 { 2247 int rv = 0; 2248 int chan; 2249 2250 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2251 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 2252 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) 2253 { 2254 /* It's the one we want */ 2255 if (msg->msg.data[0] != 0) { 2256 /* Got an error from the channel, just go on. */ 2257 2258 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 2259 /* If the MC does not support this 2260 command, that is legal. We just 2261 assume it has one IPMB at channel 2262 zero. */ 2263 intf->channels[0].medium 2264 = IPMI_CHANNEL_MEDIUM_IPMB; 2265 intf->channels[0].protocol 2266 = IPMI_CHANNEL_PROTOCOL_IPMB; 2267 rv = -ENOSYS; 2268 2269 intf->curr_channel = IPMI_MAX_CHANNELS; 2270 wake_up(&intf->waitq); 2271 goto out; 2272 } 2273 goto next_channel; 2274 } 2275 if (msg->msg.data_len < 4) { 2276 /* Message not big enough, just go on. */ 2277 goto next_channel; 2278 } 2279 chan = intf->curr_channel; 2280 intf->channels[chan].medium = msg->msg.data[2] & 0x7f; 2281 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; 2282 2283 next_channel: 2284 intf->curr_channel++; 2285 if (intf->curr_channel >= IPMI_MAX_CHANNELS) 2286 wake_up(&intf->waitq); 2287 else 2288 rv = send_channel_info_cmd(intf, intf->curr_channel); 2289 2290 if (rv) { 2291 /* Got an error somehow, just give up. */ 2292 intf->curr_channel = IPMI_MAX_CHANNELS; 2293 wake_up(&intf->waitq); 2294 2295 printk(KERN_WARNING PFX 2296 "Error sending channel information: %d\n", 2297 rv); 2298 } 2299 } 2300 out: 2301 return; 2302 } 2303 2304 int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 2305 void *send_info, 2306 struct ipmi_device_id *device_id, 2307 struct device *si_dev, 2308 unsigned char slave_addr, 2309 ipmi_smi_t *new_intf) 2310 { 2311 int i, j; 2312 int rv; 2313 ipmi_smi_t intf; 2314 unsigned long flags; 2315 int version_major; 2316 int version_minor; 2317 2318 version_major = ipmi_version_major(device_id); 2319 version_minor = ipmi_version_minor(device_id); 2320 2321 /* Make sure the driver is actually initialized, this handles 2322 problems with initialization order. */ 2323 if (!initialized) { 2324 rv = ipmi_init_msghandler(); 2325 if (rv) 2326 return rv; 2327 /* The init code doesn't return an error if it was turned 2328 off, but it won't initialize. Check that. 
*/ 2329 if (!initialized) 2330 return -ENODEV; 2331 } 2332 2333 intf = kmalloc(sizeof(*intf), GFP_KERNEL); 2334 if (!intf) 2335 return -ENOMEM; 2336 memset(intf, 0, sizeof(*intf)); 2337 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); 2338 if (!intf->bmc) { 2339 kfree(intf); 2340 return -ENOMEM; 2341 } 2342 intf->intf_num = -1; 2343 kref_init(&intf->refcount); 2344 intf->bmc->id = *device_id; 2345 intf->si_dev = si_dev; 2346 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 2347 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; 2348 intf->channels[j].lun = 2; 2349 } 2350 if (slave_addr != 0) 2351 intf->channels[0].address = slave_addr; 2352 INIT_LIST_HEAD(&intf->users); 2353 intf->handlers = handlers; 2354 intf->send_info = send_info; 2355 spin_lock_init(&intf->seq_lock); 2356 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 2357 intf->seq_table[j].inuse = 0; 2358 intf->seq_table[j].seqid = 0; 2359 } 2360 intf->curr_seq = 0; 2361 #ifdef CONFIG_PROC_FS 2362 spin_lock_init(&intf->proc_entry_lock); 2363 #endif 2364 spin_lock_init(&intf->waiting_msgs_lock); 2365 INIT_LIST_HEAD(&intf->waiting_msgs); 2366 spin_lock_init(&intf->events_lock); 2367 INIT_LIST_HEAD(&intf->waiting_events); 2368 intf->waiting_events_count = 0; 2369 init_MUTEX(&intf->cmd_rcvrs_lock); 2370 INIT_LIST_HEAD(&intf->cmd_rcvrs); 2371 init_waitqueue_head(&intf->waitq); 2372 2373 spin_lock_init(&intf->counter_lock); 2374 intf->proc_dir = NULL; 2375 2376 rv = -ENOMEM; 2377 spin_lock_irqsave(&interfaces_lock, flags); 2378 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2379 if (ipmi_interfaces[i] == NULL) { 2380 intf->intf_num = i; 2381 /* Reserve the entry till we are done. */ 2382 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; 2383 rv = 0; 2384 break; 2385 } 2386 } 2387 spin_unlock_irqrestore(&interfaces_lock, flags); 2388 if (rv) 2389 goto out; 2390 2391 /* FIXME - this is an ugly kludge, this sets the intf for the 2392 caller before sending any messages with it. */ 2393 *new_intf = intf; 2394 2395 get_guid(intf); 2396 2397 if ((version_major > 1) 2398 || ((version_major == 1) && (version_minor >= 5))) 2399 { 2400 /* Start scanning the channels to see what is 2401 available. */ 2402 intf->null_user_handler = channel_handler; 2403 intf->curr_channel = 0; 2404 rv = send_channel_info_cmd(intf, 0); 2405 if (rv) 2406 goto out; 2407 2408 /* Wait for the channel info to be read. */ 2409 wait_event(intf->waitq, 2410 intf->curr_channel >= IPMI_MAX_CHANNELS); 2411 intf->null_user_handler = NULL; 2412 } else { 2413 /* Assume a single IPMB channel at zero. 
*/ 2414 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2415 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 2416 } 2417 2418 if (rv == 0) 2419 rv = add_proc_entries(intf, i); 2420 2421 rv = ipmi_bmc_register(intf); 2422 2423 out: 2424 if (rv) { 2425 if (intf->proc_dir) 2426 remove_proc_entries(intf); 2427 kref_put(&intf->refcount, intf_free); 2428 if (i < MAX_IPMI_INTERFACES) { 2429 spin_lock_irqsave(&interfaces_lock, flags); 2430 ipmi_interfaces[i] = NULL; 2431 spin_unlock_irqrestore(&interfaces_lock, flags); 2432 } 2433 } else { 2434 spin_lock_irqsave(&interfaces_lock, flags); 2435 ipmi_interfaces[i] = intf; 2436 spin_unlock_irqrestore(&interfaces_lock, flags); 2437 call_smi_watchers(i, intf->si_dev); 2438 } 2439 2440 return rv; 2441 } 2442 2443 int ipmi_unregister_smi(ipmi_smi_t intf) 2444 { 2445 int i; 2446 struct ipmi_smi_watcher *w; 2447 unsigned long flags; 2448 2449 ipmi_bmc_unregister(intf); 2450 2451 spin_lock_irqsave(&interfaces_lock, flags); 2452 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2453 if (ipmi_interfaces[i] == intf) { 2454 /* Set the interface number reserved until we 2455 * are done. */ 2456 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; 2457 intf->intf_num = -1; 2458 break; 2459 } 2460 } 2461 spin_unlock_irqrestore(&interfaces_lock,flags); 2462 2463 if (i == MAX_IPMI_INTERFACES) 2464 return -ENODEV; 2465 2466 remove_proc_entries(intf); 2467 2468 /* Call all the watcher interfaces to tell them that 2469 an interface is gone. */ 2470 down_read(&smi_watchers_sem); 2471 list_for_each_entry(w, &smi_watchers, link) 2472 w->smi_gone(i); 2473 up_read(&smi_watchers_sem); 2474 2475 /* Allow the entry to be reused now. */ 2476 spin_lock_irqsave(&interfaces_lock, flags); 2477 ipmi_interfaces[i] = NULL; 2478 spin_unlock_irqrestore(&interfaces_lock,flags); 2479 2480 kref_put(&intf->refcount, intf_free); 2481 return 0; 2482 } 2483 2484 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, 2485 struct ipmi_smi_msg *msg) 2486 { 2487 struct ipmi_ipmb_addr ipmb_addr; 2488 struct ipmi_recv_msg *recv_msg; 2489 unsigned long flags; 2490 2491 2492 /* This is 11, not 10, because the response must contain a 2493 * completion code. */ 2494 if (msg->rsp_size < 11) { 2495 /* Message not big enough, just ignore it. */ 2496 spin_lock_irqsave(&intf->counter_lock, flags); 2497 intf->invalid_ipmb_responses++; 2498 spin_unlock_irqrestore(&intf->counter_lock, flags); 2499 return 0; 2500 } 2501 2502 if (msg->rsp[2] != 0) { 2503 /* An error getting the response, just ignore it. */ 2504 return 0; 2505 } 2506 2507 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 2508 ipmb_addr.slave_addr = msg->rsp[6]; 2509 ipmb_addr.channel = msg->rsp[3] & 0x0f; 2510 ipmb_addr.lun = msg->rsp[7] & 3; 2511 2512 /* It's a response from a remote entity. Look up the sequence 2513 number and handle the response. */ 2514 if (intf_find_seq(intf, 2515 msg->rsp[7] >> 2, 2516 msg->rsp[3] & 0x0f, 2517 msg->rsp[8], 2518 (msg->rsp[4] >> 2) & (~1), 2519 (struct ipmi_addr *) &(ipmb_addr), 2520 &recv_msg)) 2521 { 2522 /* We were unable to find the sequence number, 2523 so just nuke the message. */ 2524 spin_lock_irqsave(&intf->counter_lock, flags); 2525 intf->unhandled_ipmb_responses++; 2526 spin_unlock_irqrestore(&intf->counter_lock, flags); 2527 return 0; 2528 } 2529 2530 memcpy(recv_msg->msg_data, 2531 &(msg->rsp[9]), 2532 msg->rsp_size - 9); 2533 /* THe other fields matched, so no need to set them, except 2534 for netfn, which needs to be the response that was 2535 returned, not the request value. 
*/ 2536 recv_msg->msg.netfn = msg->rsp[4] >> 2; 2537 recv_msg->msg.data = recv_msg->msg_data; 2538 recv_msg->msg.data_len = msg->rsp_size - 10; 2539 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 2540 spin_lock_irqsave(&intf->counter_lock, flags); 2541 intf->handled_ipmb_responses++; 2542 spin_unlock_irqrestore(&intf->counter_lock, flags); 2543 deliver_response(recv_msg); 2544 2545 return 0; 2546 } 2547 2548 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, 2549 struct ipmi_smi_msg *msg) 2550 { 2551 struct cmd_rcvr *rcvr; 2552 int rv = 0; 2553 unsigned char netfn; 2554 unsigned char cmd; 2555 ipmi_user_t user = NULL; 2556 struct ipmi_ipmb_addr *ipmb_addr; 2557 struct ipmi_recv_msg *recv_msg; 2558 unsigned long flags; 2559 2560 if (msg->rsp_size < 10) { 2561 /* Message not big enough, just ignore it. */ 2562 spin_lock_irqsave(&intf->counter_lock, flags); 2563 intf->invalid_commands++; 2564 spin_unlock_irqrestore(&intf->counter_lock, flags); 2565 return 0; 2566 } 2567 2568 if (msg->rsp[2] != 0) { 2569 /* An error getting the response, just ignore it. */ 2570 return 0; 2571 } 2572 2573 netfn = msg->rsp[4] >> 2; 2574 cmd = msg->rsp[8]; 2575 2576 rcu_read_lock(); 2577 rcvr = find_cmd_rcvr(intf, netfn, cmd); 2578 if (rcvr) { 2579 user = rcvr->user; 2580 kref_get(&user->refcount); 2581 } else 2582 user = NULL; 2583 rcu_read_unlock(); 2584 2585 if (user == NULL) { 2586 /* We didn't find a user, deliver an error response. */ 2587 spin_lock_irqsave(&intf->counter_lock, flags); 2588 intf->unhandled_commands++; 2589 spin_unlock_irqrestore(&intf->counter_lock, flags); 2590 2591 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 2592 msg->data[1] = IPMI_SEND_MSG_CMD; 2593 msg->data[2] = msg->rsp[3]; 2594 msg->data[3] = msg->rsp[6]; 2595 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 2596 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 2597 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; 2598 /* rqseq/lun */ 2599 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 2600 msg->data[8] = msg->rsp[8]; /* cmd */ 2601 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 2602 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); 2603 msg->data_size = 11; 2604 2605 #ifdef DEBUG_MSGING 2606 { 2607 int m; 2608 printk("Invalid command:"); 2609 for (m = 0; m < msg->data_size; m++) 2610 printk(" %2.2x", msg->data[m]); 2611 printk("\n"); 2612 } 2613 #endif 2614 intf->handlers->sender(intf->send_info, msg, 0); 2615 2616 rv = -1; /* We used the message, so return the value that 2617 causes it to not be freed or queued. */ 2618 } else { 2619 /* Deliver the message to the user. */ 2620 spin_lock_irqsave(&intf->counter_lock, flags); 2621 intf->handled_commands++; 2622 spin_unlock_irqrestore(&intf->counter_lock, flags); 2623 2624 recv_msg = ipmi_alloc_recv_msg(); 2625 if (! recv_msg) { 2626 /* We couldn't allocate memory for the 2627 message, so requeue it for handling 2628 later. */ 2629 rv = 1; 2630 kref_put(&user->refcount, free_user); 2631 } else { 2632 /* Extract the source address from the data. 
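			   For reference, this is the indexing the IPMB
			   handlers above and below rely on for the Get
			   Message response (a summary of this code, not a
			   restatement of the spec):

			       rsp[2]   completion code
			       rsp[3]   channel (low nibble)
			       rsp[4]   netfn (bits 7:2) / LUN (bits 1:0)
			       rsp[5]   checksum (not referenced here)
			       rsp[6]   source (requester) slave address
			       rsp[7]   sequence (bits 7:2) / source LUN (bits 1:0)
			       rsp[8]   command
			       rsp[9]..rsp[rsp_size - 2]  message data
			       rsp[rsp_size - 1]          trailing checksum

			   which is why a command needs at least 10 bytes and
			   the data length below is rsp_size - 10.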
*/ 2633 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 2634 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 2635 ipmb_addr->slave_addr = msg->rsp[6]; 2636 ipmb_addr->lun = msg->rsp[7] & 3; 2637 ipmb_addr->channel = msg->rsp[3] & 0xf; 2638 2639 /* Extract the rest of the message information 2640 from the IPMB header.*/ 2641 recv_msg->user = user; 2642 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 2643 recv_msg->msgid = msg->rsp[7] >> 2; 2644 recv_msg->msg.netfn = msg->rsp[4] >> 2; 2645 recv_msg->msg.cmd = msg->rsp[8]; 2646 recv_msg->msg.data = recv_msg->msg_data; 2647 2648 /* We chop off 10, not 9 bytes because the checksum 2649 at the end also needs to be removed. */ 2650 recv_msg->msg.data_len = msg->rsp_size - 10; 2651 memcpy(recv_msg->msg_data, 2652 &(msg->rsp[9]), 2653 msg->rsp_size - 10); 2654 deliver_response(recv_msg); 2655 } 2656 } 2657 2658 return rv; 2659 } 2660 2661 static int handle_lan_get_msg_rsp(ipmi_smi_t intf, 2662 struct ipmi_smi_msg *msg) 2663 { 2664 struct ipmi_lan_addr lan_addr; 2665 struct ipmi_recv_msg *recv_msg; 2666 unsigned long flags; 2667 2668 2669 /* This is 13, not 12, because the response must contain a 2670 * completion code. */ 2671 if (msg->rsp_size < 13) { 2672 /* Message not big enough, just ignore it. */ 2673 spin_lock_irqsave(&intf->counter_lock, flags); 2674 intf->invalid_lan_responses++; 2675 spin_unlock_irqrestore(&intf->counter_lock, flags); 2676 return 0; 2677 } 2678 2679 if (msg->rsp[2] != 0) { 2680 /* An error getting the response, just ignore it. */ 2681 return 0; 2682 } 2683 2684 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 2685 lan_addr.session_handle = msg->rsp[4]; 2686 lan_addr.remote_SWID = msg->rsp[8]; 2687 lan_addr.local_SWID = msg->rsp[5]; 2688 lan_addr.channel = msg->rsp[3] & 0x0f; 2689 lan_addr.privilege = msg->rsp[3] >> 4; 2690 lan_addr.lun = msg->rsp[9] & 3; 2691 2692 /* It's a response from a remote entity. Look up the sequence 2693 number and handle the response. */ 2694 if (intf_find_seq(intf, 2695 msg->rsp[9] >> 2, 2696 msg->rsp[3] & 0x0f, 2697 msg->rsp[10], 2698 (msg->rsp[6] >> 2) & (~1), 2699 (struct ipmi_addr *) &(lan_addr), 2700 &recv_msg)) 2701 { 2702 /* We were unable to find the sequence number, 2703 so just nuke the message. */ 2704 spin_lock_irqsave(&intf->counter_lock, flags); 2705 intf->unhandled_lan_responses++; 2706 spin_unlock_irqrestore(&intf->counter_lock, flags); 2707 return 0; 2708 } 2709 2710 memcpy(recv_msg->msg_data, 2711 &(msg->rsp[11]), 2712 msg->rsp_size - 11); 2713 /* The other fields matched, so no need to set them, except 2714 for netfn, which needs to be the response that was 2715 returned, not the request value. */ 2716 recv_msg->msg.netfn = msg->rsp[6] >> 2; 2717 recv_msg->msg.data = recv_msg->msg_data; 2718 recv_msg->msg.data_len = msg->rsp_size - 12; 2719 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 2720 spin_lock_irqsave(&intf->counter_lock, flags); 2721 intf->handled_lan_responses++; 2722 spin_unlock_irqrestore(&intf->counter_lock, flags); 2723 deliver_response(recv_msg); 2724 2725 return 0; 2726 } 2727 2728 static int handle_lan_get_msg_cmd(ipmi_smi_t intf, 2729 struct ipmi_smi_msg *msg) 2730 { 2731 struct cmd_rcvr *rcvr; 2732 int rv = 0; 2733 unsigned char netfn; 2734 unsigned char cmd; 2735 ipmi_user_t user = NULL; 2736 struct ipmi_lan_addr *lan_addr; 2737 struct ipmi_recv_msg *recv_msg; 2738 unsigned long flags; 2739 2740 if (msg->rsp_size < 12) { 2741 /* Message not big enough, just ignore it. 
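		   The LAN encapsulation carries a larger header, which is
		   what the 12 accounts for.  Summarizing the indexing used
		   by the LAN handlers above and below:

		       rsp[2]   completion code
		       rsp[3]   channel (low nibble) / privilege (high nibble)
		       rsp[4]   session handle
		       rsp[5]   local SWID
		       rsp[6]   netfn (bits 7:2)
		       rsp[7]   (not referenced by these handlers)
		       rsp[8]   remote SWID
		       rsp[9]   sequence (bits 7:2) / LUN (bits 1:0)
		       rsp[10]  command
		       rsp[11]..rsp[rsp_size - 2]  message data
		       rsp[rsp_size - 1]           trailing checksum

		   so a command needs at least 12 bytes and the data length
		   below is rsp_size - 12.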
*/ 2742 spin_lock_irqsave(&intf->counter_lock, flags); 2743 intf->invalid_commands++; 2744 spin_unlock_irqrestore(&intf->counter_lock, flags); 2745 return 0; 2746 } 2747 2748 if (msg->rsp[2] != 0) { 2749 /* An error getting the response, just ignore it. */ 2750 return 0; 2751 } 2752 2753 netfn = msg->rsp[6] >> 2; 2754 cmd = msg->rsp[10]; 2755 2756 rcu_read_lock(); 2757 rcvr = find_cmd_rcvr(intf, netfn, cmd); 2758 if (rcvr) { 2759 user = rcvr->user; 2760 kref_get(&user->refcount); 2761 } else 2762 user = NULL; 2763 rcu_read_unlock(); 2764 2765 if (user == NULL) { 2766 /* We didn't find a user, just give up. */ 2767 spin_lock_irqsave(&intf->counter_lock, flags); 2768 intf->unhandled_commands++; 2769 spin_unlock_irqrestore(&intf->counter_lock, flags); 2770 2771 rv = 0; /* Don't do anything with these messages, just 2772 allow them to be freed. */ 2773 } else { 2774 /* Deliver the message to the user. */ 2775 spin_lock_irqsave(&intf->counter_lock, flags); 2776 intf->handled_commands++; 2777 spin_unlock_irqrestore(&intf->counter_lock, flags); 2778 2779 recv_msg = ipmi_alloc_recv_msg(); 2780 if (! recv_msg) { 2781 /* We couldn't allocate memory for the 2782 message, so requeue it for handling 2783 later. */ 2784 rv = 1; 2785 kref_put(&user->refcount, free_user); 2786 } else { 2787 /* Extract the source address from the data. */ 2788 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 2789 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 2790 lan_addr->session_handle = msg->rsp[4]; 2791 lan_addr->remote_SWID = msg->rsp[8]; 2792 lan_addr->local_SWID = msg->rsp[5]; 2793 lan_addr->lun = msg->rsp[9] & 3; 2794 lan_addr->channel = msg->rsp[3] & 0xf; 2795 lan_addr->privilege = msg->rsp[3] >> 4; 2796 2797 /* Extract the rest of the message information 2798 from the IPMB header.*/ 2799 recv_msg->user = user; 2800 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 2801 recv_msg->msgid = msg->rsp[9] >> 2; 2802 recv_msg->msg.netfn = msg->rsp[6] >> 2; 2803 recv_msg->msg.cmd = msg->rsp[10]; 2804 recv_msg->msg.data = recv_msg->msg_data; 2805 2806 /* We chop off 12, not 11 bytes because the checksum 2807 at the end also needs to be removed. */ 2808 recv_msg->msg.data_len = msg->rsp_size - 12; 2809 memcpy(recv_msg->msg_data, 2810 &(msg->rsp[11]), 2811 msg->rsp_size - 12); 2812 deliver_response(recv_msg); 2813 } 2814 } 2815 2816 return rv; 2817 } 2818 2819 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 2820 struct ipmi_smi_msg *msg) 2821 { 2822 struct ipmi_system_interface_addr *smi_addr; 2823 2824 recv_msg->msgid = 0; 2825 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); 2826 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2827 smi_addr->channel = IPMI_BMC_CHANNEL; 2828 smi_addr->lun = msg->rsp[0] & 3; 2829 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 2830 recv_msg->msg.netfn = msg->rsp[0] >> 2; 2831 recv_msg->msg.cmd = msg->rsp[1]; 2832 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3); 2833 recv_msg->msg.data = recv_msg->msg_data; 2834 recv_msg->msg.data_len = msg->rsp_size - 3; 2835 } 2836 2837 static int handle_read_event_rsp(ipmi_smi_t intf, 2838 struct ipmi_smi_msg *msg) 2839 { 2840 struct ipmi_recv_msg *recv_msg, *recv_msg2; 2841 struct list_head msgs; 2842 ipmi_user_t user; 2843 int rv = 0; 2844 int deliver_count = 0; 2845 unsigned long flags; 2846 2847 if (msg->rsp_size < 19) { 2848 /* Message is too small to be an IPMB event. 
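	   A complete event here is rsp[0] (netfn/LUN), rsp[1] (command),
	   rsp[2] (completion code) followed by a 16-byte event record, so
	   anything shorter than 3 + 16 = 19 bytes cannot be a full event;
	   copy_event_into_recv_msg() above copies rsp_size - 3 bytes
	   starting at rsp[3].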
	 */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	spin_lock(&intf->counter_lock);
	intf->events++;
	spin_unlock(&intf->counter_lock);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (! user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else {
		/* There are too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}

static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	unsigned long        flags;
	struct ipmi_user     *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL)
	{
		printk(KERN_WARNING
		       "IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vendor for assistance\n");
		return 0;
	}

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up.
		 */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}

/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued. */
static int handle_new_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned too small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
		   || (msg->rsp[1] != msg->data[1]))		   /* Command */
	{
		/* The response is not even marginally correct. */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL))
	{
		/* It's a response to a response we sent.  For this we
		   deliver a send message response to the user. */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		/* Make sure the user still exists.
*/ 3047 if (!recv_msg->user || !recv_msg->user->valid) 3048 goto out; 3049 3050 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 3051 recv_msg->msg.data = recv_msg->msg_data; 3052 recv_msg->msg.data_len = 1; 3053 recv_msg->msg_data[0] = msg->rsp[2]; 3054 deliver_response(recv_msg); 3055 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3056 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) 3057 { 3058 /* It's from the receive queue. */ 3059 chan = msg->rsp[3] & 0xf; 3060 if (chan >= IPMI_MAX_CHANNELS) { 3061 /* Invalid channel number */ 3062 requeue = 0; 3063 goto out; 3064 } 3065 3066 switch (intf->channels[chan].medium) { 3067 case IPMI_CHANNEL_MEDIUM_IPMB: 3068 if (msg->rsp[4] & 0x04) { 3069 /* It's a response, so find the 3070 requesting message and send it up. */ 3071 requeue = handle_ipmb_get_msg_rsp(intf, msg); 3072 } else { 3073 /* It's a command to the SMS from some other 3074 entity. Handle that. */ 3075 requeue = handle_ipmb_get_msg_cmd(intf, msg); 3076 } 3077 break; 3078 3079 case IPMI_CHANNEL_MEDIUM_8023LAN: 3080 case IPMI_CHANNEL_MEDIUM_ASYNC: 3081 if (msg->rsp[6] & 0x04) { 3082 /* It's a response, so find the 3083 requesting message and send it up. */ 3084 requeue = handle_lan_get_msg_rsp(intf, msg); 3085 } else { 3086 /* It's a command to the SMS from some other 3087 entity. Handle that. */ 3088 requeue = handle_lan_get_msg_cmd(intf, msg); 3089 } 3090 break; 3091 3092 default: 3093 /* We don't handle the channel type, so just 3094 * free the message. */ 3095 requeue = 0; 3096 } 3097 3098 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 3099 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) 3100 { 3101 /* It's an asyncronous event. */ 3102 requeue = handle_read_event_rsp(intf, msg); 3103 } else { 3104 /* It's a response from the local BMC. */ 3105 requeue = handle_bmc_rsp(intf, msg); 3106 } 3107 3108 out: 3109 return requeue; 3110 } 3111 3112 /* Handle a new message from the lower layer. */ 3113 void ipmi_smi_msg_received(ipmi_smi_t intf, 3114 struct ipmi_smi_msg *msg) 3115 { 3116 unsigned long flags; 3117 int rv; 3118 3119 3120 if ((msg->data_size >= 2) 3121 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 3122 && (msg->data[1] == IPMI_SEND_MSG_CMD) 3123 && (msg->user_data == NULL)) 3124 { 3125 /* This is the local response to a command send, start 3126 the timer for these. The user_data will not be 3127 NULL if this is a response send, and we will let 3128 response sends just go through. */ 3129 3130 /* Check for errors, if we get certain errors (ones 3131 that mean basically we can try again later), we 3132 ignore them and start the timer. Otherwise we 3133 report the error immediately. */ 3134 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) 3135 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) 3136 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)) 3137 { 3138 int chan = msg->rsp[3] & 0xf; 3139 3140 /* Got an error sending the message, handle it. */ 3141 spin_lock_irqsave(&intf->counter_lock, flags); 3142 if (chan >= IPMI_MAX_CHANNELS) 3143 ; /* This shouldn't happen */ 3144 else if ((intf->channels[chan].medium 3145 == IPMI_CHANNEL_MEDIUM_8023LAN) 3146 || (intf->channels[chan].medium 3147 == IPMI_CHANNEL_MEDIUM_ASYNC)) 3148 intf->sent_lan_command_errs++; 3149 else 3150 intf->sent_ipmb_command_errs++; 3151 spin_unlock_irqrestore(&intf->counter_lock, flags); 3152 intf_err_seq(intf, msg->msgid, msg->rsp[2]); 3153 } else { 3154 /* The message was sent, start the timer. 
*/ 3155 intf_start_seq_timer(intf, msg->msgid); 3156 } 3157 3158 ipmi_free_smi_msg(msg); 3159 goto out; 3160 } 3161 3162 /* To preserve message order, if the list is not empty, we 3163 tack this message onto the end of the list. */ 3164 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3165 if (!list_empty(&intf->waiting_msgs)) { 3166 list_add_tail(&msg->link, &intf->waiting_msgs); 3167 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3168 goto out; 3169 } 3170 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3171 3172 rv = handle_new_recv_msg(intf, msg); 3173 if (rv > 0) { 3174 /* Could not handle the message now, just add it to a 3175 list to handle later. */ 3176 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3177 list_add_tail(&msg->link, &intf->waiting_msgs); 3178 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3179 } else if (rv == 0) { 3180 ipmi_free_smi_msg(msg); 3181 } 3182 3183 out: 3184 return; 3185 } 3186 3187 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3188 { 3189 ipmi_user_t user; 3190 3191 rcu_read_lock(); 3192 list_for_each_entry_rcu(user, &intf->users, link) { 3193 if (! user->handler->ipmi_watchdog_pretimeout) 3194 continue; 3195 3196 user->handler->ipmi_watchdog_pretimeout(user->handler_data); 3197 } 3198 rcu_read_unlock(); 3199 } 3200 3201 static void 3202 handle_msg_timeout(struct ipmi_recv_msg *msg) 3203 { 3204 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3205 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE; 3206 msg->msg.netfn |= 1; /* Convert to a response. */ 3207 msg->msg.data_len = 1; 3208 msg->msg.data = msg->msg_data; 3209 deliver_response(msg); 3210 } 3211 3212 static struct ipmi_smi_msg * 3213 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, 3214 unsigned char seq, long seqid) 3215 { 3216 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 3217 if (!smi_msg) 3218 /* If we can't allocate the message, then just return, we 3219 get 4 retries, so this should be ok. */ 3220 return NULL; 3221 3222 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 3223 smi_msg->data_size = recv_msg->msg.data_len; 3224 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 3225 3226 #ifdef DEBUG_MSGING 3227 { 3228 int m; 3229 printk("Resend: "); 3230 for (m = 0; m < smi_msg->data_size; m++) 3231 printk(" %2.2x", smi_msg->data[m]); 3232 printk("\n"); 3233 } 3234 #endif 3235 return smi_msg; 3236 } 3237 3238 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, 3239 struct list_head *timeouts, long timeout_period, 3240 int slot, unsigned long *flags) 3241 { 3242 struct ipmi_recv_msg *msg; 3243 3244 if (!ent->inuse) 3245 return; 3246 3247 ent->timeout -= timeout_period; 3248 if (ent->timeout > 0) 3249 return; 3250 3251 if (ent->retries_left == 0) { 3252 /* The message has used all its retries. */ 3253 ent->inuse = 0; 3254 msg = ent->recv_msg; 3255 list_add_tail(&msg->link, timeouts); 3256 spin_lock(&intf->counter_lock); 3257 if (ent->broadcast) 3258 intf->timed_out_ipmb_broadcasts++; 3259 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3260 intf->timed_out_lan_commands++; 3261 else 3262 intf->timed_out_ipmb_commands++; 3263 spin_unlock(&intf->counter_lock); 3264 } else { 3265 struct ipmi_smi_msg *smi_msg; 3266 /* More retries, send again. */ 3267 3268 /* Start with the max timer, set to normal 3269 timer after the message is sent. 
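		   (ent->timeout counts milliseconds: check_msg_timeout() is
		   called from ipmi_timeout_handler() roughly every
		   IPMI_TIMEOUT_TIME ms and subtracts timeout_period on each
		   pass, so the entry can only expire again once that budget
		   has been counted back down to zero.)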
*/ 3270 ent->timeout = MAX_MSG_TIMEOUT; 3271 ent->retries_left--; 3272 spin_lock(&intf->counter_lock); 3273 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3274 intf->retransmitted_lan_commands++; 3275 else 3276 intf->retransmitted_ipmb_commands++; 3277 spin_unlock(&intf->counter_lock); 3278 3279 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 3280 ent->seqid); 3281 if (! smi_msg) 3282 return; 3283 3284 spin_unlock_irqrestore(&intf->seq_lock, *flags); 3285 /* Send the new message. We send with a zero 3286 * priority. It timed out, I doubt time is 3287 * that critical now, and high priority 3288 * messages are really only for messages to the 3289 * local MC, which don't get resent. */ 3290 intf->handlers->sender(intf->send_info, 3291 smi_msg, 0); 3292 spin_lock_irqsave(&intf->seq_lock, *flags); 3293 } 3294 } 3295 3296 static void ipmi_timeout_handler(long timeout_period) 3297 { 3298 ipmi_smi_t intf; 3299 struct list_head timeouts; 3300 struct ipmi_recv_msg *msg, *msg2; 3301 struct ipmi_smi_msg *smi_msg, *smi_msg2; 3302 unsigned long flags; 3303 int i, j; 3304 3305 INIT_LIST_HEAD(&timeouts); 3306 3307 spin_lock(&interfaces_lock); 3308 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3309 intf = ipmi_interfaces[i]; 3310 if (IPMI_INVALID_INTERFACE(intf)) 3311 continue; 3312 kref_get(&intf->refcount); 3313 spin_unlock(&interfaces_lock); 3314 3315 /* See if any waiting messages need to be processed. */ 3316 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3317 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) { 3318 if (! handle_new_recv_msg(intf, smi_msg)) { 3319 list_del(&smi_msg->link); 3320 ipmi_free_smi_msg(smi_msg); 3321 } else { 3322 /* To preserve message order, quit if we 3323 can't handle a message. */ 3324 break; 3325 } 3326 } 3327 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3328 3329 /* Go through the seq table and find any messages that 3330 have timed out, putting them in the timeouts 3331 list. */ 3332 spin_lock_irqsave(&intf->seq_lock, flags); 3333 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) 3334 check_msg_timeout(intf, &(intf->seq_table[j]), 3335 &timeouts, timeout_period, j, 3336 &flags); 3337 spin_unlock_irqrestore(&intf->seq_lock, flags); 3338 3339 list_for_each_entry_safe(msg, msg2, &timeouts, link) 3340 handle_msg_timeout(msg); 3341 3342 kref_put(&intf->refcount, intf_free); 3343 spin_lock(&interfaces_lock); 3344 } 3345 spin_unlock(&interfaces_lock); 3346 } 3347 3348 static void ipmi_request_event(void) 3349 { 3350 ipmi_smi_t intf; 3351 int i; 3352 3353 spin_lock(&interfaces_lock); 3354 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3355 intf = ipmi_interfaces[i]; 3356 if (IPMI_INVALID_INTERFACE(intf)) 3357 continue; 3358 3359 intf->handlers->request_events(intf->send_info); 3360 } 3361 spin_unlock(&interfaces_lock); 3362 } 3363 3364 static struct timer_list ipmi_timer; 3365 3366 /* Call every ~100 ms. */ 3367 #define IPMI_TIMEOUT_TIME 100 3368 3369 /* How many jiffies does it take to get to the timeout time. */ 3370 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 3371 3372 /* Request events from the queue every second (this is the number of 3373 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the 3374 future, IPMI will add a way to know immediately if an event is in 3375 the queue and this silliness can go away. 
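   As a worked example of how the timer constants fit together (assuming
   HZ = 250 purely for illustration): IPMI_TIMEOUT_JIFFIES is
   (100 * 250) / 1000 = 25 jiffies, so ipmi_timeout() runs about every
   100 ms, and the define just below evaluates to 1000 / 100 = 10 of
   those ticks, i.e. the event queue is polled roughly once a second.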
*/ 3376 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) 3377 3378 static atomic_t stop_operation; 3379 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3380 3381 static void ipmi_timeout(unsigned long data) 3382 { 3383 if (atomic_read(&stop_operation)) 3384 return; 3385 3386 ticks_to_req_ev--; 3387 if (ticks_to_req_ev == 0) { 3388 ipmi_request_event(); 3389 ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3390 } 3391 3392 ipmi_timeout_handler(IPMI_TIMEOUT_TIME); 3393 3394 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 3395 } 3396 3397 3398 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 3399 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 3400 3401 /* FIXME - convert these to slabs. */ 3402 static void free_smi_msg(struct ipmi_smi_msg *msg) 3403 { 3404 atomic_dec(&smi_msg_inuse_count); 3405 kfree(msg); 3406 } 3407 3408 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 3409 { 3410 struct ipmi_smi_msg *rv; 3411 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 3412 if (rv) { 3413 rv->done = free_smi_msg; 3414 rv->user_data = NULL; 3415 atomic_inc(&smi_msg_inuse_count); 3416 } 3417 return rv; 3418 } 3419 3420 static void free_recv_msg(struct ipmi_recv_msg *msg) 3421 { 3422 atomic_dec(&recv_msg_inuse_count); 3423 kfree(msg); 3424 } 3425 3426 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 3427 { 3428 struct ipmi_recv_msg *rv; 3429 3430 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 3431 if (rv) { 3432 rv->done = free_recv_msg; 3433 atomic_inc(&recv_msg_inuse_count); 3434 } 3435 return rv; 3436 } 3437 3438 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 3439 { 3440 if (msg->user) 3441 kref_put(&msg->user->refcount, free_user); 3442 msg->done(msg); 3443 } 3444 3445 #ifdef CONFIG_IPMI_PANIC_EVENT 3446 3447 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 3448 { 3449 } 3450 3451 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 3452 { 3453 } 3454 3455 #ifdef CONFIG_IPMI_PANIC_STRING 3456 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 3457 { 3458 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3459 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 3460 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 3461 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3462 { 3463 /* A get event receiver command, save it. */ 3464 intf->event_receiver = msg->msg.data[1]; 3465 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 3466 } 3467 } 3468 3469 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 3470 { 3471 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3472 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3473 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 3474 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3475 { 3476 /* A get device id command, save if we are an event 3477 receiver or generator. */ 3478 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 3479 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 3480 } 3481 } 3482 #endif 3483 3484 static void send_panic_events(char *str) 3485 { 3486 struct kernel_ipmi_msg msg; 3487 ipmi_smi_t intf; 3488 unsigned char data[16]; 3489 int i; 3490 struct ipmi_system_interface_addr *si; 3491 struct ipmi_addr addr; 3492 struct ipmi_smi_msg smi_msg; 3493 struct ipmi_recv_msg recv_msg; 3494 3495 si = (struct ipmi_system_interface_addr *) &addr; 3496 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3497 si->channel = IPMI_BMC_CHANNEL; 3498 si->lun = 0; 3499 3500 /* Fill in an event telling that we have failed. 
*/ 3501 msg.netfn = 0x04; /* Sensor or Event. */ 3502 msg.cmd = 2; /* Platform event command. */ 3503 msg.data = data; 3504 msg.data_len = 8; 3505 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 3506 data[1] = 0x03; /* This is for IPMI 1.0. */ 3507 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 3508 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 3509 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 3510 3511 /* Put a few breadcrumbs in. Hopefully later we can add more things 3512 to make the panic events more useful. */ 3513 if (str) { 3514 data[3] = str[0]; 3515 data[6] = str[1]; 3516 data[7] = str[2]; 3517 } 3518 3519 smi_msg.done = dummy_smi_done_handler; 3520 recv_msg.done = dummy_recv_done_handler; 3521 3522 /* For every registered interface, send the event. */ 3523 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3524 intf = ipmi_interfaces[i]; 3525 if (IPMI_INVALID_INTERFACE(intf)) 3526 continue; 3527 3528 /* Send the event announcing the panic. */ 3529 intf->handlers->set_run_to_completion(intf->send_info, 1); 3530 i_ipmi_request(NULL, 3531 intf, 3532 &addr, 3533 0, 3534 &msg, 3535 intf, 3536 &smi_msg, 3537 &recv_msg, 3538 0, 3539 intf->channels[0].address, 3540 intf->channels[0].lun, 3541 0, 1); /* Don't retry, and don't wait. */ 3542 } 3543 3544 #ifdef CONFIG_IPMI_PANIC_STRING 3545 /* On every interface, dump a bunch of OEM event holding the 3546 string. */ 3547 if (!str) 3548 return; 3549 3550 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3551 char *p = str; 3552 struct ipmi_ipmb_addr *ipmb; 3553 int j; 3554 3555 intf = ipmi_interfaces[i]; 3556 if (IPMI_INVALID_INTERFACE(intf)) 3557 continue; 3558 3559 /* First job here is to figure out where to send the 3560 OEM events. There's no way in IPMI to send OEM 3561 events using an event send command, so we have to 3562 find the SEL to put them in and stick them in 3563 there. */ 3564 3565 /* Get capabilities from the get device id. */ 3566 intf->local_sel_device = 0; 3567 intf->local_event_generator = 0; 3568 intf->event_receiver = 0; 3569 3570 /* Request the device info from the local MC. */ 3571 msg.netfn = IPMI_NETFN_APP_REQUEST; 3572 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 3573 msg.data = NULL; 3574 msg.data_len = 0; 3575 intf->null_user_handler = device_id_fetcher; 3576 i_ipmi_request(NULL, 3577 intf, 3578 &addr, 3579 0, 3580 &msg, 3581 intf, 3582 &smi_msg, 3583 &recv_msg, 3584 0, 3585 intf->channels[0].address, 3586 intf->channels[0].lun, 3587 0, 1); /* Don't retry, and don't wait. */ 3588 3589 if (intf->local_event_generator) { 3590 /* Request the event receiver from the local MC. */ 3591 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 3592 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 3593 msg.data = NULL; 3594 msg.data_len = 0; 3595 intf->null_user_handler = event_receiver_fetcher; 3596 i_ipmi_request(NULL, 3597 intf, 3598 &addr, 3599 0, 3600 &msg, 3601 intf, 3602 &smi_msg, 3603 &recv_msg, 3604 0, 3605 intf->channels[0].address, 3606 intf->channels[0].lun, 3607 0, 1); /* no retry, and no wait. */ 3608 } 3609 intf->null_user_handler = NULL; 3610 3611 /* Validate the event receiver. The low bit must not 3612 be 1 (it must be a valid IPMB address), it cannot 3613 be zero, and it must not be my address. */ 3614 if (((intf->event_receiver & 1) == 0) 3615 && (intf->event_receiver != 0) 3616 && (intf->event_receiver != intf->channels[0].address)) 3617 { 3618 /* The event receiver is valid, send an IPMB 3619 message. 
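			   Whichever destination is picked, the loop further
			   below slices the panic string into 11-byte pieces
			   and sends one Add SEL Entry command per piece:
			   data[2] = 0xf0 marks an OEM record without a
			   timestamp, data[3] is our slave address, data[4]
			   is an incrementing sequence number, and
			   data[5]..data[15] carry the next chunk of the
			   string (zero padded by strncpy).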
*/ 3620 ipmb = (struct ipmi_ipmb_addr *) &addr; 3621 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 3622 ipmb->channel = 0; /* FIXME - is this right? */ 3623 ipmb->lun = intf->event_receiver_lun; 3624 ipmb->slave_addr = intf->event_receiver; 3625 } else if (intf->local_sel_device) { 3626 /* The event receiver was not valid (or was 3627 me), but I am an SEL device, just dump it 3628 in my SEL. */ 3629 si = (struct ipmi_system_interface_addr *) &addr; 3630 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3631 si->channel = IPMI_BMC_CHANNEL; 3632 si->lun = 0; 3633 } else 3634 continue; /* No where to send the event. */ 3635 3636 3637 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 3638 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 3639 msg.data = data; 3640 msg.data_len = 16; 3641 3642 j = 0; 3643 while (*p) { 3644 int size = strlen(p); 3645 3646 if (size > 11) 3647 size = 11; 3648 data[0] = 0; 3649 data[1] = 0; 3650 data[2] = 0xf0; /* OEM event without timestamp. */ 3651 data[3] = intf->channels[0].address; 3652 data[4] = j++; /* sequence # */ 3653 /* Always give 11 bytes, so strncpy will fill 3654 it with zeroes for me. */ 3655 strncpy(data+5, p, 11); 3656 p += size; 3657 3658 i_ipmi_request(NULL, 3659 intf, 3660 &addr, 3661 0, 3662 &msg, 3663 intf, 3664 &smi_msg, 3665 &recv_msg, 3666 0, 3667 intf->channels[0].address, 3668 intf->channels[0].lun, 3669 0, 1); /* no retry, and no wait. */ 3670 } 3671 } 3672 #endif /* CONFIG_IPMI_PANIC_STRING */ 3673 } 3674 #endif /* CONFIG_IPMI_PANIC_EVENT */ 3675 3676 static int has_paniced = 0; 3677 3678 static int panic_event(struct notifier_block *this, 3679 unsigned long event, 3680 void *ptr) 3681 { 3682 int i; 3683 ipmi_smi_t intf; 3684 3685 if (has_paniced) 3686 return NOTIFY_DONE; 3687 has_paniced = 1; 3688 3689 /* For every registered interface, set it to run to completion. 
*/ 3690 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3691 intf = ipmi_interfaces[i]; 3692 if (IPMI_INVALID_INTERFACE(intf)) 3693 continue; 3694 3695 intf->handlers->set_run_to_completion(intf->send_info, 1); 3696 } 3697 3698 #ifdef CONFIG_IPMI_PANIC_EVENT 3699 send_panic_events(ptr); 3700 #endif 3701 3702 return NOTIFY_DONE; 3703 } 3704 3705 static struct notifier_block panic_block = { 3706 .notifier_call = panic_event, 3707 .next = NULL, 3708 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 3709 }; 3710 3711 static int ipmi_init_msghandler(void) 3712 { 3713 int i; 3714 int rv; 3715 3716 if (initialized) 3717 return 0; 3718 3719 rv = driver_register(&ipmidriver); 3720 if (rv) { 3721 printk(KERN_ERR PFX "Could not register IPMI driver\n"); 3722 return rv; 3723 } 3724 3725 printk(KERN_INFO "ipmi message handler version " 3726 IPMI_DRIVER_VERSION "\n"); 3727 3728 for (i = 0; i < MAX_IPMI_INTERFACES; i++) 3729 ipmi_interfaces[i] = NULL; 3730 3731 #ifdef CONFIG_PROC_FS 3732 proc_ipmi_root = proc_mkdir("ipmi", NULL); 3733 if (!proc_ipmi_root) { 3734 printk(KERN_ERR PFX "Unable to create IPMI proc dir"); 3735 return -ENOMEM; 3736 } 3737 3738 proc_ipmi_root->owner = THIS_MODULE; 3739 #endif /* CONFIG_PROC_FS */ 3740 3741 init_timer(&ipmi_timer); 3742 ipmi_timer.data = 0; 3743 ipmi_timer.function = ipmi_timeout; 3744 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES; 3745 add_timer(&ipmi_timer); 3746 3747 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 3748 3749 initialized = 1; 3750 3751 return 0; 3752 } 3753 3754 static __init int ipmi_init_msghandler_mod(void) 3755 { 3756 ipmi_init_msghandler(); 3757 return 0; 3758 } 3759 3760 static __exit void cleanup_ipmi(void) 3761 { 3762 int count; 3763 3764 if (!initialized) 3765 return; 3766 3767 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); 3768 3769 /* This can't be called if any interfaces exist, so no worry about 3770 shutting down the interfaces. */ 3771 3772 /* Tell the timer to stop, then wait for it to stop. This avoids 3773 problems with race conditions removing the timer here. */ 3774 atomic_inc(&stop_operation); 3775 del_timer_sync(&ipmi_timer); 3776 3777 #ifdef CONFIG_PROC_FS 3778 remove_proc_entry(proc_ipmi_root->name, &proc_root); 3779 #endif /* CONFIG_PROC_FS */ 3780 3781 driver_unregister(&ipmidriver); 3782 3783 initialized = 0; 3784 3785 /* Check for buffer leaks. 
*/ 3786 count = atomic_read(&smi_msg_inuse_count); 3787 if (count != 0) 3788 printk(KERN_WARNING PFX "SMI message count %d at exit\n", 3789 count); 3790 count = atomic_read(&recv_msg_inuse_count); 3791 if (count != 0) 3792 printk(KERN_WARNING PFX "recv message count %d at exit\n", 3793 count); 3794 } 3795 module_exit(cleanup_ipmi); 3796 3797 module_init(ipmi_init_msghandler_mod); 3798 MODULE_LICENSE("GPL"); 3799 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3800 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 3801 MODULE_VERSION(IPMI_DRIVER_VERSION); 3802 3803 EXPORT_SYMBOL(ipmi_create_user); 3804 EXPORT_SYMBOL(ipmi_destroy_user); 3805 EXPORT_SYMBOL(ipmi_get_version); 3806 EXPORT_SYMBOL(ipmi_request_settime); 3807 EXPORT_SYMBOL(ipmi_request_supply_msgs); 3808 EXPORT_SYMBOL(ipmi_register_smi); 3809 EXPORT_SYMBOL(ipmi_unregister_smi); 3810 EXPORT_SYMBOL(ipmi_register_for_cmd); 3811 EXPORT_SYMBOL(ipmi_unregister_for_cmd); 3812 EXPORT_SYMBOL(ipmi_smi_msg_received); 3813 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3814 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 3815 EXPORT_SYMBOL(ipmi_addr_length); 3816 EXPORT_SYMBOL(ipmi_validate_addr); 3817 EXPORT_SYMBOL(ipmi_set_gets_events); 3818 EXPORT_SYMBOL(ipmi_smi_watcher_register); 3819 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 3820 EXPORT_SYMBOL(ipmi_set_my_address); 3821 EXPORT_SYMBOL(ipmi_get_my_address); 3822 EXPORT_SYMBOL(ipmi_set_my_LUN); 3823 EXPORT_SYMBOL(ipmi_get_my_LUN); 3824 EXPORT_SYMBOL(ipmi_smi_add_proc_entry); 3825 EXPORT_SYMBOL(ipmi_user_set_run_to_completion); 3826 EXPORT_SYMBOL(ipmi_free_recv_msg); 3827
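
/*
 * Illustrative sketch (kept in a comment, not compiled): roughly how an
 * in-kernel consumer talks to this message handler through the exported
 * interface above.  The my_* names are hypothetical; the ipmi_* calls and
 * structures are the ones declared in <linux/ipmi.h> for this driver
 * version -- check the signatures against that header before relying on
 * them.
 *
 *	static void my_recv_handler(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		printk(KERN_INFO "reply to msgid %ld, completion code 0x%2.2x\n",
 *		       msg->msgid, msg->msg.data[0]);
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv_handler,
 *	};
 *
 *	static int my_send_get_device_id(ipmi_user_t user)
 *	{
 *		struct ipmi_system_interface_addr si;
 *		struct kernel_ipmi_msg            msg;
 *
 *		si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
 *		si.channel = IPMI_BMC_CHANNEL;
 *		si.lun = 0;
 *
 *		msg.netfn = IPMI_NETFN_APP_REQUEST;
 *		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
 *		msg.data = NULL;
 *		msg.data_len = 0;
 *
 *		return ipmi_request_settime(user, (struct ipmi_addr *) &si,
 *					    1234, &msg, NULL, 0, -1, 0);
 *	}
 *
 * The msgid (1234 here) comes back in my_recv_handler(); priority 0 and
 * the -1/0 retry arguments request the driver defaults.  Attaching to
 * interface 0 and also asking for async events would look like:
 *
 *	ipmi_user_t user;
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 *	if (!rv)
 *		ipmi_set_gets_events(user, 1);
 *	...
 *	ipmi_destroy_user(user);
 */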