/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>

#define PFX "IPMI message handler: "

#define IPMI_DRIVER_VERSION "39.0"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

static int initialized = 0;

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_ipmi_root = NULL;
#endif /* CONFIG_PROC_FS */

#define MAX_EVENTS_IN_QUEUE	25

/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT		60000


/*
 * The main "user" data structure.
 */
struct ipmi_user
{
	struct list_head link;

	/* Set to "0" when the user is destroyed. */
	int valid;

	struct kref refcount;

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void                  *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	int gets_events;
};

struct cmd_rcvr
{
	struct list_head link;

	ipmi_user_t   user;
	unsigned char netfn;
	unsigned char cmd;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
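	 * (See ipmi_destroy_user(), which unlinks matching entries under
	 * cmd_rcvrs_mutex, chains them through this field, and frees them
	 * only after synchronize_rcu() has returned.)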
*/ 108 struct cmd_rcvr *next; 109 }; 110 111 struct seq_table 112 { 113 unsigned int inuse : 1; 114 unsigned int broadcast : 1; 115 116 unsigned long timeout; 117 unsigned long orig_timeout; 118 unsigned int retries_left; 119 120 /* To verify on an incoming send message response that this is 121 the message that the response is for, we keep a sequence id 122 and increment it every time we send a message. */ 123 long seqid; 124 125 /* This is held so we can properly respond to the message on a 126 timeout, and it is used to hold the temporary data for 127 retransmission, too. */ 128 struct ipmi_recv_msg *recv_msg; 129 }; 130 131 /* Store the information in a msgid (long) to allow us to find a 132 sequence table entry from the msgid. */ 133 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) 134 135 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ 136 do { \ 137 seq = ((msgid >> 26) & 0x3f); \ 138 seqid = (msgid & 0x3fffff); \ 139 } while (0) 140 141 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) 142 143 struct ipmi_channel 144 { 145 unsigned char medium; 146 unsigned char protocol; 147 148 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, 149 but may be changed by the user. */ 150 unsigned char address; 151 152 /* My LUN. This should generally stay the SMS LUN, but just in 153 case... */ 154 unsigned char lun; 155 }; 156 157 #ifdef CONFIG_PROC_FS 158 struct ipmi_proc_entry 159 { 160 char *name; 161 struct ipmi_proc_entry *next; 162 }; 163 #endif 164 165 struct bmc_device 166 { 167 struct platform_device *dev; 168 struct ipmi_device_id id; 169 unsigned char guid[16]; 170 int guid_set; 171 172 struct kref refcount; 173 174 /* bmc device attributes */ 175 struct device_attribute device_id_attr; 176 struct device_attribute provides_dev_sdrs_attr; 177 struct device_attribute revision_attr; 178 struct device_attribute firmware_rev_attr; 179 struct device_attribute version_attr; 180 struct device_attribute add_dev_support_attr; 181 struct device_attribute manufacturer_id_attr; 182 struct device_attribute product_id_attr; 183 struct device_attribute guid_attr; 184 struct device_attribute aux_firmware_rev_attr; 185 }; 186 187 #define IPMI_IPMB_NUM_SEQ 64 188 #define IPMI_MAX_CHANNELS 16 189 struct ipmi_smi 190 { 191 /* What interface number are we? */ 192 int intf_num; 193 194 struct kref refcount; 195 196 /* The list of upper layers that are using me. seq_lock 197 * protects this. */ 198 struct list_head users; 199 200 /* Used for wake ups at startup. */ 201 wait_queue_head_t waitq; 202 203 struct bmc_device *bmc; 204 char *my_dev_name; 205 206 /* This is the lower-layer's sender routine. */ 207 struct ipmi_smi_handlers *handlers; 208 void *send_info; 209 210 #ifdef CONFIG_PROC_FS 211 /* A list of proc entries for this interface. This does not 212 need a lock, only one thread creates it and only one thread 213 destroys it. */ 214 spinlock_t proc_entry_lock; 215 struct ipmi_proc_entry *proc_entries; 216 #endif 217 218 /* Driver-model device for the system interface. */ 219 struct device *si_dev; 220 221 /* A table of sequence numbers for this interface. We use the 222 sequence numbers for IPMB messages that go out of the 223 interface to match them up with their responses. A routine 224 is called periodically to time the items in this list. 
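	   The sequence number and a per-slot generation count (seqid)
	   are packed into the msgid by STORE_SEQ_IN_MSGID() and
	   recovered with GET_SEQ_FROM_MSGID(); e.g. seq 5 with seqid
	   0x1234 is stored as (5 << 26) | 0x1234 = 0x14001234.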
	 */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/* Messages that were delayed for some reason (out of memory,
	   for instance), will go in here to be processed later in a
	   periodic timer interrupt. */
	spinlock_t       waiting_msgs_lock;
	struct list_head waiting_msgs;

	/* The list of command receivers that are registered for commands
	   on this interface. */
	struct mutex     cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/* Events that were queued because no one was there to receive
	   them. */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */

	/* The event receiver for my BMC, only really used at panic
	   shutdown as a place to store this. */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* A cheap hack, if this is non-null and a message to an
	   interface comes in with a NULL user, call this routine with
	   it.  Note that the message will still be freed by the
	   caller.  This only works on the system interface. */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);

	/* When we are scanning the channels for an SMI, this will
	   tell which channel we are scanning. */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char                  proc_dir_name[10];

	spinlock_t   counter_lock; /* For making counters atomic. */

	/* Commands we were asked to send that were invalid. */
	unsigned int sent_invalid_commands;

	/* Commands we sent to the MC. */
	unsigned int sent_local_commands;
	/* Responses from the MC that were delivered to a user. */
	unsigned int handled_local_responses;
	/* Responses from the MC that were not delivered to a user. */
	unsigned int unhandled_local_responses;

	/* Commands we sent out to the IPMB bus. */
	unsigned int sent_ipmb_commands;
	/* Commands sent on the IPMB that had errors on the SEND CMD */
	unsigned int sent_ipmb_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_ipmb_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_ipmb_commands;

	/* This is like above, but for broadcasts.  Broadcasts are
	   *not* included in the above count (they are expected to
	   time out). */
	unsigned int timed_out_ipmb_broadcasts;

	/* Responses we have sent to the IPMB bus. */
	unsigned int sent_ipmb_responses;

	/* The response was delivered to the user. */
	unsigned int handled_ipmb_responses;
	/* The response had invalid data in it. */
	unsigned int invalid_ipmb_responses;
	/* The response didn't have anyone waiting for it. */
	unsigned int unhandled_ipmb_responses;

	/* Commands we sent out over the LAN. */
	unsigned int sent_lan_commands;
	/* Commands sent over the LAN that had errors on the SEND CMD */
	unsigned int sent_lan_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_lan_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_lan_commands;

	/* Responses we have sent out over the LAN.
*/ 318 unsigned int sent_lan_responses; 319 320 /* The response was delivered to the user. */ 321 unsigned int handled_lan_responses; 322 /* The response had invalid data in it. */ 323 unsigned int invalid_lan_responses; 324 /* The response didn't have anyone waiting for it. */ 325 unsigned int unhandled_lan_responses; 326 327 /* The command was delivered to the user. */ 328 unsigned int handled_commands; 329 /* The command had invalid data in it. */ 330 unsigned int invalid_commands; 331 /* The command didn't have anyone waiting for it. */ 332 unsigned int unhandled_commands; 333 334 /* Invalid data in an event. */ 335 unsigned int invalid_events; 336 /* Events that were received with the proper format. */ 337 unsigned int events; 338 }; 339 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) 340 341 /* Used to mark an interface entry that cannot be used but is not a 342 * free entry, either, primarily used at creation and deletion time so 343 * a slot doesn't get reused too quickly. */ 344 #define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1)) 345 #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ 346 || (i == IPMI_INVALID_INTERFACE_ENTRY)) 347 348 /** 349 * The driver model view of the IPMI messaging driver. 350 */ 351 static struct device_driver ipmidriver = { 352 .name = "ipmi", 353 .bus = &platform_bus_type 354 }; 355 static DEFINE_MUTEX(ipmidriver_mutex); 356 357 #define MAX_IPMI_INTERFACES 4 358 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; 359 360 /* Directly protects the ipmi_interfaces data structure. */ 361 static DEFINE_SPINLOCK(interfaces_lock); 362 363 /* List of watchers that want to know when smi's are added and 364 deleted. */ 365 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); 366 static DECLARE_RWSEM(smi_watchers_sem); 367 368 369 static void free_recv_msg_list(struct list_head *q) 370 { 371 struct ipmi_recv_msg *msg, *msg2; 372 373 list_for_each_entry_safe(msg, msg2, q, link) { 374 list_del(&msg->link); 375 ipmi_free_recv_msg(msg); 376 } 377 } 378 379 static void clean_up_interface_data(ipmi_smi_t intf) 380 { 381 int i; 382 struct cmd_rcvr *rcvr, *rcvr2; 383 struct list_head list; 384 385 free_recv_msg_list(&intf->waiting_msgs); 386 free_recv_msg_list(&intf->waiting_events); 387 388 /* Wholesale remove all the entries from the list in the 389 * interface and wait for RCU to know that none are in use. 
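	 * The list head itself is spliced onto a local list_head below,
	 * so the entries can be walked and freed without holding the
	 * mutex once synchronize_rcu() has returned.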
*/ 390 mutex_lock(&intf->cmd_rcvrs_mutex); 391 list_add_rcu(&list, &intf->cmd_rcvrs); 392 list_del_rcu(&intf->cmd_rcvrs); 393 mutex_unlock(&intf->cmd_rcvrs_mutex); 394 synchronize_rcu(); 395 396 list_for_each_entry_safe(rcvr, rcvr2, &list, link) 397 kfree(rcvr); 398 399 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 400 if ((intf->seq_table[i].inuse) 401 && (intf->seq_table[i].recv_msg)) 402 { 403 ipmi_free_recv_msg(intf->seq_table[i].recv_msg); 404 } 405 } 406 } 407 408 static void intf_free(struct kref *ref) 409 { 410 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount); 411 412 clean_up_interface_data(intf); 413 kfree(intf); 414 } 415 416 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 417 { 418 int i; 419 unsigned long flags; 420 421 down_write(&smi_watchers_sem); 422 list_add(&(watcher->link), &smi_watchers); 423 up_write(&smi_watchers_sem); 424 spin_lock_irqsave(&interfaces_lock, flags); 425 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 426 ipmi_smi_t intf = ipmi_interfaces[i]; 427 if (IPMI_INVALID_INTERFACE(intf)) 428 continue; 429 spin_unlock_irqrestore(&interfaces_lock, flags); 430 watcher->new_smi(i, intf->si_dev); 431 spin_lock_irqsave(&interfaces_lock, flags); 432 } 433 spin_unlock_irqrestore(&interfaces_lock, flags); 434 return 0; 435 } 436 437 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) 438 { 439 down_write(&smi_watchers_sem); 440 list_del(&(watcher->link)); 441 up_write(&smi_watchers_sem); 442 return 0; 443 } 444 445 static void 446 call_smi_watchers(int i, struct device *dev) 447 { 448 struct ipmi_smi_watcher *w; 449 450 down_read(&smi_watchers_sem); 451 list_for_each_entry(w, &smi_watchers, link) { 452 if (try_module_get(w->owner)) { 453 w->new_smi(i, dev); 454 module_put(w->owner); 455 } 456 } 457 up_read(&smi_watchers_sem); 458 } 459 460 static int 461 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) 462 { 463 if (addr1->addr_type != addr2->addr_type) 464 return 0; 465 466 if (addr1->channel != addr2->channel) 467 return 0; 468 469 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 470 struct ipmi_system_interface_addr *smi_addr1 471 = (struct ipmi_system_interface_addr *) addr1; 472 struct ipmi_system_interface_addr *smi_addr2 473 = (struct ipmi_system_interface_addr *) addr2; 474 return (smi_addr1->lun == smi_addr2->lun); 475 } 476 477 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) 478 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 479 { 480 struct ipmi_ipmb_addr *ipmb_addr1 481 = (struct ipmi_ipmb_addr *) addr1; 482 struct ipmi_ipmb_addr *ipmb_addr2 483 = (struct ipmi_ipmb_addr *) addr2; 484 485 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr) 486 && (ipmb_addr1->lun == ipmb_addr2->lun)); 487 } 488 489 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) { 490 struct ipmi_lan_addr *lan_addr1 491 = (struct ipmi_lan_addr *) addr1; 492 struct ipmi_lan_addr *lan_addr2 493 = (struct ipmi_lan_addr *) addr2; 494 495 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID) 496 && (lan_addr1->local_SWID == lan_addr2->local_SWID) 497 && (lan_addr1->session_handle 498 == lan_addr2->session_handle) 499 && (lan_addr1->lun == lan_addr2->lun)); 500 } 501 502 return 1; 503 } 504 505 int ipmi_validate_addr(struct ipmi_addr *addr, int len) 506 { 507 if (len < sizeof(struct ipmi_system_interface_addr)) { 508 return -EINVAL; 509 } 510 511 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 512 if (addr->channel != IPMI_BMC_CHANNEL) 513 return -EINVAL; 514 return 0; 515 } 516 517 if ((addr->channel == 
IPMI_BMC_CHANNEL) 518 || (addr->channel >= IPMI_MAX_CHANNELS) 519 || (addr->channel < 0)) 520 return -EINVAL; 521 522 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) 523 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 524 { 525 if (len < sizeof(struct ipmi_ipmb_addr)) { 526 return -EINVAL; 527 } 528 return 0; 529 } 530 531 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { 532 if (len < sizeof(struct ipmi_lan_addr)) { 533 return -EINVAL; 534 } 535 return 0; 536 } 537 538 return -EINVAL; 539 } 540 541 unsigned int ipmi_addr_length(int addr_type) 542 { 543 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 544 return sizeof(struct ipmi_system_interface_addr); 545 546 if ((addr_type == IPMI_IPMB_ADDR_TYPE) 547 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 548 { 549 return sizeof(struct ipmi_ipmb_addr); 550 } 551 552 if (addr_type == IPMI_LAN_ADDR_TYPE) 553 return sizeof(struct ipmi_lan_addr); 554 555 return 0; 556 } 557 558 static void deliver_response(struct ipmi_recv_msg *msg) 559 { 560 if (!msg->user) { 561 ipmi_smi_t intf = msg->user_msg_data; 562 unsigned long flags; 563 564 /* Special handling for NULL users. */ 565 if (intf->null_user_handler) { 566 intf->null_user_handler(intf, msg); 567 spin_lock_irqsave(&intf->counter_lock, flags); 568 intf->handled_local_responses++; 569 spin_unlock_irqrestore(&intf->counter_lock, flags); 570 } else { 571 /* No handler, so give up. */ 572 spin_lock_irqsave(&intf->counter_lock, flags); 573 intf->unhandled_local_responses++; 574 spin_unlock_irqrestore(&intf->counter_lock, flags); 575 } 576 ipmi_free_recv_msg(msg); 577 } else { 578 ipmi_user_t user = msg->user; 579 user->handler->ipmi_recv_hndl(msg, user->handler_data); 580 } 581 } 582 583 /* Find the next sequence number not being used and add the given 584 message with the given timeout to the sequence table. This must be 585 called with the interface's seq_lock held. */ 586 static int intf_next_seq(ipmi_smi_t intf, 587 struct ipmi_recv_msg *recv_msg, 588 unsigned long timeout, 589 int retries, 590 int broadcast, 591 unsigned char *seq, 592 long *seqid) 593 { 594 int rv = 0; 595 unsigned int i; 596 597 for (i = intf->curr_seq; 598 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 599 i = (i+1)%IPMI_IPMB_NUM_SEQ) 600 { 601 if (!intf->seq_table[i].inuse) 602 break; 603 } 604 605 if (!intf->seq_table[i].inuse) { 606 intf->seq_table[i].recv_msg = recv_msg; 607 608 /* Start with the maximum timeout, when the send response 609 comes in we will start the real timer. */ 610 intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; 611 intf->seq_table[i].orig_timeout = timeout; 612 intf->seq_table[i].retries_left = retries; 613 intf->seq_table[i].broadcast = broadcast; 614 intf->seq_table[i].inuse = 1; 615 intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid); 616 *seq = i; 617 *seqid = intf->seq_table[i].seqid; 618 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; 619 } else { 620 rv = -EAGAIN; 621 } 622 623 return rv; 624 } 625 626 /* Return the receive message for the given sequence number and 627 release the sequence number so it can be reused. Some other data 628 is passed in to be sure the message matches up correctly (to help 629 guard against message coming in after their timeout and the 630 sequence number being reused). 
*/ 631 static int intf_find_seq(ipmi_smi_t intf, 632 unsigned char seq, 633 short channel, 634 unsigned char cmd, 635 unsigned char netfn, 636 struct ipmi_addr *addr, 637 struct ipmi_recv_msg **recv_msg) 638 { 639 int rv = -ENODEV; 640 unsigned long flags; 641 642 if (seq >= IPMI_IPMB_NUM_SEQ) 643 return -EINVAL; 644 645 spin_lock_irqsave(&(intf->seq_lock), flags); 646 if (intf->seq_table[seq].inuse) { 647 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; 648 649 if ((msg->addr.channel == channel) 650 && (msg->msg.cmd == cmd) 651 && (msg->msg.netfn == netfn) 652 && (ipmi_addr_equal(addr, &(msg->addr)))) 653 { 654 *recv_msg = msg; 655 intf->seq_table[seq].inuse = 0; 656 rv = 0; 657 } 658 } 659 spin_unlock_irqrestore(&(intf->seq_lock), flags); 660 661 return rv; 662 } 663 664 665 /* Start the timer for a specific sequence table entry. */ 666 static int intf_start_seq_timer(ipmi_smi_t intf, 667 long msgid) 668 { 669 int rv = -ENODEV; 670 unsigned long flags; 671 unsigned char seq; 672 unsigned long seqid; 673 674 675 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 676 677 spin_lock_irqsave(&(intf->seq_lock), flags); 678 /* We do this verification because the user can be deleted 679 while a message is outstanding. */ 680 if ((intf->seq_table[seq].inuse) 681 && (intf->seq_table[seq].seqid == seqid)) 682 { 683 struct seq_table *ent = &(intf->seq_table[seq]); 684 ent->timeout = ent->orig_timeout; 685 rv = 0; 686 } 687 spin_unlock_irqrestore(&(intf->seq_lock), flags); 688 689 return rv; 690 } 691 692 /* Got an error for the send message for a specific sequence number. */ 693 static int intf_err_seq(ipmi_smi_t intf, 694 long msgid, 695 unsigned int err) 696 { 697 int rv = -ENODEV; 698 unsigned long flags; 699 unsigned char seq; 700 unsigned long seqid; 701 struct ipmi_recv_msg *msg = NULL; 702 703 704 GET_SEQ_FROM_MSGID(msgid, seq, seqid); 705 706 spin_lock_irqsave(&(intf->seq_lock), flags); 707 /* We do this verification because the user can be deleted 708 while a message is outstanding. */ 709 if ((intf->seq_table[seq].inuse) 710 && (intf->seq_table[seq].seqid == seqid)) 711 { 712 struct seq_table *ent = &(intf->seq_table[seq]); 713 714 ent->inuse = 0; 715 msg = ent->recv_msg; 716 rv = 0; 717 } 718 spin_unlock_irqrestore(&(intf->seq_lock), flags); 719 720 if (msg) { 721 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 722 msg->msg_data[0] = err; 723 msg->msg.netfn |= 1; /* Convert to a response. */ 724 msg->msg.data_len = 1; 725 msg->msg.data = msg->msg_data; 726 deliver_response(msg); 727 } 728 729 return rv; 730 } 731 732 733 int ipmi_create_user(unsigned int if_num, 734 struct ipmi_user_hndl *handler, 735 void *handler_data, 736 ipmi_user_t *user) 737 { 738 unsigned long flags; 739 ipmi_user_t new_user; 740 int rv = 0; 741 ipmi_smi_t intf; 742 743 /* There is no module usecount here, because it's not 744 required. Since this can only be used by and called from 745 other modules, they will implicitly use this module, and 746 thus this can't be removed unless the other modules are 747 removed. */ 748 749 if (handler == NULL) 750 return -EINVAL; 751 752 /* Make sure the driver is actually initialized, this handles 753 problems with initialization order. */ 754 if (!initialized) { 755 rv = ipmi_init_msghandler(); 756 if (rv) 757 return rv; 758 759 /* The init code doesn't return an error if it was turned 760 off, but it won't initialize. Check that. 
		 */
		if (!initialized)
			return -ENODEV;
	}

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	spin_lock_irqsave(&interfaces_lock, flags);
	/* Check the index before using it to index the array. */
	if (if_num >= MAX_IPMI_INTERFACES) {
		spin_unlock_irqrestore(&interfaces_lock, flags);
		rv = -EINVAL;
		goto out_kfree;
	}
	intf = ipmi_interfaces[if_num];
	if (IPMI_INVALID_INTERFACE(intf)) {
		spin_unlock_irqrestore(&interfaces_lock, flags);
		rv = -EINVAL;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);
	spin_unlock_irqrestore(&interfaces_lock, flags);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = 0;

	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	new_user->valid = 1;
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	*user = new_user;
	return 0;

 out_kref:
	kref_put(&intf->refcount, intf_free);
 out_kfree:
	kfree(new_user);
	return rv;
}

static void free_user(struct kref *ref)
{
	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
	kfree(user);
}

int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t       intf = user->intf;
	int              i;
	unsigned long    flags;
	struct cmd_rcvr  *rcvr;
	struct cmd_rcvr  *rcvrs = NULL;

	user->valid = 0;

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user))
		{
			intf->seq_table[i].inuse = 0;
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
848 */ 849 mutex_lock(&intf->cmd_rcvrs_mutex); 850 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { 851 if (rcvr->user == user) { 852 list_del_rcu(&rcvr->link); 853 rcvr->next = rcvrs; 854 rcvrs = rcvr; 855 } 856 } 857 mutex_unlock(&intf->cmd_rcvrs_mutex); 858 synchronize_rcu(); 859 while (rcvrs) { 860 rcvr = rcvrs; 861 rcvrs = rcvr->next; 862 kfree(rcvr); 863 } 864 865 module_put(intf->handlers->owner); 866 if (intf->handlers->dec_usecount) 867 intf->handlers->dec_usecount(intf->send_info); 868 869 kref_put(&intf->refcount, intf_free); 870 871 kref_put(&user->refcount, free_user); 872 873 return 0; 874 } 875 876 void ipmi_get_version(ipmi_user_t user, 877 unsigned char *major, 878 unsigned char *minor) 879 { 880 *major = ipmi_version_major(&user->intf->bmc->id); 881 *minor = ipmi_version_minor(&user->intf->bmc->id); 882 } 883 884 int ipmi_set_my_address(ipmi_user_t user, 885 unsigned int channel, 886 unsigned char address) 887 { 888 if (channel >= IPMI_MAX_CHANNELS) 889 return -EINVAL; 890 user->intf->channels[channel].address = address; 891 return 0; 892 } 893 894 int ipmi_get_my_address(ipmi_user_t user, 895 unsigned int channel, 896 unsigned char *address) 897 { 898 if (channel >= IPMI_MAX_CHANNELS) 899 return -EINVAL; 900 *address = user->intf->channels[channel].address; 901 return 0; 902 } 903 904 int ipmi_set_my_LUN(ipmi_user_t user, 905 unsigned int channel, 906 unsigned char LUN) 907 { 908 if (channel >= IPMI_MAX_CHANNELS) 909 return -EINVAL; 910 user->intf->channels[channel].lun = LUN & 0x3; 911 return 0; 912 } 913 914 int ipmi_get_my_LUN(ipmi_user_t user, 915 unsigned int channel, 916 unsigned char *address) 917 { 918 if (channel >= IPMI_MAX_CHANNELS) 919 return -EINVAL; 920 *address = user->intf->channels[channel].lun; 921 return 0; 922 } 923 924 int ipmi_set_gets_events(ipmi_user_t user, int val) 925 { 926 unsigned long flags; 927 ipmi_smi_t intf = user->intf; 928 struct ipmi_recv_msg *msg, *msg2; 929 struct list_head msgs; 930 931 INIT_LIST_HEAD(&msgs); 932 933 spin_lock_irqsave(&intf->events_lock, flags); 934 user->gets_events = val; 935 936 if (val) { 937 /* Deliver any queued events. */ 938 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) 939 list_move_tail(&msg->link, &msgs); 940 intf->waiting_events_count = 0; 941 } 942 943 /* Hold the events lock while doing this to preserve order. */ 944 list_for_each_entry_safe(msg, msg2, &msgs, link) { 945 msg->user = user; 946 kref_get(&user->refcount); 947 deliver_response(msg); 948 } 949 950 spin_unlock_irqrestore(&intf->events_lock, flags); 951 952 return 0; 953 } 954 955 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, 956 unsigned char netfn, 957 unsigned char cmd) 958 { 959 struct cmd_rcvr *rcvr; 960 961 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { 962 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) 963 return rcvr; 964 } 965 return NULL; 966 } 967 968 int ipmi_register_for_cmd(ipmi_user_t user, 969 unsigned char netfn, 970 unsigned char cmd) 971 { 972 ipmi_smi_t intf = user->intf; 973 struct cmd_rcvr *rcvr; 974 struct cmd_rcvr *entry; 975 int rv = 0; 976 977 978 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); 979 if (!rcvr) 980 return -ENOMEM; 981 rcvr->cmd = cmd; 982 rcvr->netfn = netfn; 983 rcvr->user = user; 984 985 mutex_lock(&intf->cmd_rcvrs_mutex); 986 /* Make sure the command/netfn is not already registered. 
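	   Only one receiver may claim a given netfn/cmd pair on an
	   interface; a second registration fails with -EBUSY.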
*/ 987 entry = find_cmd_rcvr(intf, netfn, cmd); 988 if (entry) { 989 rv = -EBUSY; 990 goto out_unlock; 991 } 992 993 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); 994 995 out_unlock: 996 mutex_unlock(&intf->cmd_rcvrs_mutex); 997 if (rv) 998 kfree(rcvr); 999 1000 return rv; 1001 } 1002 1003 int ipmi_unregister_for_cmd(ipmi_user_t user, 1004 unsigned char netfn, 1005 unsigned char cmd) 1006 { 1007 ipmi_smi_t intf = user->intf; 1008 struct cmd_rcvr *rcvr; 1009 1010 mutex_lock(&intf->cmd_rcvrs_mutex); 1011 /* Make sure the command/netfn is not already registered. */ 1012 rcvr = find_cmd_rcvr(intf, netfn, cmd); 1013 if ((rcvr) && (rcvr->user == user)) { 1014 list_del_rcu(&rcvr->link); 1015 mutex_unlock(&intf->cmd_rcvrs_mutex); 1016 synchronize_rcu(); 1017 kfree(rcvr); 1018 return 0; 1019 } else { 1020 mutex_unlock(&intf->cmd_rcvrs_mutex); 1021 return -ENOENT; 1022 } 1023 } 1024 1025 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) 1026 { 1027 ipmi_smi_t intf = user->intf; 1028 intf->handlers->set_run_to_completion(intf->send_info, val); 1029 } 1030 1031 static unsigned char 1032 ipmb_checksum(unsigned char *data, int size) 1033 { 1034 unsigned char csum = 0; 1035 1036 for (; size > 0; size--, data++) 1037 csum += *data; 1038 1039 return -csum; 1040 } 1041 1042 static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, 1043 struct kernel_ipmi_msg *msg, 1044 struct ipmi_ipmb_addr *ipmb_addr, 1045 long msgid, 1046 unsigned char ipmb_seq, 1047 int broadcast, 1048 unsigned char source_address, 1049 unsigned char source_lun) 1050 { 1051 int i = broadcast; 1052 1053 /* Format the IPMB header data. */ 1054 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1055 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1056 smi_msg->data[2] = ipmb_addr->channel; 1057 if (broadcast) 1058 smi_msg->data[3] = 0; 1059 smi_msg->data[i+3] = ipmb_addr->slave_addr; 1060 smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3); 1061 smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2); 1062 smi_msg->data[i+6] = source_address; 1063 smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun; 1064 smi_msg->data[i+8] = msg->cmd; 1065 1066 /* Now tack on the data to the message. */ 1067 if (msg->data_len > 0) 1068 memcpy(&(smi_msg->data[i+9]), msg->data, 1069 msg->data_len); 1070 smi_msg->data_size = msg->data_len + 9; 1071 1072 /* Now calculate the checksum and tack it on. */ 1073 smi_msg->data[i+smi_msg->data_size] 1074 = ipmb_checksum(&(smi_msg->data[i+6]), 1075 smi_msg->data_size-6); 1076 1077 /* Add on the checksum size and the offset from the 1078 broadcast. */ 1079 smi_msg->data_size += 1 + i; 1080 1081 smi_msg->msgid = msgid; 1082 } 1083 1084 static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, 1085 struct kernel_ipmi_msg *msg, 1086 struct ipmi_lan_addr *lan_addr, 1087 long msgid, 1088 unsigned char ipmb_seq, 1089 unsigned char source_lun) 1090 { 1091 /* Format the IPMB header data. */ 1092 smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 1093 smi_msg->data[1] = IPMI_SEND_MSG_CMD; 1094 smi_msg->data[2] = lan_addr->channel; 1095 smi_msg->data[3] = lan_addr->session_handle; 1096 smi_msg->data[4] = lan_addr->remote_SWID; 1097 smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3); 1098 smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2); 1099 smi_msg->data[7] = lan_addr->local_SWID; 1100 smi_msg->data[8] = (ipmb_seq << 2) | source_lun; 1101 smi_msg->data[9] = msg->cmd; 1102 1103 /* Now tack on the data to the message. 
*/ 1104 if (msg->data_len > 0) 1105 memcpy(&(smi_msg->data[10]), msg->data, 1106 msg->data_len); 1107 smi_msg->data_size = msg->data_len + 10; 1108 1109 /* Now calculate the checksum and tack it on. */ 1110 smi_msg->data[smi_msg->data_size] 1111 = ipmb_checksum(&(smi_msg->data[7]), 1112 smi_msg->data_size-7); 1113 1114 /* Add on the checksum size and the offset from the 1115 broadcast. */ 1116 smi_msg->data_size += 1; 1117 1118 smi_msg->msgid = msgid; 1119 } 1120 1121 /* Separate from ipmi_request so that the user does not have to be 1122 supplied in certain circumstances (mainly at panic time). If 1123 messages are supplied, they will be freed, even if an error 1124 occurs. */ 1125 static int i_ipmi_request(ipmi_user_t user, 1126 ipmi_smi_t intf, 1127 struct ipmi_addr *addr, 1128 long msgid, 1129 struct kernel_ipmi_msg *msg, 1130 void *user_msg_data, 1131 void *supplied_smi, 1132 struct ipmi_recv_msg *supplied_recv, 1133 int priority, 1134 unsigned char source_address, 1135 unsigned char source_lun, 1136 int retries, 1137 unsigned int retry_time_ms) 1138 { 1139 int rv = 0; 1140 struct ipmi_smi_msg *smi_msg; 1141 struct ipmi_recv_msg *recv_msg; 1142 unsigned long flags; 1143 1144 1145 if (supplied_recv) { 1146 recv_msg = supplied_recv; 1147 } else { 1148 recv_msg = ipmi_alloc_recv_msg(); 1149 if (recv_msg == NULL) { 1150 return -ENOMEM; 1151 } 1152 } 1153 recv_msg->user_msg_data = user_msg_data; 1154 1155 if (supplied_smi) { 1156 smi_msg = (struct ipmi_smi_msg *) supplied_smi; 1157 } else { 1158 smi_msg = ipmi_alloc_smi_msg(); 1159 if (smi_msg == NULL) { 1160 ipmi_free_recv_msg(recv_msg); 1161 return -ENOMEM; 1162 } 1163 } 1164 1165 recv_msg->user = user; 1166 if (user) 1167 kref_get(&user->refcount); 1168 recv_msg->msgid = msgid; 1169 /* Store the message to send in the receive message so timeout 1170 responses can get the proper response data. */ 1171 recv_msg->msg = *msg; 1172 1173 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { 1174 struct ipmi_system_interface_addr *smi_addr; 1175 1176 if (msg->netfn & 1) { 1177 /* Responses are not allowed to the SMI. */ 1178 rv = -EINVAL; 1179 goto out_err; 1180 } 1181 1182 smi_addr = (struct ipmi_system_interface_addr *) addr; 1183 if (smi_addr->lun > 3) { 1184 spin_lock_irqsave(&intf->counter_lock, flags); 1185 intf->sent_invalid_commands++; 1186 spin_unlock_irqrestore(&intf->counter_lock, flags); 1187 rv = -EINVAL; 1188 goto out_err; 1189 } 1190 1191 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr)); 1192 1193 if ((msg->netfn == IPMI_NETFN_APP_REQUEST) 1194 && ((msg->cmd == IPMI_SEND_MSG_CMD) 1195 || (msg->cmd == IPMI_GET_MSG_CMD) 1196 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) 1197 { 1198 /* We don't let the user do these, since we manage 1199 the sequence numbers. 
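			   (IPMI_SEND_MSG_CMD, IPMI_GET_MSG_CMD and
			   IPMI_READ_EVENT_MSG_BUFFER_CMD are used
			   internally by the IPMI driver code to route
			   messages and events.)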
*/ 1200 spin_lock_irqsave(&intf->counter_lock, flags); 1201 intf->sent_invalid_commands++; 1202 spin_unlock_irqrestore(&intf->counter_lock, flags); 1203 rv = -EINVAL; 1204 goto out_err; 1205 } 1206 1207 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { 1208 spin_lock_irqsave(&intf->counter_lock, flags); 1209 intf->sent_invalid_commands++; 1210 spin_unlock_irqrestore(&intf->counter_lock, flags); 1211 rv = -EMSGSIZE; 1212 goto out_err; 1213 } 1214 1215 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); 1216 smi_msg->data[1] = msg->cmd; 1217 smi_msg->msgid = msgid; 1218 smi_msg->user_data = recv_msg; 1219 if (msg->data_len > 0) 1220 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); 1221 smi_msg->data_size = msg->data_len + 2; 1222 spin_lock_irqsave(&intf->counter_lock, flags); 1223 intf->sent_local_commands++; 1224 spin_unlock_irqrestore(&intf->counter_lock, flags); 1225 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) 1226 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) 1227 { 1228 struct ipmi_ipmb_addr *ipmb_addr; 1229 unsigned char ipmb_seq; 1230 long seqid; 1231 int broadcast = 0; 1232 1233 if (addr->channel >= IPMI_MAX_CHANNELS) { 1234 spin_lock_irqsave(&intf->counter_lock, flags); 1235 intf->sent_invalid_commands++; 1236 spin_unlock_irqrestore(&intf->counter_lock, flags); 1237 rv = -EINVAL; 1238 goto out_err; 1239 } 1240 1241 if (intf->channels[addr->channel].medium 1242 != IPMI_CHANNEL_MEDIUM_IPMB) 1243 { 1244 spin_lock_irqsave(&intf->counter_lock, flags); 1245 intf->sent_invalid_commands++; 1246 spin_unlock_irqrestore(&intf->counter_lock, flags); 1247 rv = -EINVAL; 1248 goto out_err; 1249 } 1250 1251 if (retries < 0) { 1252 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) 1253 retries = 0; /* Don't retry broadcasts. */ 1254 else 1255 retries = 4; 1256 } 1257 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { 1258 /* Broadcasts add a zero at the beginning of the 1259 message, but otherwise is the same as an IPMB 1260 address. */ 1261 addr->addr_type = IPMI_IPMB_ADDR_TYPE; 1262 broadcast = 1; 1263 } 1264 1265 1266 /* Default to 1 second retries. */ 1267 if (retry_time_ms == 0) 1268 retry_time_ms = 1000; 1269 1270 /* 9 for the header and 1 for the checksum, plus 1271 possibly one for the broadcast. */ 1272 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { 1273 spin_lock_irqsave(&intf->counter_lock, flags); 1274 intf->sent_invalid_commands++; 1275 spin_unlock_irqrestore(&intf->counter_lock, flags); 1276 rv = -EMSGSIZE; 1277 goto out_err; 1278 } 1279 1280 ipmb_addr = (struct ipmi_ipmb_addr *) addr; 1281 if (ipmb_addr->lun > 3) { 1282 spin_lock_irqsave(&intf->counter_lock, flags); 1283 intf->sent_invalid_commands++; 1284 spin_unlock_irqrestore(&intf->counter_lock, flags); 1285 rv = -EINVAL; 1286 goto out_err; 1287 } 1288 1289 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); 1290 1291 if (recv_msg->msg.netfn & 0x1) { 1292 /* It's a response, so use the user's sequence 1293 from msgid. */ 1294 spin_lock_irqsave(&intf->counter_lock, flags); 1295 intf->sent_ipmb_responses++; 1296 spin_unlock_irqrestore(&intf->counter_lock, flags); 1297 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, 1298 msgid, broadcast, 1299 source_address, source_lun); 1300 1301 /* Save the receive message so we can use it 1302 to deliver the response. */ 1303 smi_msg->user_data = recv_msg; 1304 } else { 1305 /* It's a command, so get a sequence for it. 
*/ 1306 1307 spin_lock_irqsave(&(intf->seq_lock), flags); 1308 1309 spin_lock(&intf->counter_lock); 1310 intf->sent_ipmb_commands++; 1311 spin_unlock(&intf->counter_lock); 1312 1313 /* Create a sequence number with a 1 second 1314 timeout and 4 retries. */ 1315 rv = intf_next_seq(intf, 1316 recv_msg, 1317 retry_time_ms, 1318 retries, 1319 broadcast, 1320 &ipmb_seq, 1321 &seqid); 1322 if (rv) { 1323 /* We have used up all the sequence numbers, 1324 probably, so abort. */ 1325 spin_unlock_irqrestore(&(intf->seq_lock), 1326 flags); 1327 goto out_err; 1328 } 1329 1330 /* Store the sequence number in the message, 1331 so that when the send message response 1332 comes back we can start the timer. */ 1333 format_ipmb_msg(smi_msg, msg, ipmb_addr, 1334 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1335 ipmb_seq, broadcast, 1336 source_address, source_lun); 1337 1338 /* Copy the message into the recv message data, so we 1339 can retransmit it later if necessary. */ 1340 memcpy(recv_msg->msg_data, smi_msg->data, 1341 smi_msg->data_size); 1342 recv_msg->msg.data = recv_msg->msg_data; 1343 recv_msg->msg.data_len = smi_msg->data_size; 1344 1345 /* We don't unlock until here, because we need 1346 to copy the completed message into the 1347 recv_msg before we release the lock. 1348 Otherwise, race conditions may bite us. I 1349 know that's pretty paranoid, but I prefer 1350 to be correct. */ 1351 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1352 } 1353 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { 1354 struct ipmi_lan_addr *lan_addr; 1355 unsigned char ipmb_seq; 1356 long seqid; 1357 1358 if (addr->channel >= IPMI_MAX_CHANNELS) { 1359 spin_lock_irqsave(&intf->counter_lock, flags); 1360 intf->sent_invalid_commands++; 1361 spin_unlock_irqrestore(&intf->counter_lock, flags); 1362 rv = -EINVAL; 1363 goto out_err; 1364 } 1365 1366 if ((intf->channels[addr->channel].medium 1367 != IPMI_CHANNEL_MEDIUM_8023LAN) 1368 && (intf->channels[addr->channel].medium 1369 != IPMI_CHANNEL_MEDIUM_ASYNC)) 1370 { 1371 spin_lock_irqsave(&intf->counter_lock, flags); 1372 intf->sent_invalid_commands++; 1373 spin_unlock_irqrestore(&intf->counter_lock, flags); 1374 rv = -EINVAL; 1375 goto out_err; 1376 } 1377 1378 retries = 4; 1379 1380 /* Default to 1 second retries. */ 1381 if (retry_time_ms == 0) 1382 retry_time_ms = 1000; 1383 1384 /* 11 for the header and 1 for the checksum. */ 1385 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { 1386 spin_lock_irqsave(&intf->counter_lock, flags); 1387 intf->sent_invalid_commands++; 1388 spin_unlock_irqrestore(&intf->counter_lock, flags); 1389 rv = -EMSGSIZE; 1390 goto out_err; 1391 } 1392 1393 lan_addr = (struct ipmi_lan_addr *) addr; 1394 if (lan_addr->lun > 3) { 1395 spin_lock_irqsave(&intf->counter_lock, flags); 1396 intf->sent_invalid_commands++; 1397 spin_unlock_irqrestore(&intf->counter_lock, flags); 1398 rv = -EINVAL; 1399 goto out_err; 1400 } 1401 1402 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); 1403 1404 if (recv_msg->msg.netfn & 0x1) { 1405 /* It's a response, so use the user's sequence 1406 from msgid. */ 1407 spin_lock_irqsave(&intf->counter_lock, flags); 1408 intf->sent_lan_responses++; 1409 spin_unlock_irqrestore(&intf->counter_lock, flags); 1410 format_lan_msg(smi_msg, msg, lan_addr, msgid, 1411 msgid, source_lun); 1412 1413 /* Save the receive message so we can use it 1414 to deliver the response. */ 1415 smi_msg->user_data = recv_msg; 1416 } else { 1417 /* It's a command, so get a sequence for it. 
*/ 1418 1419 spin_lock_irqsave(&(intf->seq_lock), flags); 1420 1421 spin_lock(&intf->counter_lock); 1422 intf->sent_lan_commands++; 1423 spin_unlock(&intf->counter_lock); 1424 1425 /* Create a sequence number with a 1 second 1426 timeout and 4 retries. */ 1427 rv = intf_next_seq(intf, 1428 recv_msg, 1429 retry_time_ms, 1430 retries, 1431 0, 1432 &ipmb_seq, 1433 &seqid); 1434 if (rv) { 1435 /* We have used up all the sequence numbers, 1436 probably, so abort. */ 1437 spin_unlock_irqrestore(&(intf->seq_lock), 1438 flags); 1439 goto out_err; 1440 } 1441 1442 /* Store the sequence number in the message, 1443 so that when the send message response 1444 comes back we can start the timer. */ 1445 format_lan_msg(smi_msg, msg, lan_addr, 1446 STORE_SEQ_IN_MSGID(ipmb_seq, seqid), 1447 ipmb_seq, source_lun); 1448 1449 /* Copy the message into the recv message data, so we 1450 can retransmit it later if necessary. */ 1451 memcpy(recv_msg->msg_data, smi_msg->data, 1452 smi_msg->data_size); 1453 recv_msg->msg.data = recv_msg->msg_data; 1454 recv_msg->msg.data_len = smi_msg->data_size; 1455 1456 /* We don't unlock until here, because we need 1457 to copy the completed message into the 1458 recv_msg before we release the lock. 1459 Otherwise, race conditions may bite us. I 1460 know that's pretty paranoid, but I prefer 1461 to be correct. */ 1462 spin_unlock_irqrestore(&(intf->seq_lock), flags); 1463 } 1464 } else { 1465 /* Unknown address type. */ 1466 spin_lock_irqsave(&intf->counter_lock, flags); 1467 intf->sent_invalid_commands++; 1468 spin_unlock_irqrestore(&intf->counter_lock, flags); 1469 rv = -EINVAL; 1470 goto out_err; 1471 } 1472 1473 #ifdef DEBUG_MSGING 1474 { 1475 int m; 1476 for (m = 0; m < smi_msg->data_size; m++) 1477 printk(" %2.2x", smi_msg->data[m]); 1478 printk("\n"); 1479 } 1480 #endif 1481 intf->handlers->sender(intf->send_info, smi_msg, priority); 1482 1483 return 0; 1484 1485 out_err: 1486 ipmi_free_smi_msg(smi_msg); 1487 ipmi_free_recv_msg(recv_msg); 1488 return rv; 1489 } 1490 1491 static int check_addr(ipmi_smi_t intf, 1492 struct ipmi_addr *addr, 1493 unsigned char *saddr, 1494 unsigned char *lun) 1495 { 1496 if (addr->channel >= IPMI_MAX_CHANNELS) 1497 return -EINVAL; 1498 *lun = intf->channels[addr->channel].lun; 1499 *saddr = intf->channels[addr->channel].address; 1500 return 0; 1501 } 1502 1503 int ipmi_request_settime(ipmi_user_t user, 1504 struct ipmi_addr *addr, 1505 long msgid, 1506 struct kernel_ipmi_msg *msg, 1507 void *user_msg_data, 1508 int priority, 1509 int retries, 1510 unsigned int retry_time_ms) 1511 { 1512 unsigned char saddr, lun; 1513 int rv; 1514 1515 if (!user) 1516 return -EINVAL; 1517 rv = check_addr(user->intf, addr, &saddr, &lun); 1518 if (rv) 1519 return rv; 1520 return i_ipmi_request(user, 1521 user->intf, 1522 addr, 1523 msgid, 1524 msg, 1525 user_msg_data, 1526 NULL, NULL, 1527 priority, 1528 saddr, 1529 lun, 1530 retries, 1531 retry_time_ms); 1532 } 1533 1534 int ipmi_request_supply_msgs(ipmi_user_t user, 1535 struct ipmi_addr *addr, 1536 long msgid, 1537 struct kernel_ipmi_msg *msg, 1538 void *user_msg_data, 1539 void *supplied_smi, 1540 struct ipmi_recv_msg *supplied_recv, 1541 int priority) 1542 { 1543 unsigned char saddr, lun; 1544 int rv; 1545 1546 if (!user) 1547 return -EINVAL; 1548 rv = check_addr(user->intf, addr, &saddr, &lun); 1549 if (rv) 1550 return rv; 1551 return i_ipmi_request(user, 1552 user->intf, 1553 addr, 1554 msgid, 1555 msg, 1556 user_msg_data, 1557 supplied_smi, 1558 supplied_recv, 1559 priority, 1560 saddr, 1561 lun, 1562 
-1, 0); 1563 } 1564 1565 static int ipmb_file_read_proc(char *page, char **start, off_t off, 1566 int count, int *eof, void *data) 1567 { 1568 char *out = (char *) page; 1569 ipmi_smi_t intf = data; 1570 int i; 1571 int rv = 0; 1572 1573 for (i = 0; i < IPMI_MAX_CHANNELS; i++) 1574 rv += sprintf(out+rv, "%x ", intf->channels[i].address); 1575 out[rv-1] = '\n'; /* Replace the final space with a newline */ 1576 out[rv] = '\0'; 1577 rv++; 1578 return rv; 1579 } 1580 1581 static int version_file_read_proc(char *page, char **start, off_t off, 1582 int count, int *eof, void *data) 1583 { 1584 char *out = (char *) page; 1585 ipmi_smi_t intf = data; 1586 1587 return sprintf(out, "%d.%d\n", 1588 ipmi_version_major(&intf->bmc->id), 1589 ipmi_version_minor(&intf->bmc->id)); 1590 } 1591 1592 static int stat_file_read_proc(char *page, char **start, off_t off, 1593 int count, int *eof, void *data) 1594 { 1595 char *out = (char *) page; 1596 ipmi_smi_t intf = data; 1597 1598 out += sprintf(out, "sent_invalid_commands: %d\n", 1599 intf->sent_invalid_commands); 1600 out += sprintf(out, "sent_local_commands: %d\n", 1601 intf->sent_local_commands); 1602 out += sprintf(out, "handled_local_responses: %d\n", 1603 intf->handled_local_responses); 1604 out += sprintf(out, "unhandled_local_responses: %d\n", 1605 intf->unhandled_local_responses); 1606 out += sprintf(out, "sent_ipmb_commands: %d\n", 1607 intf->sent_ipmb_commands); 1608 out += sprintf(out, "sent_ipmb_command_errs: %d\n", 1609 intf->sent_ipmb_command_errs); 1610 out += sprintf(out, "retransmitted_ipmb_commands: %d\n", 1611 intf->retransmitted_ipmb_commands); 1612 out += sprintf(out, "timed_out_ipmb_commands: %d\n", 1613 intf->timed_out_ipmb_commands); 1614 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n", 1615 intf->timed_out_ipmb_broadcasts); 1616 out += sprintf(out, "sent_ipmb_responses: %d\n", 1617 intf->sent_ipmb_responses); 1618 out += sprintf(out, "handled_ipmb_responses: %d\n", 1619 intf->handled_ipmb_responses); 1620 out += sprintf(out, "invalid_ipmb_responses: %d\n", 1621 intf->invalid_ipmb_responses); 1622 out += sprintf(out, "unhandled_ipmb_responses: %d\n", 1623 intf->unhandled_ipmb_responses); 1624 out += sprintf(out, "sent_lan_commands: %d\n", 1625 intf->sent_lan_commands); 1626 out += sprintf(out, "sent_lan_command_errs: %d\n", 1627 intf->sent_lan_command_errs); 1628 out += sprintf(out, "retransmitted_lan_commands: %d\n", 1629 intf->retransmitted_lan_commands); 1630 out += sprintf(out, "timed_out_lan_commands: %d\n", 1631 intf->timed_out_lan_commands); 1632 out += sprintf(out, "sent_lan_responses: %d\n", 1633 intf->sent_lan_responses); 1634 out += sprintf(out, "handled_lan_responses: %d\n", 1635 intf->handled_lan_responses); 1636 out += sprintf(out, "invalid_lan_responses: %d\n", 1637 intf->invalid_lan_responses); 1638 out += sprintf(out, "unhandled_lan_responses: %d\n", 1639 intf->unhandled_lan_responses); 1640 out += sprintf(out, "handled_commands: %d\n", 1641 intf->handled_commands); 1642 out += sprintf(out, "invalid_commands: %d\n", 1643 intf->invalid_commands); 1644 out += sprintf(out, "unhandled_commands: %d\n", 1645 intf->unhandled_commands); 1646 out += sprintf(out, "invalid_events: %d\n", 1647 intf->invalid_events); 1648 out += sprintf(out, "events: %d\n", 1649 intf->events); 1650 1651 return (out - ((char *) page)); 1652 } 1653 1654 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 1655 read_proc_t *read_proc, write_proc_t *write_proc, 1656 void *data, struct module *owner) 1657 { 1658 int rv = 0; 1659 #ifdef 
CONFIG_PROC_FS
	struct proc_dir_entry  *file;
	struct ipmi_proc_entry *entry;

	/* Create a list element. */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
	if (!entry->name) {
		kfree(entry);
		return -ENOMEM;
	}
	strcpy(entry->name, name);

	file = create_proc_entry(name, 0, smi->proc_dir);
	if (!file) {
		kfree(entry->name);
		kfree(entry);
		rv = -ENOMEM;
	} else {
		file->nlink = 1;
		file->data = data;
		file->read_proc = read_proc;
		file->write_proc = write_proc;
		file->owner = owner;

		spin_lock(&smi->proc_entry_lock);
		/* Stick it on the list. */
		entry->next = smi->proc_entries;
		smi->proc_entries = entry;
		spin_unlock(&smi->proc_entry_lock);
	}
#endif /* CONFIG_PROC_FS */

	return rv;
}

static int add_proc_entries(ipmi_smi_t smi, int num)
{
	int rv = 0;

#ifdef CONFIG_PROC_FS
	sprintf(smi->proc_dir_name, "%d", num);
	smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
	if (!smi->proc_dir)
		rv = -ENOMEM;
	else {
		smi->proc_dir->owner = THIS_MODULE;
	}

	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "stats",
					     stat_file_read_proc, NULL,
					     smi, THIS_MODULE);

	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "ipmb",
					     ipmb_file_read_proc, NULL,
					     smi, THIS_MODULE);

	if (rv == 0)
		rv = ipmi_smi_add_proc_entry(smi, "version",
					     version_file_read_proc, NULL,
					     smi, THIS_MODULE);
#endif /* CONFIG_PROC_FS */

	return rv;
}

static void remove_proc_entries(ipmi_smi_t smi)
{
#ifdef CONFIG_PROC_FS
	struct ipmi_proc_entry *entry;

	spin_lock(&smi->proc_entry_lock);
	while (smi->proc_entries) {
		entry = smi->proc_entries;
		smi->proc_entries = entry->next;

		remove_proc_entry(entry->name, smi->proc_dir);
		kfree(entry->name);
		kfree(entry);
	}
	spin_unlock(&smi->proc_entry_lock);
	remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
}

static int __find_bmc_guid(struct device *dev, void *data)
{
	unsigned char *id = data;
	struct bmc_device *bmc = dev_get_drvdata(dev);
	return memcmp(bmc->guid, id, 16) == 0;
}

static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     unsigned char *guid)
{
	struct device *dev;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev)
		return dev_get_drvdata(dev);
	else
		return NULL;
}

struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, void *data)
{
	struct prod_dev_id *id = data;
	struct bmc_device *bmc = dev_get_drvdata(dev);

	/* Match on both the product id and the device id. */
	return (bmc->id.product_id == id->product_id
		&& bmc->id.device_id == id->device_id);
}

static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned char product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id  = device_id,
	};
	struct device *dev;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev)
		return dev_get_drvdata(dev);
	else
		return NULL;
}
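
/*
 * sysfs 'show' routines backing the read-only attribute files that
 * ipmi_bmc_register() creates on the "ipmi_bmc" platform device
 * (e.g. /sys/devices/platform/ipmi_bmc.<dev_id>/firmware_revision).
 */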
static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}

static ssize_t provides_dev_sdrs_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	/* Bit 7 of the device revision byte is the "provides SDRs" flag. */
	return snprintf(buf, 10, "%u\n",
			(bmc->id.device_revision & 0x80) >> 7);
}

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	/* The low four bits of the byte hold the device revision. */
	return snprintf(buf, 20, "%u\n",
			bmc->id.device_revision & 0x0F);
}

static ssize_t firmware_rev_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
			bmc->id.firmware_revision_2);
}

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "%u.%u\n",
			ipmi_version_major(&bmc->id),
			ipmi_version_minor(&bmc->id));
}

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "0x%02x\n",
			bmc->id.additional_device_support);
}

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			bmc->id.aux_firmware_revision[3],
			bmc->id.aux_firmware_revision[2],
			bmc->id.aux_firmware_revision[1],
			bmc->id.aux_firmware_revision[0]);
}

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = dev_get_drvdata(dev);

	return snprintf(buf, 100, "%Lx%Lx\n",
			(long long) bmc->guid[0],
			(long long) bmc->guid[8]);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc;

	bmc = container_of(ref, struct bmc_device, refcount);

	device_remove_file(&bmc->dev->dev,
			   &bmc->device_id_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->provides_dev_sdrs_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->revision_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->firmware_rev_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->version_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->add_dev_support_attr);
	device_remove_file(&bmc->dev->dev,
			   &bmc->manufacturer_id_attr);
device_remove_file(&bmc->dev->dev, 1921 &bmc->product_id_attr); 1922 if (bmc->id.aux_firmware_revision_set) 1923 device_remove_file(&bmc->dev->dev, 1924 &bmc->aux_firmware_rev_attr); 1925 if (bmc->guid_set) 1926 device_remove_file(&bmc->dev->dev, 1927 &bmc->guid_attr); 1928 platform_device_unregister(bmc->dev); 1929 kfree(bmc); 1930 } 1931 1932 static void ipmi_bmc_unregister(ipmi_smi_t intf) 1933 { 1934 struct bmc_device *bmc = intf->bmc; 1935 1936 sysfs_remove_link(&intf->si_dev->kobj, "bmc"); 1937 if (intf->my_dev_name) { 1938 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); 1939 kfree(intf->my_dev_name); 1940 intf->my_dev_name = NULL; 1941 } 1942 1943 mutex_lock(&ipmidriver_mutex); 1944 kref_put(&bmc->refcount, cleanup_bmc_device); 1945 mutex_unlock(&ipmidriver_mutex); 1946 } 1947 1948 static int ipmi_bmc_register(ipmi_smi_t intf) 1949 { 1950 int rv; 1951 struct bmc_device *bmc = intf->bmc; 1952 struct bmc_device *old_bmc; 1953 int size; 1954 char dummy[1]; 1955 1956 mutex_lock(&ipmidriver_mutex); 1957 1958 /* 1959 * Try to find if there is an bmc_device struct 1960 * representing the interfaced BMC already 1961 */ 1962 if (bmc->guid_set) 1963 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid); 1964 else 1965 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver, 1966 bmc->id.product_id, 1967 bmc->id.device_id); 1968 1969 /* 1970 * If there is already an bmc_device, free the new one, 1971 * otherwise register the new BMC device 1972 */ 1973 if (old_bmc) { 1974 kfree(bmc); 1975 intf->bmc = old_bmc; 1976 bmc = old_bmc; 1977 1978 kref_get(&bmc->refcount); 1979 mutex_unlock(&ipmidriver_mutex); 1980 1981 printk(KERN_INFO 1982 "ipmi: interfacing existing BMC (man_id: 0x%6.6x," 1983 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 1984 bmc->id.manufacturer_id, 1985 bmc->id.product_id, 1986 bmc->id.device_id); 1987 } else { 1988 bmc->dev = platform_device_alloc("ipmi_bmc", 1989 bmc->id.device_id); 1990 if (!bmc->dev) { 1991 printk(KERN_ERR 1992 "ipmi_msghandler:" 1993 " Unable to allocate platform device\n"); 1994 return -ENOMEM; 1995 } 1996 bmc->dev->dev.driver = &ipmidriver; 1997 dev_set_drvdata(&bmc->dev->dev, bmc); 1998 kref_init(&bmc->refcount); 1999 2000 rv = platform_device_register(bmc->dev); 2001 mutex_unlock(&ipmidriver_mutex); 2002 if (rv) { 2003 printk(KERN_ERR 2004 "ipmi_msghandler:" 2005 " Unable to register bmc device: %d\n", 2006 rv); 2007 /* Don't go to out_err, you can only do that if 2008 the device is registered already. 
*/ 2009 return rv; 2010 } 2011 2012 bmc->device_id_attr.attr.name = "device_id"; 2013 bmc->device_id_attr.attr.owner = THIS_MODULE; 2014 bmc->device_id_attr.attr.mode = S_IRUGO; 2015 bmc->device_id_attr.show = device_id_show; 2016 2017 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; 2018 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; 2019 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; 2020 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; 2021 2022 2023 bmc->revision_attr.attr.name = "revision"; 2024 bmc->revision_attr.attr.owner = THIS_MODULE; 2025 bmc->revision_attr.attr.mode = S_IRUGO; 2026 bmc->revision_attr.show = revision_show; 2027 2028 bmc->firmware_rev_attr.attr.name = "firmware_revision"; 2029 bmc->firmware_rev_attr.attr.owner = THIS_MODULE; 2030 bmc->firmware_rev_attr.attr.mode = S_IRUGO; 2031 bmc->firmware_rev_attr.show = firmware_rev_show; 2032 2033 bmc->version_attr.attr.name = "ipmi_version"; 2034 bmc->version_attr.attr.owner = THIS_MODULE; 2035 bmc->version_attr.attr.mode = S_IRUGO; 2036 bmc->version_attr.show = ipmi_version_show; 2037 2038 bmc->add_dev_support_attr.attr.name 2039 = "additional_device_support"; 2040 bmc->add_dev_support_attr.attr.owner = THIS_MODULE; 2041 bmc->add_dev_support_attr.attr.mode = S_IRUGO; 2042 bmc->add_dev_support_attr.show = add_dev_support_show; 2043 2044 bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; 2045 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; 2046 bmc->manufacturer_id_attr.attr.mode = S_IRUGO; 2047 bmc->manufacturer_id_attr.show = manufacturer_id_show; 2048 2049 bmc->product_id_attr.attr.name = "product_id"; 2050 bmc->product_id_attr.attr.owner = THIS_MODULE; 2051 bmc->product_id_attr.attr.mode = S_IRUGO; 2052 bmc->product_id_attr.show = product_id_show; 2053 2054 bmc->guid_attr.attr.name = "guid"; 2055 bmc->guid_attr.attr.owner = THIS_MODULE; 2056 bmc->guid_attr.attr.mode = S_IRUGO; 2057 bmc->guid_attr.show = guid_show; 2058 2059 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; 2060 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; 2061 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; 2062 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; 2063 2064 device_create_file(&bmc->dev->dev, 2065 &bmc->device_id_attr); 2066 device_create_file(&bmc->dev->dev, 2067 &bmc->provides_dev_sdrs_attr); 2068 device_create_file(&bmc->dev->dev, 2069 &bmc->revision_attr); 2070 device_create_file(&bmc->dev->dev, 2071 &bmc->firmware_rev_attr); 2072 device_create_file(&bmc->dev->dev, 2073 &bmc->version_attr); 2074 device_create_file(&bmc->dev->dev, 2075 &bmc->add_dev_support_attr); 2076 device_create_file(&bmc->dev->dev, 2077 &bmc->manufacturer_id_attr); 2078 device_create_file(&bmc->dev->dev, 2079 &bmc->product_id_attr); 2080 if (bmc->id.aux_firmware_revision_set) 2081 device_create_file(&bmc->dev->dev, 2082 &bmc->aux_firmware_rev_attr); 2083 if (bmc->guid_set) 2084 device_create_file(&bmc->dev->dev, 2085 &bmc->guid_attr); 2086 2087 printk(KERN_INFO 2088 "ipmi: Found new BMC (man_id: 0x%6.6x, " 2089 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2090 bmc->id.manufacturer_id, 2091 bmc->id.product_id, 2092 bmc->id.device_id); 2093 } 2094 2095 /* 2096 * create symlink from system interface device to bmc device 2097 * and back. 
2098 */ 2099 rv = sysfs_create_link(&intf->si_dev->kobj, 2100 &bmc->dev->dev.kobj, "bmc"); 2101 if (rv) { 2102 printk(KERN_ERR 2103 "ipmi_msghandler: Unable to create bmc symlink: %d\n", 2104 rv); 2105 goto out_err; 2106 } 2107 2108 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); 2109 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); 2110 if (!intf->my_dev_name) { 2111 rv = -ENOMEM; 2112 printk(KERN_ERR 2113 "ipmi_msghandler: allocate link from BMC: %d\n", 2114 rv); 2115 goto out_err; 2116 } 2117 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); 2118 2119 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, 2120 intf->my_dev_name); 2121 if (rv) { 2122 kfree(intf->my_dev_name); 2123 intf->my_dev_name = NULL; 2124 printk(KERN_ERR 2125 "ipmi_msghandler:" 2126 " Unable to create symlink to bmc: %d\n", 2127 rv); 2128 goto out_err; 2129 } 2130 2131 return 0; 2132 2133 out_err: 2134 ipmi_bmc_unregister(intf); 2135 return rv; 2136 } 2137 2138 static int 2139 send_guid_cmd(ipmi_smi_t intf, int chan) 2140 { 2141 struct kernel_ipmi_msg msg; 2142 struct ipmi_system_interface_addr si; 2143 2144 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2145 si.channel = IPMI_BMC_CHANNEL; 2146 si.lun = 0; 2147 2148 msg.netfn = IPMI_NETFN_APP_REQUEST; 2149 msg.cmd = IPMI_GET_DEVICE_GUID_CMD; 2150 msg.data = NULL; 2151 msg.data_len = 0; 2152 return i_ipmi_request(NULL, 2153 intf, 2154 (struct ipmi_addr *) &si, 2155 0, 2156 &msg, 2157 intf, 2158 NULL, 2159 NULL, 2160 0, 2161 intf->channels[0].address, 2162 intf->channels[0].lun, 2163 -1, 0); 2164 } 2165 2166 static void 2167 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 2168 { 2169 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2170 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) 2171 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) 2172 /* Not for me */ 2173 return; 2174 2175 if (msg->msg.data[0] != 0) { 2176 /* Error from getting the GUID, the BMC doesn't have one. */ 2177 intf->bmc->guid_set = 0; 2178 goto out; 2179 } 2180 2181 if (msg->msg.data_len < 17) { 2182 intf->bmc->guid_set = 0; 2183 printk(KERN_WARNING PFX 2184 "guid_handler: The GUID response from the BMC was too" 2185 " short, it was %d but should have been 17. Assuming" 2186 " GUID is not available.\n", 2187 msg->msg.data_len); 2188 goto out; 2189 } 2190 2191 memcpy(intf->bmc->guid, msg->msg.data, 16); 2192 intf->bmc->guid_set = 1; 2193 out: 2194 wake_up(&intf->waitq); 2195 } 2196 2197 static void 2198 get_guid(ipmi_smi_t intf) 2199 { 2200 int rv; 2201 2202 intf->bmc->guid_set = 0x2; 2203 intf->null_user_handler = guid_handler; 2204 rv = send_guid_cmd(intf, 0); 2205 if (rv) 2206 /* Send failed, no GUID available. 
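		   Clearing guid_set here makes the wait_event() below
		   return immediately instead of blocking.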
*/ 2207 intf->bmc->guid_set = 0; 2208 wait_event(intf->waitq, intf->bmc->guid_set != 2); 2209 intf->null_user_handler = NULL; 2210 } 2211 2212 static int 2213 send_channel_info_cmd(ipmi_smi_t intf, int chan) 2214 { 2215 struct kernel_ipmi_msg msg; 2216 unsigned char data[1]; 2217 struct ipmi_system_interface_addr si; 2218 2219 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2220 si.channel = IPMI_BMC_CHANNEL; 2221 si.lun = 0; 2222 2223 msg.netfn = IPMI_NETFN_APP_REQUEST; 2224 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD; 2225 msg.data = data; 2226 msg.data_len = 1; 2227 data[0] = chan; 2228 return i_ipmi_request(NULL, 2229 intf, 2230 (struct ipmi_addr *) &si, 2231 0, 2232 &msg, 2233 intf, 2234 NULL, 2235 NULL, 2236 0, 2237 intf->channels[0].address, 2238 intf->channels[0].lun, 2239 -1, 0); 2240 } 2241 2242 static void 2243 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 2244 { 2245 int rv = 0; 2246 int chan; 2247 2248 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 2249 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 2250 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) 2251 { 2252 /* It's the one we want */ 2253 if (msg->msg.data[0] != 0) { 2254 /* Got an error from the channel, just go on. */ 2255 2256 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { 2257 /* If the MC does not support this 2258 command, that is legal. We just 2259 assume it has one IPMB at channel 2260 zero. */ 2261 intf->channels[0].medium 2262 = IPMI_CHANNEL_MEDIUM_IPMB; 2263 intf->channels[0].protocol 2264 = IPMI_CHANNEL_PROTOCOL_IPMB; 2265 rv = -ENOSYS; 2266 2267 intf->curr_channel = IPMI_MAX_CHANNELS; 2268 wake_up(&intf->waitq); 2269 goto out; 2270 } 2271 goto next_channel; 2272 } 2273 if (msg->msg.data_len < 4) { 2274 /* Message not big enough, just go on. */ 2275 goto next_channel; 2276 } 2277 chan = intf->curr_channel; 2278 intf->channels[chan].medium = msg->msg.data[2] & 0x7f; 2279 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; 2280 2281 next_channel: 2282 intf->curr_channel++; 2283 if (intf->curr_channel >= IPMI_MAX_CHANNELS) 2284 wake_up(&intf->waitq); 2285 else 2286 rv = send_channel_info_cmd(intf, intf->curr_channel); 2287 2288 if (rv) { 2289 /* Got an error somehow, just give up. */ 2290 intf->curr_channel = IPMI_MAX_CHANNELS; 2291 wake_up(&intf->waitq); 2292 2293 printk(KERN_WARNING PFX 2294 "Error sending channel information: %d\n", 2295 rv); 2296 } 2297 } 2298 out: 2299 return; 2300 } 2301 2302 int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 2303 void *send_info, 2304 struct ipmi_device_id *device_id, 2305 struct device *si_dev, 2306 unsigned char slave_addr) 2307 { 2308 int i, j; 2309 int rv; 2310 ipmi_smi_t intf; 2311 unsigned long flags; 2312 int version_major; 2313 int version_minor; 2314 2315 version_major = ipmi_version_major(device_id); 2316 version_minor = ipmi_version_minor(device_id); 2317 2318 /* Make sure the driver is actually initialized, this handles 2319 problems with initialization order. */ 2320 if (!initialized) { 2321 rv = ipmi_init_msghandler(); 2322 if (rv) 2323 return rv; 2324 /* The init code doesn't return an error if it was turned 2325 off, but it won't initialize. Check that. 
*/ 2326 if (!initialized) 2327 return -ENODEV; 2328 } 2329 2330 intf = kmalloc(sizeof(*intf), GFP_KERNEL); 2331 if (!intf) 2332 return -ENOMEM; 2333 memset(intf, 0, sizeof(*intf)); 2334 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); 2335 if (!intf->bmc) { 2336 kfree(intf); 2337 return -ENOMEM; 2338 } 2339 intf->intf_num = -1; 2340 kref_init(&intf->refcount); 2341 intf->bmc->id = *device_id; 2342 intf->si_dev = si_dev; 2343 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 2344 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; 2345 intf->channels[j].lun = 2; 2346 } 2347 if (slave_addr != 0) 2348 intf->channels[0].address = slave_addr; 2349 INIT_LIST_HEAD(&intf->users); 2350 intf->handlers = handlers; 2351 intf->send_info = send_info; 2352 spin_lock_init(&intf->seq_lock); 2353 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 2354 intf->seq_table[j].inuse = 0; 2355 intf->seq_table[j].seqid = 0; 2356 } 2357 intf->curr_seq = 0; 2358 #ifdef CONFIG_PROC_FS 2359 spin_lock_init(&intf->proc_entry_lock); 2360 #endif 2361 spin_lock_init(&intf->waiting_msgs_lock); 2362 INIT_LIST_HEAD(&intf->waiting_msgs); 2363 spin_lock_init(&intf->events_lock); 2364 INIT_LIST_HEAD(&intf->waiting_events); 2365 intf->waiting_events_count = 0; 2366 mutex_init(&intf->cmd_rcvrs_mutex); 2367 INIT_LIST_HEAD(&intf->cmd_rcvrs); 2368 init_waitqueue_head(&intf->waitq); 2369 2370 spin_lock_init(&intf->counter_lock); 2371 intf->proc_dir = NULL; 2372 2373 rv = -ENOMEM; 2374 spin_lock_irqsave(&interfaces_lock, flags); 2375 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2376 if (ipmi_interfaces[i] == NULL) { 2377 intf->intf_num = i; 2378 /* Reserve the entry till we are done. */ 2379 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; 2380 rv = 0; 2381 break; 2382 } 2383 } 2384 spin_unlock_irqrestore(&interfaces_lock, flags); 2385 if (rv) 2386 goto out; 2387 2388 rv = handlers->start_processing(send_info, intf); 2389 if (rv) 2390 goto out; 2391 2392 get_guid(intf); 2393 2394 if ((version_major > 1) 2395 || ((version_major == 1) && (version_minor >= 5))) 2396 { 2397 /* Start scanning the channels to see what is 2398 available. */ 2399 intf->null_user_handler = channel_handler; 2400 intf->curr_channel = 0; 2401 rv = send_channel_info_cmd(intf, 0); 2402 if (rv) 2403 goto out; 2404 2405 /* Wait for the channel info to be read. */ 2406 wait_event(intf->waitq, 2407 intf->curr_channel >= IPMI_MAX_CHANNELS); 2408 intf->null_user_handler = NULL; 2409 } else { 2410 /* Assume a single IPMB channel at zero. 
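		   Pre-IPMI-1.5 controllers do not provide the Get Channel
		   Info command, so channel 0 is simply defaulted to the
		   IPMB medium and protocol.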
*/ 2411 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2412 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB; 2413 } 2414 2415 if (rv == 0) 2416 rv = add_proc_entries(intf, i); 2417 2418 rv = ipmi_bmc_register(intf); 2419 2420 out: 2421 if (rv) { 2422 if (intf->proc_dir) 2423 remove_proc_entries(intf); 2424 kref_put(&intf->refcount, intf_free); 2425 if (i < MAX_IPMI_INTERFACES) { 2426 spin_lock_irqsave(&interfaces_lock, flags); 2427 ipmi_interfaces[i] = NULL; 2428 spin_unlock_irqrestore(&interfaces_lock, flags); 2429 } 2430 } else { 2431 spin_lock_irqsave(&interfaces_lock, flags); 2432 ipmi_interfaces[i] = intf; 2433 spin_unlock_irqrestore(&interfaces_lock, flags); 2434 call_smi_watchers(i, intf->si_dev); 2435 } 2436 2437 return rv; 2438 } 2439 2440 int ipmi_unregister_smi(ipmi_smi_t intf) 2441 { 2442 int i; 2443 struct ipmi_smi_watcher *w; 2444 unsigned long flags; 2445 2446 ipmi_bmc_unregister(intf); 2447 2448 spin_lock_irqsave(&interfaces_lock, flags); 2449 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2450 if (ipmi_interfaces[i] == intf) { 2451 /* Set the interface number reserved until we 2452 * are done. */ 2453 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY; 2454 intf->intf_num = -1; 2455 break; 2456 } 2457 } 2458 spin_unlock_irqrestore(&interfaces_lock,flags); 2459 2460 if (i == MAX_IPMI_INTERFACES) 2461 return -ENODEV; 2462 2463 remove_proc_entries(intf); 2464 2465 /* Call all the watcher interfaces to tell them that 2466 an interface is gone. */ 2467 down_read(&smi_watchers_sem); 2468 list_for_each_entry(w, &smi_watchers, link) 2469 w->smi_gone(i); 2470 up_read(&smi_watchers_sem); 2471 2472 /* Allow the entry to be reused now. */ 2473 spin_lock_irqsave(&interfaces_lock, flags); 2474 ipmi_interfaces[i] = NULL; 2475 spin_unlock_irqrestore(&interfaces_lock,flags); 2476 2477 kref_put(&intf->refcount, intf_free); 2478 return 0; 2479 } 2480 2481 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, 2482 struct ipmi_smi_msg *msg) 2483 { 2484 struct ipmi_ipmb_addr ipmb_addr; 2485 struct ipmi_recv_msg *recv_msg; 2486 unsigned long flags; 2487 2488 2489 /* This is 11, not 10, because the response must contain a 2490 * completion code. */ 2491 if (msg->rsp_size < 11) { 2492 /* Message not big enough, just ignore it. */ 2493 spin_lock_irqsave(&intf->counter_lock, flags); 2494 intf->invalid_ipmb_responses++; 2495 spin_unlock_irqrestore(&intf->counter_lock, flags); 2496 return 0; 2497 } 2498 2499 if (msg->rsp[2] != 0) { 2500 /* An error getting the response, just ignore it. */ 2501 return 0; 2502 } 2503 2504 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE; 2505 ipmb_addr.slave_addr = msg->rsp[6]; 2506 ipmb_addr.channel = msg->rsp[3] & 0x0f; 2507 ipmb_addr.lun = msg->rsp[7] & 3; 2508 2509 /* It's a response from a remote entity. Look up the sequence 2510 number and handle the response. */ 2511 if (intf_find_seq(intf, 2512 msg->rsp[7] >> 2, 2513 msg->rsp[3] & 0x0f, 2514 msg->rsp[8], 2515 (msg->rsp[4] >> 2) & (~1), 2516 (struct ipmi_addr *) &(ipmb_addr), 2517 &recv_msg)) 2518 { 2519 /* We were unable to find the sequence number, 2520 so just nuke the message. */ 2521 spin_lock_irqsave(&intf->counter_lock, flags); 2522 intf->unhandled_ipmb_responses++; 2523 spin_unlock_irqrestore(&intf->counter_lock, flags); 2524 return 0; 2525 } 2526 2527 memcpy(recv_msg->msg_data, 2528 &(msg->rsp[9]), 2529 msg->rsp_size - 9); 2530 /* THe other fields matched, so no need to set them, except 2531 for netfn, which needs to be the response that was 2532 returned, not the request value. 
*/ 2533 recv_msg->msg.netfn = msg->rsp[4] >> 2; 2534 recv_msg->msg.data = recv_msg->msg_data; 2535 recv_msg->msg.data_len = msg->rsp_size - 10; 2536 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 2537 spin_lock_irqsave(&intf->counter_lock, flags); 2538 intf->handled_ipmb_responses++; 2539 spin_unlock_irqrestore(&intf->counter_lock, flags); 2540 deliver_response(recv_msg); 2541 2542 return 0; 2543 } 2544 2545 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, 2546 struct ipmi_smi_msg *msg) 2547 { 2548 struct cmd_rcvr *rcvr; 2549 int rv = 0; 2550 unsigned char netfn; 2551 unsigned char cmd; 2552 ipmi_user_t user = NULL; 2553 struct ipmi_ipmb_addr *ipmb_addr; 2554 struct ipmi_recv_msg *recv_msg; 2555 unsigned long flags; 2556 2557 if (msg->rsp_size < 10) { 2558 /* Message not big enough, just ignore it. */ 2559 spin_lock_irqsave(&intf->counter_lock, flags); 2560 intf->invalid_commands++; 2561 spin_unlock_irqrestore(&intf->counter_lock, flags); 2562 return 0; 2563 } 2564 2565 if (msg->rsp[2] != 0) { 2566 /* An error getting the response, just ignore it. */ 2567 return 0; 2568 } 2569 2570 netfn = msg->rsp[4] >> 2; 2571 cmd = msg->rsp[8]; 2572 2573 rcu_read_lock(); 2574 rcvr = find_cmd_rcvr(intf, netfn, cmd); 2575 if (rcvr) { 2576 user = rcvr->user; 2577 kref_get(&user->refcount); 2578 } else 2579 user = NULL; 2580 rcu_read_unlock(); 2581 2582 if (user == NULL) { 2583 /* We didn't find a user, deliver an error response. */ 2584 spin_lock_irqsave(&intf->counter_lock, flags); 2585 intf->unhandled_commands++; 2586 spin_unlock_irqrestore(&intf->counter_lock, flags); 2587 2588 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 2589 msg->data[1] = IPMI_SEND_MSG_CMD; 2590 msg->data[2] = msg->rsp[3]; 2591 msg->data[3] = msg->rsp[6]; 2592 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 2593 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 2594 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; 2595 /* rqseq/lun */ 2596 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 2597 msg->data[8] = msg->rsp[8]; /* cmd */ 2598 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; 2599 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); 2600 msg->data_size = 11; 2601 2602 #ifdef DEBUG_MSGING 2603 { 2604 int m; 2605 printk("Invalid command:"); 2606 for (m = 0; m < msg->data_size; m++) 2607 printk(" %2.2x", msg->data[m]); 2608 printk("\n"); 2609 } 2610 #endif 2611 intf->handlers->sender(intf->send_info, msg, 0); 2612 2613 rv = -1; /* We used the message, so return the value that 2614 causes it to not be freed or queued. */ 2615 } else { 2616 /* Deliver the message to the user. */ 2617 spin_lock_irqsave(&intf->counter_lock, flags); 2618 intf->handled_commands++; 2619 spin_unlock_irqrestore(&intf->counter_lock, flags); 2620 2621 recv_msg = ipmi_alloc_recv_msg(); 2622 if (!recv_msg) { 2623 /* We couldn't allocate memory for the 2624 message, so requeue it for handling 2625 later. */ 2626 rv = 1; 2627 kref_put(&user->refcount, free_user); 2628 } else { 2629 /* Extract the source address from the data. 
*/ 2630 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 2631 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; 2632 ipmb_addr->slave_addr = msg->rsp[6]; 2633 ipmb_addr->lun = msg->rsp[7] & 3; 2634 ipmb_addr->channel = msg->rsp[3] & 0xf; 2635 2636 /* Extract the rest of the message information 2637 from the IPMB header.*/ 2638 recv_msg->user = user; 2639 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 2640 recv_msg->msgid = msg->rsp[7] >> 2; 2641 recv_msg->msg.netfn = msg->rsp[4] >> 2; 2642 recv_msg->msg.cmd = msg->rsp[8]; 2643 recv_msg->msg.data = recv_msg->msg_data; 2644 2645 /* We chop off 10, not 9 bytes because the checksum 2646 at the end also needs to be removed. */ 2647 recv_msg->msg.data_len = msg->rsp_size - 10; 2648 memcpy(recv_msg->msg_data, 2649 &(msg->rsp[9]), 2650 msg->rsp_size - 10); 2651 deliver_response(recv_msg); 2652 } 2653 } 2654 2655 return rv; 2656 } 2657 2658 static int handle_lan_get_msg_rsp(ipmi_smi_t intf, 2659 struct ipmi_smi_msg *msg) 2660 { 2661 struct ipmi_lan_addr lan_addr; 2662 struct ipmi_recv_msg *recv_msg; 2663 unsigned long flags; 2664 2665 2666 /* This is 13, not 12, because the response must contain a 2667 * completion code. */ 2668 if (msg->rsp_size < 13) { 2669 /* Message not big enough, just ignore it. */ 2670 spin_lock_irqsave(&intf->counter_lock, flags); 2671 intf->invalid_lan_responses++; 2672 spin_unlock_irqrestore(&intf->counter_lock, flags); 2673 return 0; 2674 } 2675 2676 if (msg->rsp[2] != 0) { 2677 /* An error getting the response, just ignore it. */ 2678 return 0; 2679 } 2680 2681 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE; 2682 lan_addr.session_handle = msg->rsp[4]; 2683 lan_addr.remote_SWID = msg->rsp[8]; 2684 lan_addr.local_SWID = msg->rsp[5]; 2685 lan_addr.channel = msg->rsp[3] & 0x0f; 2686 lan_addr.privilege = msg->rsp[3] >> 4; 2687 lan_addr.lun = msg->rsp[9] & 3; 2688 2689 /* It's a response from a remote entity. Look up the sequence 2690 number and handle the response. */ 2691 if (intf_find_seq(intf, 2692 msg->rsp[9] >> 2, 2693 msg->rsp[3] & 0x0f, 2694 msg->rsp[10], 2695 (msg->rsp[6] >> 2) & (~1), 2696 (struct ipmi_addr *) &(lan_addr), 2697 &recv_msg)) 2698 { 2699 /* We were unable to find the sequence number, 2700 so just nuke the message. */ 2701 spin_lock_irqsave(&intf->counter_lock, flags); 2702 intf->unhandled_lan_responses++; 2703 spin_unlock_irqrestore(&intf->counter_lock, flags); 2704 return 0; 2705 } 2706 2707 memcpy(recv_msg->msg_data, 2708 &(msg->rsp[11]), 2709 msg->rsp_size - 11); 2710 /* The other fields matched, so no need to set them, except 2711 for netfn, which needs to be the response that was 2712 returned, not the request value. */ 2713 recv_msg->msg.netfn = msg->rsp[6] >> 2; 2714 recv_msg->msg.data = recv_msg->msg_data; 2715 recv_msg->msg.data_len = msg->rsp_size - 12; 2716 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 2717 spin_lock_irqsave(&intf->counter_lock, flags); 2718 intf->handled_lan_responses++; 2719 spin_unlock_irqrestore(&intf->counter_lock, flags); 2720 deliver_response(recv_msg); 2721 2722 return 0; 2723 } 2724 2725 static int handle_lan_get_msg_cmd(ipmi_smi_t intf, 2726 struct ipmi_smi_msg *msg) 2727 { 2728 struct cmd_rcvr *rcvr; 2729 int rv = 0; 2730 unsigned char netfn; 2731 unsigned char cmd; 2732 ipmi_user_t user = NULL; 2733 struct ipmi_lan_addr *lan_addr; 2734 struct ipmi_recv_msg *recv_msg; 2735 unsigned long flags; 2736 2737 if (msg->rsp_size < 12) { 2738 /* Message not big enough, just ignore it. 
*/ 2739 spin_lock_irqsave(&intf->counter_lock, flags); 2740 intf->invalid_commands++; 2741 spin_unlock_irqrestore(&intf->counter_lock, flags); 2742 return 0; 2743 } 2744 2745 if (msg->rsp[2] != 0) { 2746 /* An error getting the response, just ignore it. */ 2747 return 0; 2748 } 2749 2750 netfn = msg->rsp[6] >> 2; 2751 cmd = msg->rsp[10]; 2752 2753 rcu_read_lock(); 2754 rcvr = find_cmd_rcvr(intf, netfn, cmd); 2755 if (rcvr) { 2756 user = rcvr->user; 2757 kref_get(&user->refcount); 2758 } else 2759 user = NULL; 2760 rcu_read_unlock(); 2761 2762 if (user == NULL) { 2763 /* We didn't find a user, just give up. */ 2764 spin_lock_irqsave(&intf->counter_lock, flags); 2765 intf->unhandled_commands++; 2766 spin_unlock_irqrestore(&intf->counter_lock, flags); 2767 2768 rv = 0; /* Don't do anything with these messages, just 2769 allow them to be freed. */ 2770 } else { 2771 /* Deliver the message to the user. */ 2772 spin_lock_irqsave(&intf->counter_lock, flags); 2773 intf->handled_commands++; 2774 spin_unlock_irqrestore(&intf->counter_lock, flags); 2775 2776 recv_msg = ipmi_alloc_recv_msg(); 2777 if (!recv_msg) { 2778 /* We couldn't allocate memory for the 2779 message, so requeue it for handling 2780 later. */ 2781 rv = 1; 2782 kref_put(&user->refcount, free_user); 2783 } else { 2784 /* Extract the source address from the data. */ 2785 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 2786 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; 2787 lan_addr->session_handle = msg->rsp[4]; 2788 lan_addr->remote_SWID = msg->rsp[8]; 2789 lan_addr->local_SWID = msg->rsp[5]; 2790 lan_addr->lun = msg->rsp[9] & 3; 2791 lan_addr->channel = msg->rsp[3] & 0xf; 2792 lan_addr->privilege = msg->rsp[3] >> 4; 2793 2794 /* Extract the rest of the message information 2795 from the IPMB header.*/ 2796 recv_msg->user = user; 2797 recv_msg->recv_type = IPMI_CMD_RECV_TYPE; 2798 recv_msg->msgid = msg->rsp[9] >> 2; 2799 recv_msg->msg.netfn = msg->rsp[6] >> 2; 2800 recv_msg->msg.cmd = msg->rsp[10]; 2801 recv_msg->msg.data = recv_msg->msg_data; 2802 2803 /* We chop off 12, not 11 bytes because the checksum 2804 at the end also needs to be removed. */ 2805 recv_msg->msg.data_len = msg->rsp_size - 12; 2806 memcpy(recv_msg->msg_data, 2807 &(msg->rsp[11]), 2808 msg->rsp_size - 12); 2809 deliver_response(recv_msg); 2810 } 2811 } 2812 2813 return rv; 2814 } 2815 2816 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, 2817 struct ipmi_smi_msg *msg) 2818 { 2819 struct ipmi_system_interface_addr *smi_addr; 2820 2821 recv_msg->msgid = 0; 2822 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); 2823 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 2824 smi_addr->channel = IPMI_BMC_CHANNEL; 2825 smi_addr->lun = msg->rsp[0] & 3; 2826 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE; 2827 recv_msg->msg.netfn = msg->rsp[0] >> 2; 2828 recv_msg->msg.cmd = msg->rsp[1]; 2829 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3); 2830 recv_msg->msg.data = recv_msg->msg_data; 2831 recv_msg->msg.data_len = msg->rsp_size - 3; 2832 } 2833 2834 static int handle_read_event_rsp(ipmi_smi_t intf, 2835 struct ipmi_smi_msg *msg) 2836 { 2837 struct ipmi_recv_msg *recv_msg, *recv_msg2; 2838 struct list_head msgs; 2839 ipmi_user_t user; 2840 int rv = 0; 2841 int deliver_count = 0; 2842 unsigned long flags; 2843 2844 if (msg->rsp_size < 19) { 2845 /* Message is too small to be an IPMB event. 
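	   A Read Event Message Buffer response is the netfn/LUN byte,
	   the command byte and a completion code followed by a 16-byte
	   event record, 19 bytes in all.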
*/
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);

	spin_lock(&intf->counter_lock);
	intf->events++;
	spin_unlock(&intf->counter_lock);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in the queue if
		   there are not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else {
		/* There are too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}

static int handle_bmc_rsp(ipmi_smi_t intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;
	struct ipmi_user *user;

	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		printk(KERN_WARNING
		       "IPMI message received with no owner. This\n"
		       "could be because of a malformed message, or\n"
		       "because of a hardware error.  Contact your\n"
		       "hardware vendor for assistance.\n");
		return 0;
	}

	user = recv_msg->user;
	/* Make sure the user still exists. */
	if (user && !user->valid) {
		/* The user for the message went away, so give up. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		ipmi_free_recv_msg(recv_msg);
	} else {
		struct ipmi_system_interface_addr *smi_addr;

		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_local_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		recv_msg->msgid = msg->msgid;
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &(recv_msg->addr));
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		memcpy(recv_msg->msg_data,
		       &(msg->rsp[2]),
		       msg->rsp_size - 2);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = msg->rsp_size - 2;
		deliver_response(recv_msg);
	}

	return 0;
}

/* Handle a new message.  Return 1 if the message should be requeued,
   0 if the message should be freed, or -1 if the message should not
   be freed or requeued. */
static int handle_new_recv_msg(ipmi_smi_t intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue;
	int chan;

#ifdef DEBUG_MSGING
	int m;
	printk("Recv:");
	for (m = 0; m < msg->rsp_size; m++)
		printk(" %2.2x", msg->rsp[m]);
	printk("\n");
#endif
	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		printk(KERN_WARNING PFX "BMC returned too small a message"
		       " for netfn %x cmd %x, got %d bytes\n",
		       (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) /* Netfn */
		   || (msg->rsp[1] != msg->data[1]))		   /* Command */
	{
		/* The response is not even marginally correct. */
		printk(KERN_WARNING PFX "BMC returned incorrect response,"
		       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
		       (msg->data[0] >> 2) | 1, msg->data[1],
		       msg->rsp[0] >> 2, msg->rsp[1]);

		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	}

	if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
	    && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data != NULL))
	{
		/* It's a response to a response we sent.  For this we
		   deliver a send message response to the user. */
		struct ipmi_recv_msg *recv_msg = msg->user_data;

		requeue = 0;
		if (msg->rsp_size < 2)
			/* Message is too small to be correct. */
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;

		if (!recv_msg)
			goto out;

		/* Make sure the user still exists. */
		if (!recv_msg->user || !recv_msg->user->valid)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = 1;
		recv_msg->msg_data[0] = msg->rsp[2];
		deliver_response(recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
	{
		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		switch (intf->channels[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/* It's a response, so find the
				   requesting message and send it up. */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/* It's a command to the SMS from some other
				   entity.  Handle that. */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/* We don't handle the channel type, so just
			 * free the message. */
			requeue = 0;
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
	{
		/* It's an asynchronous event. */
		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags;
	int rv;

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL))
	{
		/* This is the local response to a command send, start
		   the timer for these.  The user_data will not be
		   NULL if this is a response send, and we will let
		   response sends just go through. */

		/* Check for errors, if we get certain errors (ones
		   that mean basically we can try again later), we
		   ignore them and start the timer.  Otherwise we
		   report the error immediately. */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
		{
			int chan = msg->rsp[3] & 0xf;

			/* Got an error sending the message, handle it. */
			spin_lock_irqsave(&intf->counter_lock, flags);
			if (chan >= IPMI_MAX_CHANNELS)
				; /* This shouldn't happen */
			else if ((intf->channels[chan].medium
				  == IPMI_CHANNEL_MEDIUM_8023LAN)
				 || (intf->channels[chan].medium
				     == IPMI_CHANNEL_MEDIUM_ASYNC))
				intf->sent_lan_command_errs++;
			else
				intf->sent_ipmb_command_errs++;
			spin_unlock_irqrestore(&intf->counter_lock, flags);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else {
			/* The message was sent, start the timer.
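			   The periodic timeout handler will age this
			   sequence-table entry and retransmit or expire
			   it as needed.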
*/ 3154 intf_start_seq_timer(intf, msg->msgid); 3155 } 3156 3157 ipmi_free_smi_msg(msg); 3158 goto out; 3159 } 3160 3161 /* To preserve message order, if the list is not empty, we 3162 tack this message onto the end of the list. */ 3163 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3164 if (!list_empty(&intf->waiting_msgs)) { 3165 list_add_tail(&msg->link, &intf->waiting_msgs); 3166 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3167 goto out; 3168 } 3169 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3170 3171 rv = handle_new_recv_msg(intf, msg); 3172 if (rv > 0) { 3173 /* Could not handle the message now, just add it to a 3174 list to handle later. */ 3175 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3176 list_add_tail(&msg->link, &intf->waiting_msgs); 3177 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3178 } else if (rv == 0) { 3179 ipmi_free_smi_msg(msg); 3180 } 3181 3182 out: 3183 return; 3184 } 3185 3186 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3187 { 3188 ipmi_user_t user; 3189 3190 rcu_read_lock(); 3191 list_for_each_entry_rcu(user, &intf->users, link) { 3192 if (!user->handler->ipmi_watchdog_pretimeout) 3193 continue; 3194 3195 user->handler->ipmi_watchdog_pretimeout(user->handler_data); 3196 } 3197 rcu_read_unlock(); 3198 } 3199 3200 static void 3201 handle_msg_timeout(struct ipmi_recv_msg *msg) 3202 { 3203 msg->recv_type = IPMI_RESPONSE_RECV_TYPE; 3204 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE; 3205 msg->msg.netfn |= 1; /* Convert to a response. */ 3206 msg->msg.data_len = 1; 3207 msg->msg.data = msg->msg_data; 3208 deliver_response(msg); 3209 } 3210 3211 static struct ipmi_smi_msg * 3212 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, 3213 unsigned char seq, long seqid) 3214 { 3215 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); 3216 if (!smi_msg) 3217 /* If we can't allocate the message, then just return, we 3218 get 4 retries, so this should be ok. */ 3219 return NULL; 3220 3221 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); 3222 smi_msg->data_size = recv_msg->msg.data_len; 3223 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); 3224 3225 #ifdef DEBUG_MSGING 3226 { 3227 int m; 3228 printk("Resend: "); 3229 for (m = 0; m < smi_msg->data_size; m++) 3230 printk(" %2.2x", smi_msg->data[m]); 3231 printk("\n"); 3232 } 3233 #endif 3234 return smi_msg; 3235 } 3236 3237 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, 3238 struct list_head *timeouts, long timeout_period, 3239 int slot, unsigned long *flags) 3240 { 3241 struct ipmi_recv_msg *msg; 3242 3243 if (!ent->inuse) 3244 return; 3245 3246 ent->timeout -= timeout_period; 3247 if (ent->timeout > 0) 3248 return; 3249 3250 if (ent->retries_left == 0) { 3251 /* The message has used all its retries. */ 3252 ent->inuse = 0; 3253 msg = ent->recv_msg; 3254 list_add_tail(&msg->link, timeouts); 3255 spin_lock(&intf->counter_lock); 3256 if (ent->broadcast) 3257 intf->timed_out_ipmb_broadcasts++; 3258 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3259 intf->timed_out_lan_commands++; 3260 else 3261 intf->timed_out_ipmb_commands++; 3262 spin_unlock(&intf->counter_lock); 3263 } else { 3264 struct ipmi_smi_msg *smi_msg; 3265 /* More retries, send again. */ 3266 3267 /* Start with the max timer, set to normal 3268 timer after the message is sent. 
*/ 3269 ent->timeout = MAX_MSG_TIMEOUT; 3270 ent->retries_left--; 3271 spin_lock(&intf->counter_lock); 3272 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) 3273 intf->retransmitted_lan_commands++; 3274 else 3275 intf->retransmitted_ipmb_commands++; 3276 spin_unlock(&intf->counter_lock); 3277 3278 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, 3279 ent->seqid); 3280 if (!smi_msg) 3281 return; 3282 3283 spin_unlock_irqrestore(&intf->seq_lock, *flags); 3284 /* Send the new message. We send with a zero 3285 * priority. It timed out, I doubt time is 3286 * that critical now, and high priority 3287 * messages are really only for messages to the 3288 * local MC, which don't get resent. */ 3289 intf->handlers->sender(intf->send_info, 3290 smi_msg, 0); 3291 spin_lock_irqsave(&intf->seq_lock, *flags); 3292 } 3293 } 3294 3295 static void ipmi_timeout_handler(long timeout_period) 3296 { 3297 ipmi_smi_t intf; 3298 struct list_head timeouts; 3299 struct ipmi_recv_msg *msg, *msg2; 3300 struct ipmi_smi_msg *smi_msg, *smi_msg2; 3301 unsigned long flags; 3302 int i, j; 3303 3304 INIT_LIST_HEAD(&timeouts); 3305 3306 spin_lock(&interfaces_lock); 3307 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3308 intf = ipmi_interfaces[i]; 3309 if (IPMI_INVALID_INTERFACE(intf)) 3310 continue; 3311 kref_get(&intf->refcount); 3312 spin_unlock(&interfaces_lock); 3313 3314 /* See if any waiting messages need to be processed. */ 3315 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3316 list_for_each_entry_safe(smi_msg, smi_msg2, 3317 &intf->waiting_msgs, link) { 3318 if (!handle_new_recv_msg(intf, smi_msg)) { 3319 list_del(&smi_msg->link); 3320 ipmi_free_smi_msg(smi_msg); 3321 } else { 3322 /* To preserve message order, quit if we 3323 can't handle a message. */ 3324 break; 3325 } 3326 } 3327 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3328 3329 /* Go through the seq table and find any messages that 3330 have timed out, putting them in the timeouts 3331 list. */ 3332 spin_lock_irqsave(&intf->seq_lock, flags); 3333 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) 3334 check_msg_timeout(intf, &(intf->seq_table[j]), 3335 &timeouts, timeout_period, j, 3336 &flags); 3337 spin_unlock_irqrestore(&intf->seq_lock, flags); 3338 3339 list_for_each_entry_safe(msg, msg2, &timeouts, link) 3340 handle_msg_timeout(msg); 3341 3342 kref_put(&intf->refcount, intf_free); 3343 spin_lock(&interfaces_lock); 3344 } 3345 spin_unlock(&interfaces_lock); 3346 } 3347 3348 static void ipmi_request_event(void) 3349 { 3350 ipmi_smi_t intf; 3351 int i; 3352 3353 spin_lock(&interfaces_lock); 3354 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3355 intf = ipmi_interfaces[i]; 3356 if (IPMI_INVALID_INTERFACE(intf)) 3357 continue; 3358 3359 intf->handlers->request_events(intf->send_info); 3360 } 3361 spin_unlock(&interfaces_lock); 3362 } 3363 3364 static struct timer_list ipmi_timer; 3365 3366 /* Call every ~100 ms. */ 3367 #define IPMI_TIMEOUT_TIME 100 3368 3369 /* How many jiffies does it take to get to the timeout time. */ 3370 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 3371 3372 /* Request events from the queue every second (this is the number of 3373 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the 3374 future, IPMI will add a way to know immediately if an event is in 3375 the queue and this silliness can go away. 
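   With IPMI_TIMEOUT_TIME at 100 ms, the define below evaluates to 10
   timer ticks, i.e. one event request per second.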
*/ 3376 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) 3377 3378 static atomic_t stop_operation; 3379 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3380 3381 static void ipmi_timeout(unsigned long data) 3382 { 3383 if (atomic_read(&stop_operation)) 3384 return; 3385 3386 ticks_to_req_ev--; 3387 if (ticks_to_req_ev == 0) { 3388 ipmi_request_event(); 3389 ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 3390 } 3391 3392 ipmi_timeout_handler(IPMI_TIMEOUT_TIME); 3393 3394 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 3395 } 3396 3397 3398 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 3399 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 3400 3401 /* FIXME - convert these to slabs. */ 3402 static void free_smi_msg(struct ipmi_smi_msg *msg) 3403 { 3404 atomic_dec(&smi_msg_inuse_count); 3405 kfree(msg); 3406 } 3407 3408 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) 3409 { 3410 struct ipmi_smi_msg *rv; 3411 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); 3412 if (rv) { 3413 rv->done = free_smi_msg; 3414 rv->user_data = NULL; 3415 atomic_inc(&smi_msg_inuse_count); 3416 } 3417 return rv; 3418 } 3419 3420 static void free_recv_msg(struct ipmi_recv_msg *msg) 3421 { 3422 atomic_dec(&recv_msg_inuse_count); 3423 kfree(msg); 3424 } 3425 3426 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) 3427 { 3428 struct ipmi_recv_msg *rv; 3429 3430 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); 3431 if (rv) { 3432 rv->done = free_recv_msg; 3433 atomic_inc(&recv_msg_inuse_count); 3434 } 3435 return rv; 3436 } 3437 3438 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) 3439 { 3440 if (msg->user) 3441 kref_put(&msg->user->refcount, free_user); 3442 msg->done(msg); 3443 } 3444 3445 #ifdef CONFIG_IPMI_PANIC_EVENT 3446 3447 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 3448 { 3449 } 3450 3451 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 3452 { 3453 } 3454 3455 #ifdef CONFIG_IPMI_PANIC_STRING 3456 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 3457 { 3458 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3459 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) 3460 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) 3461 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3462 { 3463 /* A get event receiver command, save it. */ 3464 intf->event_receiver = msg->msg.data[1]; 3465 intf->event_receiver_lun = msg->msg.data[2] & 0x3; 3466 } 3467 } 3468 3469 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) 3470 { 3471 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) 3472 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) 3473 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) 3474 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) 3475 { 3476 /* A get device id command, save if we are an event 3477 receiver or generator. */ 3478 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; 3479 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; 3480 } 3481 } 3482 #endif 3483 3484 static void send_panic_events(char *str) 3485 { 3486 struct kernel_ipmi_msg msg; 3487 ipmi_smi_t intf; 3488 unsigned char data[16]; 3489 int i; 3490 struct ipmi_system_interface_addr *si; 3491 struct ipmi_addr addr; 3492 struct ipmi_smi_msg smi_msg; 3493 struct ipmi_recv_msg recv_msg; 3494 3495 si = (struct ipmi_system_interface_addr *) &addr; 3496 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3497 si->channel = IPMI_BMC_CHANNEL; 3498 si->lun = 0; 3499 3500 /* Fill in an event telling that we have failed. 
*/ 3501 msg.netfn = 0x04; /* Sensor or Event. */ 3502 msg.cmd = 2; /* Platform event command. */ 3503 msg.data = data; 3504 msg.data_len = 8; 3505 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */ 3506 data[1] = 0x03; /* This is for IPMI 1.0. */ 3507 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */ 3508 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ 3509 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ 3510 3511 /* Put a few breadcrumbs in. Hopefully later we can add more things 3512 to make the panic events more useful. */ 3513 if (str) { 3514 data[3] = str[0]; 3515 data[6] = str[1]; 3516 data[7] = str[2]; 3517 } 3518 3519 smi_msg.done = dummy_smi_done_handler; 3520 recv_msg.done = dummy_recv_done_handler; 3521 3522 /* For every registered interface, send the event. */ 3523 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3524 intf = ipmi_interfaces[i]; 3525 if (IPMI_INVALID_INTERFACE(intf)) 3526 continue; 3527 3528 /* Send the event announcing the panic. */ 3529 intf->handlers->set_run_to_completion(intf->send_info, 1); 3530 i_ipmi_request(NULL, 3531 intf, 3532 &addr, 3533 0, 3534 &msg, 3535 intf, 3536 &smi_msg, 3537 &recv_msg, 3538 0, 3539 intf->channels[0].address, 3540 intf->channels[0].lun, 3541 0, 1); /* Don't retry, and don't wait. */ 3542 } 3543 3544 #ifdef CONFIG_IPMI_PANIC_STRING 3545 /* On every interface, dump a bunch of OEM event holding the 3546 string. */ 3547 if (!str) 3548 return; 3549 3550 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3551 char *p = str; 3552 struct ipmi_ipmb_addr *ipmb; 3553 int j; 3554 3555 intf = ipmi_interfaces[i]; 3556 if (IPMI_INVALID_INTERFACE(intf)) 3557 continue; 3558 3559 /* First job here is to figure out where to send the 3560 OEM events. There's no way in IPMI to send OEM 3561 events using an event send command, so we have to 3562 find the SEL to put them in and stick them in 3563 there. */ 3564 3565 /* Get capabilities from the get device id. */ 3566 intf->local_sel_device = 0; 3567 intf->local_event_generator = 0; 3568 intf->event_receiver = 0; 3569 3570 /* Request the device info from the local MC. */ 3571 msg.netfn = IPMI_NETFN_APP_REQUEST; 3572 msg.cmd = IPMI_GET_DEVICE_ID_CMD; 3573 msg.data = NULL; 3574 msg.data_len = 0; 3575 intf->null_user_handler = device_id_fetcher; 3576 i_ipmi_request(NULL, 3577 intf, 3578 &addr, 3579 0, 3580 &msg, 3581 intf, 3582 &smi_msg, 3583 &recv_msg, 3584 0, 3585 intf->channels[0].address, 3586 intf->channels[0].lun, 3587 0, 1); /* Don't retry, and don't wait. */ 3588 3589 if (intf->local_event_generator) { 3590 /* Request the event receiver from the local MC. */ 3591 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST; 3592 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD; 3593 msg.data = NULL; 3594 msg.data_len = 0; 3595 intf->null_user_handler = event_receiver_fetcher; 3596 i_ipmi_request(NULL, 3597 intf, 3598 &addr, 3599 0, 3600 &msg, 3601 intf, 3602 &smi_msg, 3603 &recv_msg, 3604 0, 3605 intf->channels[0].address, 3606 intf->channels[0].lun, 3607 0, 1); /* no retry, and no wait. */ 3608 } 3609 intf->null_user_handler = NULL; 3610 3611 /* Validate the event receiver. The low bit must not 3612 be 1 (it must be a valid IPMB address), it cannot 3613 be zero, and it must not be my address. */ 3614 if (((intf->event_receiver & 1) == 0) 3615 && (intf->event_receiver != 0) 3616 && (intf->event_receiver != intf->channels[0].address)) 3617 { 3618 /* The event receiver is valid, send an IPMB 3619 message. 
*/ 3620 ipmb = (struct ipmi_ipmb_addr *) &addr; 3621 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; 3622 ipmb->channel = 0; /* FIXME - is this right? */ 3623 ipmb->lun = intf->event_receiver_lun; 3624 ipmb->slave_addr = intf->event_receiver; 3625 } else if (intf->local_sel_device) { 3626 /* The event receiver was not valid (or was 3627 me), but I am an SEL device, just dump it 3628 in my SEL. */ 3629 si = (struct ipmi_system_interface_addr *) &addr; 3630 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 3631 si->channel = IPMI_BMC_CHANNEL; 3632 si->lun = 0; 3633 } else 3634 continue; /* No where to send the event. */ 3635 3636 3637 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ 3638 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; 3639 msg.data = data; 3640 msg.data_len = 16; 3641 3642 j = 0; 3643 while (*p) { 3644 int size = strlen(p); 3645 3646 if (size > 11) 3647 size = 11; 3648 data[0] = 0; 3649 data[1] = 0; 3650 data[2] = 0xf0; /* OEM event without timestamp. */ 3651 data[3] = intf->channels[0].address; 3652 data[4] = j++; /* sequence # */ 3653 /* Always give 11 bytes, so strncpy will fill 3654 it with zeroes for me. */ 3655 strncpy(data+5, p, 11); 3656 p += size; 3657 3658 i_ipmi_request(NULL, 3659 intf, 3660 &addr, 3661 0, 3662 &msg, 3663 intf, 3664 &smi_msg, 3665 &recv_msg, 3666 0, 3667 intf->channels[0].address, 3668 intf->channels[0].lun, 3669 0, 1); /* no retry, and no wait. */ 3670 } 3671 } 3672 #endif /* CONFIG_IPMI_PANIC_STRING */ 3673 } 3674 #endif /* CONFIG_IPMI_PANIC_EVENT */ 3675 3676 static int has_panicked = 0; 3677 3678 static int panic_event(struct notifier_block *this, 3679 unsigned long event, 3680 void *ptr) 3681 { 3682 int i; 3683 ipmi_smi_t intf; 3684 3685 if (has_panicked) 3686 return NOTIFY_DONE; 3687 has_panicked = 1; 3688 3689 /* For every registered interface, set it to run to completion. 
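   Run-to-completion makes the lower layer finish each operation
   synchronously (polling rather than relying on interrupts), which is
   what the panic path needs since normal scheduling is gone.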
*/ 3690 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3691 intf = ipmi_interfaces[i]; 3692 if (IPMI_INVALID_INTERFACE(intf)) 3693 continue; 3694 3695 intf->handlers->set_run_to_completion(intf->send_info, 1); 3696 } 3697 3698 #ifdef CONFIG_IPMI_PANIC_EVENT 3699 send_panic_events(ptr); 3700 #endif 3701 3702 return NOTIFY_DONE; 3703 } 3704 3705 static struct notifier_block panic_block = { 3706 .notifier_call = panic_event, 3707 .next = NULL, 3708 .priority = 200 /* priority: INT_MAX >= x >= 0 */ 3709 }; 3710 3711 static int ipmi_init_msghandler(void) 3712 { 3713 int i; 3714 int rv; 3715 3716 if (initialized) 3717 return 0; 3718 3719 rv = driver_register(&ipmidriver); 3720 if (rv) { 3721 printk(KERN_ERR PFX "Could not register IPMI driver\n"); 3722 return rv; 3723 } 3724 3725 printk(KERN_INFO "ipmi message handler version " 3726 IPMI_DRIVER_VERSION "\n"); 3727 3728 for (i = 0; i < MAX_IPMI_INTERFACES; i++) 3729 ipmi_interfaces[i] = NULL; 3730 3731 #ifdef CONFIG_PROC_FS 3732 proc_ipmi_root = proc_mkdir("ipmi", NULL); 3733 if (!proc_ipmi_root) { 3734 printk(KERN_ERR PFX "Unable to create IPMI proc dir"); 3735 return -ENOMEM; 3736 } 3737 3738 proc_ipmi_root->owner = THIS_MODULE; 3739 #endif /* CONFIG_PROC_FS */ 3740 3741 setup_timer(&ipmi_timer, ipmi_timeout, 0); 3742 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 3743 3744 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 3745 3746 initialized = 1; 3747 3748 return 0; 3749 } 3750 3751 static __init int ipmi_init_msghandler_mod(void) 3752 { 3753 ipmi_init_msghandler(); 3754 return 0; 3755 } 3756 3757 static __exit void cleanup_ipmi(void) 3758 { 3759 int count; 3760 3761 if (!initialized) 3762 return; 3763 3764 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); 3765 3766 /* This can't be called if any interfaces exist, so no worry about 3767 shutting down the interfaces. */ 3768 3769 /* Tell the timer to stop, then wait for it to stop. This avoids 3770 problems with race conditions removing the timer here. */ 3771 atomic_inc(&stop_operation); 3772 del_timer_sync(&ipmi_timer); 3773 3774 #ifdef CONFIG_PROC_FS 3775 remove_proc_entry(proc_ipmi_root->name, &proc_root); 3776 #endif /* CONFIG_PROC_FS */ 3777 3778 driver_unregister(&ipmidriver); 3779 3780 initialized = 0; 3781 3782 /* Check for buffer leaks. 
*/ 3783 count = atomic_read(&smi_msg_inuse_count); 3784 if (count != 0) 3785 printk(KERN_WARNING PFX "SMI message count %d at exit\n", 3786 count); 3787 count = atomic_read(&recv_msg_inuse_count); 3788 if (count != 0) 3789 printk(KERN_WARNING PFX "recv message count %d at exit\n", 3790 count); 3791 } 3792 module_exit(cleanup_ipmi); 3793 3794 module_init(ipmi_init_msghandler_mod); 3795 MODULE_LICENSE("GPL"); 3796 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3797 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); 3798 MODULE_VERSION(IPMI_DRIVER_VERSION); 3799 3800 EXPORT_SYMBOL(ipmi_create_user); 3801 EXPORT_SYMBOL(ipmi_destroy_user); 3802 EXPORT_SYMBOL(ipmi_get_version); 3803 EXPORT_SYMBOL(ipmi_request_settime); 3804 EXPORT_SYMBOL(ipmi_request_supply_msgs); 3805 EXPORT_SYMBOL(ipmi_register_smi); 3806 EXPORT_SYMBOL(ipmi_unregister_smi); 3807 EXPORT_SYMBOL(ipmi_register_for_cmd); 3808 EXPORT_SYMBOL(ipmi_unregister_for_cmd); 3809 EXPORT_SYMBOL(ipmi_smi_msg_received); 3810 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3811 EXPORT_SYMBOL(ipmi_alloc_smi_msg); 3812 EXPORT_SYMBOL(ipmi_addr_length); 3813 EXPORT_SYMBOL(ipmi_validate_addr); 3814 EXPORT_SYMBOL(ipmi_set_gets_events); 3815 EXPORT_SYMBOL(ipmi_smi_watcher_register); 3816 EXPORT_SYMBOL(ipmi_smi_watcher_unregister); 3817 EXPORT_SYMBOL(ipmi_set_my_address); 3818 EXPORT_SYMBOL(ipmi_get_my_address); 3819 EXPORT_SYMBOL(ipmi_set_my_LUN); 3820 EXPORT_SYMBOL(ipmi_get_my_LUN); 3821 EXPORT_SYMBOL(ipmi_smi_add_proc_entry); 3822 EXPORT_SYMBOL(ipmi_user_set_run_to_completion); 3823 EXPORT_SYMBOL(ipmi_free_recv_msg); 3824